
Commit

formatting changes from black 22.3.0
Summary:
Applies the black-fbsource codemod with the new build of pyfmt.

paintitblack

Reviewed By: lisroach

Differential Revision: D36324783

fbshipit-source-id: 280c09e88257e5e569ab729691165d8dedd767bc
amyreese authored and facebook-github-bot committed May 12, 2022
1 parent 988663d commit 3232620
Showing 2 changed files with 9 additions and 9 deletions.
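
The style change applied in both files is black's power-operator formatting: when both operands of ** are simple (names, numeric literals, or plain attribute access), the spaces around the operator are dropped. A minimal before/after sketch of the rule, reusing identifiers from the diffs below and assuming black 22.3.0 defaults:

    # Before (earlier pyfmt/black build):
    ds_size = img_size // 2 ** 4
    adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1))

    # After (black 22.3.0):
    ds_size = img_size // 2**4
    adv_layer = nn.Sequential(nn.Linear(128 * ds_size**2, 1))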
torchrecipes/vision/image_generation/models/infogan.py (5 additions, 5 deletions)
@@ -34,7 +34,7 @@ def __init__(
 
         self.init_size: int = img_size // 4  # Initial size before upsampling
         self.l1: nn.modules.Sequential = nn.Sequential(
-            nn.Linear(input_dim, 128 * self.init_size ** 2)
+            nn.Linear(input_dim, 128 * self.init_size**2)
         )
 
         self.conv_blocks: nn.modules.Sequential = nn.Sequential(
@@ -99,17 +99,17 @@ def discriminator_block(
         )
 
         # The height and width of downsampled image
-        ds_size = img_size // 2 ** 4
+        ds_size = img_size // 2**4
 
         # Output layers
         self.adv_layer: nn.modules.Sequential = nn.Sequential(
-            nn.Linear(128 * ds_size ** 2, 1)
+            nn.Linear(128 * ds_size**2, 1)
         )
         self.aux_layer: nn.modules.Sequential = nn.Sequential(
-            nn.Linear(128 * ds_size ** 2, n_classes), nn.Softmax()
+            nn.Linear(128 * ds_size**2, n_classes), nn.Softmax()
         )
         self.latent_layer: nn.modules.Sequential = nn.Sequential(
-            nn.Linear(128 * ds_size ** 2, code_dim)
+            nn.Linear(128 * ds_size**2, code_dim)
         )
 
     def forward(
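Since ** already binds more tightly than * and //, dropping the surrounding spaces is purely cosmetic and does not change evaluation order. A quick sanity check with an illustrative value (not taken from the model):

    init_size = 8
    assert 128 * init_size**2 == 128 * (init_size ** 2) == 8192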
torchrecipes/vision/image_generation/module/infogan.py (4 additions, 4 deletions)
@@ -109,7 +109,7 @@ def __init__(
 
         # Static generator inputs for sampling
         self.register_buffer(
-            "static_z", torch.zeros([self.n_classes ** 2, self.latent_dim])
+            "static_z", torch.zeros([self.n_classes**2, self.latent_dim])
         )
         self.register_buffer(
             "static_label",
@@ -126,7 +126,7 @@ def __init__(
             ),
         )
         self.register_buffer(
-            "static_code", torch.zeros([self.n_classes ** 2, self.code_dim])
+            "static_code", torch.zeros([self.n_classes**2, self.code_dim])
         )
 
     def training_step(
@@ -292,7 +292,7 @@ def validation_step(
     def generate_sample_image(self, n_row: int = 10) -> Tensor:
         # Static sample
         z = torch.tensor(
-            np.random.normal(0, 1, (n_row ** 2, self.latent_dim)),
+            np.random.normal(0, 1, (n_row**2, self.latent_dim)),
             dtype=torch.float,
             device=self.device,
         )
@@ -301,7 +301,7 @@ def generate_sample_image(self, n_row: int = 10) -> Tensor:
         static_img = make_grid(static_sample, nrow=n_row, normalize=True, padding=0)
 
         # Get varied c1 and c2
-        zeros = np.zeros((n_row ** 2, 1))
+        zeros = np.zeros((n_row**2, 1))
         c_varied = np.repeat(np.linspace(-1, 1, n_row)[:, np.newaxis], n_row, 0)
         c1 = torch.tensor(
             np.concatenate((c_varied, zeros), -1),
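For reference, the same rewrite can be reproduced outside the fbsource/pyfmt codemod with black's Python API; this is a minimal sketch assuming black 22.3.0 or later is installed and relies on its documented format_str and Mode helpers:

    import black

    src = "ds_size = img_size // 2 ** 4\n"
    print(black.format_str(src, mode=black.Mode()))
    # prints: ds_size = img_size // 2**4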
