-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathsample_gan.py
205 lines (178 loc) · 9.55 KB
/
sample_gan.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
import os
import os.path as osp
import argparse
import torch
import json
from torch import nn
from hashlib import sha1
from torchvision.transforms import ToPILImage
from lib import *
from models.gan_load import build_biggan, build_proggan, build_stylegan2, build_sngan
from models.vae import VAE, ConvVAE
import numpy as np
def tensor2image(tensor, adaptive=False):
    """Convert an image tensor into a PIL image.

    Args:
        tensor: image tensor of shape (C, H, W), or (1, C, H, W) — a singleton
            leading batch dimension is squeezed away.
        adaptive (bool): if True, min-max normalise the tensor to [0, 1];
            otherwise assume values lie in [-1, 1] and map them to [0, 1],
            clamping anything outside that range.

    Returns:
        A `PIL.Image` built from the tensor scaled to uint8 in [0, 255].
    """
    tensor = tensor.squeeze(dim=0)
    if adaptive:
        # Min-max normalisation to [0, 1].
        tensor = (tensor - tensor.min()) / (tensor.max() - tensor.min())
    else:
        # Map [-1, 1] -> [0, 1] and clamp.
        # BUG FIX: the original called `tensor.clamp(0, 1)` and discarded the
        # result (clamp is out-of-place), so out-of-range values were never
        # actually clamped; keep the clamped tensor instead.
        tensor = ((tensor + 1) / 2).clamp(0, 1)
    return ToPILImage()((255 * tensor.cpu().detach()).to(torch.uint8))
class DataParallelPassthrough(nn.DataParallel):
    """`nn.DataParallel` that forwards unknown attribute lookups to the wrapped module.

    Plain `nn.DataParallel` hides the wrapped model's attributes and methods
    behind `self.module`; this subclass lets callers keep accessing them
    directly on the wrapper (e.g. `G.dim_z`).
    """

    def __getattr__(self, name):
        # Let DataParallel / nn.Module resolve the name first; if they cannot,
        # delegate the lookup to the wrapped module.
        try:
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.module, name)
def main():
    """A script for sampling from a pre-trained GAN latent space and generating images. The generated images, along with
    the corresponding latent codes (in torch.Tensor format), will be stored under
    `experiments/latent_codes/<gan_type>/<pool>/`.

    If no pool name is given, then `<gan_type>_<num_samples>/` will be used instead.

    Options:
        -v, --verbose           : set verbose mode on
        -g, --gan-type          : set GAN type (SNGAN_MNIST, SNGAN_AnimeFaces, BigGAN, ProgGAN, or StyleGAN2)
        --z-truncation          : set latent code sampling truncation parameter. If set, latent codes will be sampled
                                  from a standard Gaussian distribution truncated to the range [-args.z_truncation,
                                  +args.z_truncation]
        --biggan-target-classes : set list of classes to use for conditional BigGAN (see BIGGAN_CLASSES in
                                  lib/config.py). E.g., --biggan-target-classes 14 239.
        --stylegan2-resolution  : set StyleGAN2 generator output images resolution (256 or 1024)
        --num-samples           : set the number of latent codes to sample for generating images
        --pool                  : set name of the latent codes/images pool.
        --cuda                  : use CUDA (default)
        --no-cuda               : do not use CUDA
    """
    parser = argparse.ArgumentParser(description="Sample a pre-trained GAN latent space and generate images")
    parser.add_argument('-v', '--verbose', action='store_true', help="set verbose mode on")
    parser.add_argument('-g', '--gan-type', type=str, required=True, help='GAN generator model type')
    parser.add_argument('--shift-in-w-space', action='store_true', help="search latent paths in StyleGAN2's W-space")
    parser.add_argument('--z-truncation', type=float, help="set latent code sampling truncation parameter")
    parser.add_argument('--biggan-target-classes', nargs='+', type=int, help="list of classes for conditional BigGAN")
    parser.add_argument('--stylegan2-resolution', type=int, default=1024, choices=(256, 1024),
                        help="StyleGAN2 image resolution")
    parser.add_argument('--num-samples', type=int, default=4, help="number of latent codes to sample")
    parser.add_argument('--pool', type=str, help="name of latent codes/images pool")
    parser.add_argument('--cuda', dest='cuda', action='store_true', help="use CUDA during training")
    parser.add_argument('--no-cuda', dest='cuda', action='store_false', help="do NOT use CUDA during training")
    parser.set_defaults(cuda=True)
    # ================================================================================================================ #

    # Parse given arguments
    args = parser.parse_args()

    # Create output dir for generated images
    out_dir = osp.join('experiments', 'latent_codes', args.gan_type)
    biggan_classes = None
    if args.gan_type == 'BigGAN':
        # Conditional BigGAN requires an explicit list of target classes.
        if args.biggan_target_classes is None:
            # BUG FIX: `parser.error()` prints the message and raises SystemExit itself;
            # the original `raise parser.error(...)` would (if ever reached) try to
            # raise its None return value, which is a TypeError.
            parser.error("In case of BigGAN, a list of classes needs to be determined.")
        # Encode the class list into the output directory name (e.g. "-14-239").
        biggan_classes = ''
        for c in args.biggan_target_classes:
            biggan_classes += '-{}'.format(c)
        out_dir += biggan_classes
    if args.pool:
        out_dir = osp.join(out_dir, args.pool)
    else:
        out_dir = osp.join(out_dir, '{}_{}'.format(args.gan_type + biggan_classes if args.gan_type == 'BigGAN'
                                                   else args.gan_type, args.num_samples))
    os.makedirs(out_dir, exist_ok=True)

    # Save arguments in a json file (for reproducibility of the pool)
    with open(osp.join(out_dir, 'args.json'), 'w') as args_json_file:
        json.dump(args.__dict__, args_json_file)

    # Set default tensor type (CUDA float tensors when a GPU is present and requested)
    if torch.cuda.is_available():
        if args.cuda:
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        if not args.cuda:
            print("*** WARNING ***: It looks like you have a CUDA device, but aren't using CUDA.\n"
                  " Run with --cuda for optimal training speed.")
            torch.set_default_tensor_type('torch.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    use_cuda = args.cuda and torch.cuda.is_available()

    # Build GAN generator model and load with pre-trained weights
    if args.verbose:
        print("#. Build GAN generator model G and load with pre-trained weights...")
        print(" \\__GAN type: {}".format(args.gan_type))
        if args.gan_type == 'BigGAN':
            print(" \\__Target classes: {}".format(args.biggan_target_classes))
        # NOTE(review): this GAN_WEIGHTS lookup will KeyError for the VAE_* types
        # when --verbose is set — confirm against lib/config.py.
        print(" \\__Pre-trained weights: {}".format(
            GAN_WEIGHTS[args.gan_type]['weights'][args.stylegan2_resolution] if args.gan_type == 'StyleGAN2' else
            GAN_WEIGHTS[args.gan_type]['weights'][GAN_RESOLUTIONS[args.gan_type]]))

    # -- BigGAN
    if args.gan_type == 'BigGAN':
        G = build_biggan(pretrained_gan_weights=GAN_WEIGHTS[args.gan_type]['weights'][GAN_RESOLUTIONS[args.gan_type]],
                         target_classes=args.biggan_target_classes)
    # -- ProgGAN
    elif args.gan_type == 'ProgGAN':
        G = build_proggan(pretrained_gan_weights=GAN_WEIGHTS[args.gan_type]['weights'][GAN_RESOLUTIONS[args.gan_type]])
    # -- Convolutional VAE trained on MNIST (weights loaded from the working directory)
    elif args.gan_type == 'VAE_MNIST':
        # G = VAE(encoder_layer_sizes=[784,256],latent_size=16,decoder_layer_sizes=[256,784])
        G = ConvVAE(num_channel=1, latent_size=18 * 18, img_size=28)
        G.load_state_dict(torch.load("vae_mnist_conv.pt", map_location='cpu'))
    # -- Convolutional VAE trained on dSprites (weights loaded from the working directory)
    elif args.gan_type == 'VAE_DSPRITES':
        # G = ConvVAE(num_channel=1,latent_size=256)
        G = ConvVAE(num_channel=1, latent_size=15 * 15 + 1, img_size=64)
        G.load_state_dict(torch.load("vae_dsprites_conv.pt", map_location='cpu'))
    # -- StyleGAN2
    elif args.gan_type == 'StyleGAN2':
        G = build_stylegan2(resolution=args.stylegan2_resolution,
                            pretrained_gan_weights=GAN_WEIGHTS[args.gan_type]['weights'][args.stylegan2_resolution],
                            shift_in_w_space=args.shift_in_w_space)
    # -- Spectrally Normalised GAN (SNGAN)
    else:
        G = build_sngan(pretrained_gan_weights=GAN_WEIGHTS[args.gan_type]['weights'][GAN_RESOLUTIONS[args.gan_type]],
                        gan_type=args.gan_type)

    # Upload GAN generator model to GPU
    if use_cuda:
        G = G.cuda()

    # Set generator to evaluation mode
    G.eval()

    # Latent codes sampling
    if args.verbose:
        # print("#. Sample {} {}-dimensional latent codes...".format(args.num_samples, G.dim_z))
        if args.z_truncation:
            print(" \\__Truncate standard Gaussian to range [{}, +{}]".format(-args.z_truncation, args.z_truncation))
    # zs = torch.randn(args.num_samples, G.dim_z)
    # The VAEs expose their latent dimensionality as `latent_size`; the GANs as `dim_z`.
    if args.gan_type == 'VAE_MNIST' or args.gan_type == 'VAE_DSPRITES':
        zs = sample_z(batch_size=args.num_samples, dim_z=G.latent_size, truncation=args.z_truncation)
    else:
        zs = sample_z(batch_size=args.num_samples, dim_z=G.dim_z, truncation=args.z_truncation)
    if use_cuda:
        zs = zs.cuda()

    if args.verbose:
        print("#. Generate images...")
        print(" \\__{}".format(out_dir))

    # Iterate over given latent codes
    for i in range(args.num_samples):
        # Un-squeeze current latent code in shape [1, dim] and create hash code for it
        z = zs[i, :].unsqueeze(0)
        latent_code_hash = sha1(z.cpu().numpy()).hexdigest()
        if args.verbose:
            update_progress(
                " \\__.Latent code hash: {} [{:03d}/{:03d}] ".format(latent_code_hash, i + 1, args.num_samples),
                args.num_samples, i)

        # Create directory for current latent code
        latent_code_dir = osp.join(out_dir, '{}'.format(latent_code_hash))
        os.makedirs(latent_code_dir, exist_ok=True)

        # Save latent code tensor under `latent_code_dir`
        torch.save(z.cpu(), osp.join(latent_code_dir, 'latent_code.pt'))

        # Generate image for the given latent code z (no gradients needed at inference time)
        with torch.no_grad():
            if args.gan_type == 'VAE_MNIST' or args.gan_type == 'VAE_DSPRITES':
                img = G.inference(z).cpu()
            else:
                img = G(z).cpu()
        # if args.gan_type== 'VAE_MNIST':
        #     H = img.size(1)
        #     img = img.view(1, int(np.sqrt(H)), int(np.sqrt(H)))

        # Convert image's tensor into an RGB image and save it
        img_pil = tensor2image(img, adaptive=True)
        img_pil.save(osp.join(latent_code_dir, 'image.jpg'), "JPEG", quality=95, optimize=True, progressive=True)

    if args.verbose:
        update_stdout(1)
        print()
        print()
# Script entry point: run the sampling pipeline when executed directly.
if __name__ == '__main__':
    main()