text stringlengths 1 93.6k |
|---|
if len(chunked_outputs) == 1:
|
return chunked_outputs[0]
|
return torch.cat(chunked_outputs, dim=0)
|
def styles_def_to_tensor(styles_def):
    """Flatten a list of (style, repeat_count) pairs into one per-layer style tensor.

    Each style tensor of shape (batch, dim) is repeated `repeats` times along a
    new layer axis; the pieces are concatenated into (batch, total_layers, dim).
    """
    per_layer = []
    for style, repeats in styles_def:
        per_layer.append(style[:, None, :].expand(-1, repeats, -1))
    return torch.cat(per_layer, dim=1)
|
def gs_to_rgb(image, color):
    """Tint a single-channel grayscale batch into an RGB image.

    The grayscale channel is broadcast to three channels, scaled by `color`,
    and inverted so stroke intensity maps onto the given color on white.
    """
    three_channel = image.repeat(1, 3, 1, 1)
    return 1 - three_channel * color
|
@torch.no_grad()
def generate_truncated(S, G, style, noi, trunc_psi = 0.75, num_image_tiles = 8, bitmap_feats=None, batch_size=8):
    """Synthesize images from G using truncated style vectors.

    Estimates the mean style ("average w") by mapping 2000 random latents
    through the style network S, then pulls each requested style toward that
    mean by `trunc_psi` (the truncation trick) before running the generator.

    Args:
        S: style mapping network (z -> w).
        G: generator; must expose `latent_dim`.
        style: list of (latent_tensor, num_layers) pairs.
        noi: per-pixel image noise passed to the generator.
        trunc_psi: truncation strength; 1.0 disables truncation.
        num_image_tiles: unused here; kept for interface compatibility.
        bitmap_feats: conditioning features forwarded to the generator.
        batch_size: chunk size for batched evaluation.

    Returns:
        Generated image tensor clamped in place to [0, 1].
    """
    latent_dim = G.latent_dim

    # Estimate the mean style vector from a large sample of random latents.
    z = noise(2000, latent_dim)
    samples = evaluate_in_chunks(batch_size, S, z).cpu().numpy()
    av = np.mean(samples, axis=0)
    av = np.expand_dims(av, axis=0)
    # Hoisted out of the loop: the mean is identical for every style entry,
    # so there is no need to re-wrap/transfer it per iteration.
    av_torch = torch.from_numpy(av).cuda()

    w_space = []
    for tensor, num_layers in style:
        w = S(tensor)
        # Truncation trick: interpolate the style toward the mean.
        w = trunc_psi * (w - av_torch) + av_torch
        w_space.append((w, num_layers))

    w_styles = styles_def_to_tensor(w_space)
    generated_images = evaluate_in_chunks_unet(batch_size, G, bitmap_feats, w_styles, noi)
    return generated_images.clamp_(0., 1.)
|
@torch.no_grad()
def generate_part(model, partial_image, partial_rgb, color=None, part_name=20, num=0, num_image_tiles=8, trunc_psi=1., save_img=False, trans_std=2, results_dir='../results/bird_seq_unet_5fold'):
    """Generate one sketch part conditioned on a partially drawn image.

    The partial sketch is randomly translated, encoded by `model.Enc`, used to
    condition `generate_truncated`, and the generated strokes are translated
    back before being composited onto the existing partial canvases.

    Returns a 4-tuple of tensors, each clamped to [0, 1]:
    (generated part, part + last partial channel, part as RGB, full RGB composite).
    Optionally saves the two RGB grids under `results_dir` when `save_img` is set.
    """
    model.eval()
    ext = 'png'
    num_rows = num_image_tiles
    latent_dim = model.G.latent_dim
    image_size = model.G.image_size
    num_layers = model.G.num_layers

    def translate_image(image, trans_std=2, rot_std=3, scale_std=2):
        # Apply a random x/y translation to every channel of `image`.
        # NOTE(review): theta/scale are sampled and a rotation matrix M is
        # built, but warpAffine is only called with the translation T — the
        # rotation/scale are returned yet never applied. Confirm intended.
        affine_image = torch.zeros_like(image)
        side = image.shape[-1]
        x_shift = np.random.normal(0, trans_std)
        y_shift = np.random.normal(0, trans_std)
        theta = np.random.normal(0, rot_std)
        scale = int(np.random.normal(0, scale_std))
        T = np.float32([[1, 0, x_shift], [0, 1, y_shift]])
        M = cv2.getRotationMatrix2D((side/2,side/2),theta,1)
        for i in range(image.shape[1]):
            # Channels are warped one at a time on the CPU, then moved back to GPU.
            sketch_channel = image[0, i].cpu().data.numpy()
            sketch_translation = cv2.warpAffine(sketch_channel, T, (side, side))
            affine_image[0, i] = torch.cuda.FloatTensor(sketch_translation)
        return affine_image, x_shift, y_shift, theta, scale

    def recover_image(image, x_shift, y_shift, theta, scale):
        # Undo translate_image by shifting in the opposite direction.
        # NOTE(review): as above, the rotation matrix M is unused here too.
        x_shift *= -1
        y_shift *= -1
        theta *= -1
        # scale *= -1
        affine_image = torch.zeros_like(image)
        side = image.shape[-1]
        T = np.float32([[1, 0, x_shift], [0, 1, y_shift]])
        M = cv2.getRotationMatrix2D((side/2,side/2),theta,1)
        for i in range(image.shape[1]):
            sketch_channel = image[0, i].cpu().data.numpy()
            sketch_translation = cv2.warpAffine(sketch_channel, T, (side, side))
            affine_image[0, i] = torch.cuda.FloatTensor(sketch_translation)
        return affine_image

    # latents and noise
    latents_z = noise_list(num_rows ** 2, num_layers, latent_dim)
    n = image_noise(num_rows ** 2, image_size)

    # Last channel of the partial stack is the accumulated sketch so far
    # (presumably — verify against the caller's channel layout).
    image_partial_batch = partial_image[:, -1:, :, :]
    # Jitter the conditioning sketch, encode it, generate, then shift back.
    translated_image, dx, dy, theta, scale = translate_image(partial_image, trans_std=trans_std)
    bitmap_feats = model.Enc(translated_image)
    # bitmap_feats = model.Enc(partial_image)
    # generated_partial_images = generate_truncated(model.S, model.G, latents_z, n, trunc_psi = trunc_psi, bitmap_feats=bitmap_feats)
    generated_partial_images = recover_image(generate_truncated(model.S, model.G, latents_z, n, trunc_psi = trunc_psi, bitmap_feats=bitmap_feats), dx, dy, theta, scale)

    # post process: composite the new strokes onto the existing canvases.
    generated_partial_rgb = gs_to_rgb(generated_partial_images, color)
    generated_images = generated_partial_images + image_partial_batch
    # Multiply-style compositing in "ink" space: invert, add, invert back.
    generated_rgb = 1 - ((1-generated_partial_rgb)+(1-partial_rgb))
    if save_img:
        torchvision.utils.save_image(generated_partial_rgb, os.path.join(results_dir, f'{str(num)}-{part_name}-comp.{ext}'), nrow=num_rows)
        torchvision.utils.save_image(generated_rgb, os.path.join(results_dir, f'{str(num)}-{part_name}.{ext}'), nrow=num_rows)
    return generated_partial_images.clamp_(0., 1.), generated_images.clamp_(0., 1.), generated_partial_rgb.clamp_(0., 1.), generated_rgb.clamp_(0., 1.)
|
def train_from_folder(
|
data_path = '../../data',
|
results_dir = '../../results',
|
models_dir = '../../models',
|
n_part = 1,
|
image_size = 128,
|
network_capacity = 16,
|
batch_size = 3,
|
num_image_tiles = 8,
|
trunc_psi = 0.75,
|
generate_all=False,
|
):
|
min_step = 599
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.