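# Conditional part generators: one UNet-based GAN per creature part, each
# restored from a saved checkpoint (see the load_latest pattern below).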
model_tail = Trainer_cond_unet(name_tail, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_tail.load_config()
model_tail.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_tail, load_from)))
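
# Each generator below follows the same recipe: look up the newest saved
# checkpoint, cap the step at min_step so every part loads comparable weights,
# then restore the state dict. load_latest() is defined elsewhere in the repo;
# a hypothetical sketch of its assumed behavior (the file pattern is a guess,
# not the repo's actual code; assumes os and re are imported):
#
#     def load_latest(models_dir, name):
#         steps = [int(m.group(1)) for f in os.listdir(os.path.join(models_dir, name))
#                  for m in [re.match(r'model_(\d+)\.pt$', f)] if m]
#         return max(steps) if steps else 0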

name_fin = 'long_generic_creative_sequential_r6_partstack_aug_fin_unet_largeaug'
load_from = load_latest(models_dir, name_fin)
load_from = min(min_step, load_from)
model_fin = Trainer_cond_unet(name_fin, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_fin.load_config()
model_fin.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_fin, load_from)))

name_horns = 'long_generic_creative_sequential_r6_partstack_aug_horns_unet_largeaug'
load_from = load_latest(models_dir, name_horns)
load_from = min(min_step, load_from)
model_horns = Trainer_cond_unet(name_horns, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_horns.load_config()
model_horns.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_horns, load_from)))

name_paws = 'long_generic_creative_sequential_r6_partstack_aug_paws_unet_largeaug'
load_from = load_latest(models_dir, name_paws)
load_from = min(min_step, load_from)
model_paws = Trainer_cond_unet(name_paws, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_paws.load_config()
model_paws.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_paws, load_from)))

name_arms = 'long_generic_creative_sequential_r6_partstack_aug_arms_unet_largeaug'
load_from = load_latest(models_dir, name_arms)
load_from = min(min_step, load_from)
model_arms = Trainer_cond_unet(name_arms, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_arms.load_config()
model_arms.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_arms, load_from)))
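
# Part selector: a classifier that scores, given the current part stack, which
# part should be drawn next ('none' means the sketch is complete).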
name_selector = 'long_generic_creative_selector_aug'
load_from = load_latest(models_dir, name_selector)
part_selector = Trainer_selector(name_selector, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
part_selector.load_config()
part_selector.clf.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_selector, load_from)))
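
# Test-time inputs: a directory of pre-drawn initial strokes, served through a
# simple dataset/dataloader.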
initial_dir = '%s/generic_long_test_init_strokes_%d'%(data_path, image_size)
if not os.path.exists(results_dir):
    os.mkdir(results_dir)
dataset = Initialstroke_Dataset(initial_dir, image_size=image_size)
dataloader = data.DataLoader(dataset, num_workers=5, batch_size=batch_size, drop_last=False, shuffle=False, pin_memory=True)
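
# One generator per part class, in the order the selector's output indexes them.
# part_to_id maps each part to its channel in the 19-channel part stack
# (channel 0 = initial strokes, channels 1-17 = parts, channel 18 = the running
# partial sketch, addressed as -1 below).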
models = [model_eye, model_arms, model_beak, model_mouth, model_body, model_ears, model_feet, model_fin, model_hair,
          model_hands, model_head, model_horns, model_legs, model_nose, model_paws, model_tail, model_wings]
target_parts = ['eye', 'arms', 'beak', 'mouth', 'body', 'ears', 'feet', 'fin',
                'hair', 'hands', 'head', 'horns', 'legs', 'nose', 'paws', 'tail', 'wings', 'none']
part_to_id = {'initial': 0, 'eye': 1, 'arms': 2, 'beak': 3, 'mouth': 4, 'body': 5, 'ears': 6, 'feet': 7, 'fin': 8,
              'hair': 9, 'hands': 10, 'head': 11, 'horns': 12, 'legs': 13, 'nose': 14, 'paws': 15, 'tail': 16, 'wings': 17}
max_iter = 10
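
# Generation loop: grow each sketch from its initial strokes, one part at a time.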
if generate_all:
    generation_dir = os.path.join(results_dir, 'DoodlerGAN_all')
    if not os.path.exists(generation_dir):
        os.mkdir(generation_dir)
        os.mkdir(os.path.join(generation_dir, 'bw'))
        os.mkdir(os.path.join(generation_dir, 'color_initial'))
        os.mkdir(os.path.join(generation_dir, 'color'))
    for count, initial_strokes in enumerate(dataloader):
        initial_strokes = initial_strokes.cuda()
        start_point = len(os.listdir(os.path.join(generation_dir, 'bw')))
        print('%d sketches generated'%start_point)
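        # Fresh 19-channel canvas per sample: channel 0 holds the initial
        # strokes and channel -1 the running partial sketch.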
        for i in range(batch_size):
            samples_name = f'generated-{start_point+i}'
            stack_parts = torch.zeros(1, 19, image_size, image_size).cuda()
            initial_strokes_rgb = gs_to_rgb(initial_strokes[i], COLORS['initial'])
            stack_parts[:, 0] = initial_strokes[i, 0]
            stack_parts[:, -1] = initial_strokes[i, 0]
            partial_rgbs = initial_strokes_rgb.clone()
            prev_part = []
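            # Iteratively ask the selector which part to add next, for at most
            # max_iter parts.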
            for iter_i in range(max_iter):
                outputs = part_selector.clf.D(stack_parts)
                part_rgbs = torch.ones(1, 3, image_size, image_size).cuda()
                select_part_order = 0
                select_part_ids = torch.topk(outputs, k=10, dim=0)[1]
                select_part_id = select_part_ids[select_part_order].item()
                select_part = target_parts[select_part_id]
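                # Fall back to the next-ranked class when the top pick is a
                # repeat, or when it is 'none' within the first six iterations
                # (the sketch is not allowed to stop that early).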
                while (select_part == 'none' and iter_i < 6) or select_part in prev_part:
                    select_part_order += 1
                    select_part_id = select_part_ids[select_part_order].item()
                    select_part = target_parts[select_part_id]
                if select_part == 'none':
                    break
                prev_part.append(select_part)
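                # Render the chosen part with its generator, conditioned on the
                # current part stack, then write it into the stack and the
                # running RGB canvases.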
                sketch_rgb = partial_rgbs
                stack_part = stack_parts[0].unsqueeze(0)
                select_model = models[select_part_id]
                part, partial, part_rgb, partial_rgb = generate_part(select_model.GAN, stack_part, sketch_rgb, COLORS[select_part], select_part, samples_name, 1, trans_std=0, results_dir=results_dir)
                stack_parts[0, part_to_id[select_part]] = part[0, 0]
                stack_parts[0, -1] = partial[0, 0]
                partial_rgbs[0] = partial_rgb[0]
                part_rgbs[0] = part_rgb[0]