text
stringlengths
1
93.6k
# NOTE(review): indentation in this chunk was flattened by extraction; the
# if/else and for-loop nesting below must be restored from context before this
# runs. Code lines are unchanged — only comments were added.
#
# --- Branch 1 (condition above the visible window): write a single generated
# sample to disk as b/w and colored PNGs. ---
# Collapse the per-part channels (skipping channel 0 and the last,
# accumulator-style channel — see the assignments to [:, 0] / [:, -1] below)
# with a max, then replicate to 3 channels for an RGB canvas.
initial_colored_full = np.tile(np.max(stack_parts.cpu().data.numpy()[:, 1:-1], 1), [3, 1, 1])
# Overlay the colored initial strokes: work in "ink space" (1 - x) so max()
# composites the darker of the two layers, then invert back.
initial_colored_full = 1-np.max(np.stack([1-initial_strokes_rgb.cpu().data.numpy()[0], initial_colored_full]), 0)
# Black-and-white render of the accumulated sketch (last channel), scaled to 0..255.
cv2.imwrite(os.path.join(generation_dir, 'bw', f'{str(samples_name)}.png'), (1-stack_parts[0, -1].cpu().data.numpy())*255.)
# Colored render including the initial strokes; cv2 expects BGR, data is RGB (CHW -> HWC).
cv2.imwrite(os.path.join(generation_dir, 'color_initial', f'{str(samples_name)}-color.png'), cv2.cvtColor(initial_colored_full.transpose(1, 2, 0)*255., cv2.COLOR_RGB2BGR))
# Colored render of the assembled partial sketch.
cv2.imwrite(os.path.join(generation_dir, 'color', f'{str(samples_name)}-color.png'), cv2.cvtColor(partial_rgbs[0].cpu().data.numpy().transpose(1, 2, 0)*255., cv2.COLOR_RGB2BGR))
# --- Branch 2: generate a full grid of num_image_tiles x num_image_tiles
# creatures, iteratively selecting and drawing one part per round. ---
else:
now = datetime.now()
timestamp = now.strftime("%m-%d-%Y_%H-%M-%S")
# One 19-channel canvas per tile: channel 0 = initial strokes, channels 1..17
# presumably one per body part (indexed via part_to_id below — TODO confirm),
# channel -1 = accumulated sketch so far.
stack_parts = torch.zeros(num_image_tiles*num_image_tiles, 19, image_size, image_size).cuda()
initial_strokes = dataset.sample(num_image_tiles*num_image_tiles).cuda()
initial_strokes_rgb = gs_to_rgb(initial_strokes, COLORS['initial'])
stack_parts[:, 0] = initial_strokes[:, 0]
stack_parts[:, -1] = initial_strokes[:, 0]
partial_rgbs = initial_strokes_rgb.clone()
# Per-tile history of parts already drawn, to avoid selecting the same part twice.
prev_parts = [[] for _ in range(num_image_tiles**2)]
samples_name = f'generated-{timestamp}-{min_step}'
for iter_i in range(max_iter):
# Part selector scores each candidate part given the current canvases.
outputs = part_selector.clf.D(stack_parts)
part_rgbs = torch.ones(num_image_tiles*num_image_tiles, 3, image_size, image_size).cuda()
for i in range(num_image_tiles**2):
prev_part = prev_parts[i]
select_part_order = 0
select_part_ids = torch.topk(outputs[i], k=16, dim=0)[1]
select_part_id = select_part_ids[select_part_order].item()
select_part = target_parts[select_part_id]
# Walk down the ranked candidates, skipping 'none' during the first 6
# rounds (force at least that many parts) and any part already drawn.
while (select_part == 'none' and iter_i < 6 or select_part in prev_part):
select_part_order += 1
select_part_id = select_part_ids[select_part_order].item()
select_part = target_parts[select_part_id]
# 'none' after round 6 means this tile is considered finished for this round.
if select_part == 'none':
continue
prev_parts[i].append(select_part)
sketch_rgb = partial_rgbs[i].clone().unsqueeze(0)
stack_part = stack_parts[i].unsqueeze(0)
# Each part has its own generator model, keyed by the part id.
select_model = models[select_part_id]
part, partial, part_rgb, partial_rgb = generate_part(select_model.GAN, stack_part, sketch_rgb, COLORS[select_part], select_part, samples_name, 1, trans_std=2, results_dir=results_dir)
# Record the new part in its dedicated channel and refresh the accumulator.
stack_parts[i, part_to_id[select_part]] = part[0, 0]
stack_parts[i, -1] = partial[0, 0]
partial_rgbs[i] = partial_rgb[0]
part_rgbs[i] = part_rgb[0]
# Per-round grids: assembled sketches and this round's newly drawn parts.
torchvision.utils.save_image(partial_rgbs, os.path.join(results_dir, f'{str(samples_name)}-round{iter_i}.png'), nrow=num_image_tiles)
torchvision.utils.save_image(part_rgbs, os.path.join(results_dir, f'{str(samples_name)}-part-round{iter_i}.png'), nrow=num_image_tiles)
# Final grid: inverted accumulator channel (ink-on-white) for every tile.
torchvision.utils.save_image(1-stack_parts[:, -1:], os.path.join(results_dir, f'{str(samples_name)}-final_pred.png'), nrow=num_image_tiles)
if __name__ == "__main__":
    # CLI entry point: collect generation options and hand them to the pipeline.
    parser = argparse.ArgumentParser()
    # Option table: (flag, keyword arguments) for every value-taking argument.
    option_specs = [
        ("--data_dir", dict(type=str, default='../data')),
        ("--results_dir", dict(type=str, default='../results/creative_creature_generation')),
        ("--models_dir", dict(type=str, default='../models')),
        ('--n_part', dict(type=int, default=19)),
        ('--image_size', dict(type=int, default=64)),
        ('--network_capacity', dict(type=int, default=16)),
        ('--batch_size', dict(type=int, default=100)),
        ('--num_image_tiles', dict(type=int, default=8)),
        ('--trunc_psi', dict(type=float, default=1.)),
    ]
    for flag, spec in option_specs:
        parser.add_argument(flag, **spec)
    # Boolean switch registered separately (no type/default pair).
    parser.add_argument('--generate_all', action='store_true')
    args = parser.parse_args()
    print(args)
    train_from_folder(args.data_dir, args.results_dir, args.models_dir,
                      args.n_part, args.image_size, args.network_capacity,
                      args.batch_size, args.num_image_tiles, args.trunc_psi,
                      args.generate_all)
# <FILESEP>
"""Evaluating CodeGen Performance on NL-to-Code Generation.
No wrapped prompt for CodeGen, just comment-type nl descriptions.
E.g., "# this function prints hello world"
CodeGen conditions on the concatenation of interleaved
past prompts (nl) and generated responses (code).
We can input the `test_start` as previous-step code.
However, we cannot inform model the `suffix` (return arguments) beforehand,
hopefully the variable specification in the intent could help.
"""
import gc
import json
import torch
import src.slurm, src.config, src.data, src.utils
from typing import Dict, List
from pathlib import Path
from torch.utils.data import DataLoader, SequentialSampler
from transformers import AutoTokenizer, AutoModelForCausalLM
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
TRUC_PATTERN_LIST = [] # [r"\n\n^#", "^'''"] # removed "\n\n\n"
def print_scores(scores_dict: Dict) -> str:
    """Render a scores dictionary in its plain repr form for logging."""
    return "{}".format(scores_dict)
def remove_input_from_outputs(predictions: List[str], prompt: str, verbose: bool = False) -> List[str]:
# prompt_sections = [f"def {p}" for p in prompt.split("def ") if p]
# for pp in prompt_sections: print(f"Sub Prompt: {pp}")
if verbose:
print(f"Prompt: \n{prompt}")
trimmed_predictions = []