# NOTE(review): dataset-extraction residue, not source code — original tokens
# were: "text / stringlengths / 1 / 93.6k". Commented out so the file parses.
def main(raw_args=None):
    """Entry point: parse CLI arguments, post-process them, and run projection."""
    parsed = parse_args(raw_args)
    project(process_args(parsed))


if __name__ == '__main__':
    main()
# <FILESEP>
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import cv2
import torch
import numpy as np
import argparse
import torchvision
from PIL import Image
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
from retry.api import retry_call
from torch.utils import data
from torchvision import transforms
from part_selector import Trainer as Trainer_selector
from part_generator import Trainer as Trainer_cond_unet
from scipy.ndimage.morphology import distance_transform_edt
# Raw RGB palette (0-255) for each labelled sketch part.
_PART_RGB = {
    'initial': (45, 169, 145), 'eye': (243, 156, 18), 'none': (149, 165, 166),
    'arms': (211, 84, 0), 'beak': (41, 128, 185), 'mouth': (54, 153, 219),
    'body': (192, 57, 43), 'ears': (142, 68, 173), 'feet': (39, 174, 96),
    'fin': (69, 85, 101), 'hair': (127, 140, 141), 'hands': (45, 63, 81),
    'head': (241, 197, 17), 'horns': (51, 205, 117), 'legs': (232, 135, 50),
    'nose': (233, 90, 75), 'paws': (160, 98, 186), 'tail': (58, 78, 99),
    'wings': (198, 203, 207), 'details': (171, 190, 191),
}

# Inverted, normalized colors as CUDA tensors shaped (1, 3, 1, 1) so each
# broadcasts over an image batch (N, C, H, W).
COLORS = {
    part: 1 - torch.cuda.FloatTensor(rgb).view(1, -1, 1, 1) / 255.
    for part, rgb in _PART_RGB.items()
}
class Initialstroke_Dataset(data.Dataset):
    """Dataset of initial-stroke PNG images found recursively under ``folder``.

    Each item is a ``ToTensor``-transformed image. ``sample`` draws a random
    batch (with replacement) and returns it stacked on the GPU.
    """

    def __init__(self, folder, image_size):
        super().__init__()
        self.folder = folder
        # NOTE: image_size is stored but not used for resizing here — images
        # are presumably pre-sized on disk; confirm against the data pipeline.
        self.image_size = image_size
        # sorted() makes item order deterministic across filesystems
        # (glob order is otherwise unspecified); redundant f-string removed.
        self.paths = sorted(Path(folder).glob('**/*.png'))
        self.transform = transforms.Compose([
            transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, index):
        return self.transform(Image.open(self.paths[index]))

    def sample(self, n):
        """Return n randomly chosen images (with replacement) as one CUDA tensor."""
        # One vectorized draw instead of n separate randint calls.
        sample_ids = np.random.randint(len(self), size=n)
        samples = [self[i] for i in sample_ids]
        return torch.stack(samples).cuda()
def load_latest(model_dir, name):
    """Return the highest checkpoint number under ``model_dir/name``.

    Scans for files named ``model_<num>.pt`` and returns the largest
    ``<num>``, or None when no checkpoint file exists.
    """
    checkpoint_dir = Path(model_dir) / name
    epochs = [int(p.stem.split('_')[1]) for p in checkpoint_dir.glob('model_*.pt')]
    if not epochs:
        return None
    latest = max(epochs)
    print(f'continuing -{name} from previous epoch - {latest}')
    return latest
def noise(n, latent_dim, device='cuda'):
    """Sample a batch of standard-normal latent vectors.

    Args:
        n: batch size.
        latent_dim: dimensionality of each latent vector.
        device: torch device for the result. Defaults to 'cuda' so existing
            callers keep the original GPU behaviour.

    Returns:
        Float tensor of shape (n, latent_dim) drawn from N(0, 1).
    """
    return torch.randn(n, latent_dim, device=device)
def noise_list(n, layers, latent_dim):
    """Wrap one latent batch with its layer count: ``[(latents, layers)]``."""
    latents = noise(n, latent_dim)
    return [(latents, layers)]
def mixed_list(n, layers, latent_dim):
    """Two independent latent batches split at a random layer (style mixing)."""
    cut = int(torch.rand(()).numpy() * layers)
    head = noise_list(n, cut, latent_dim)
    tail = noise_list(n, layers - cut, latent_dim)
    return head + tail
def image_noise(n, im_size, device='cuda'):
    """Per-pixel uniform noise in [0, 1) of shape (n, im_size, im_size, 1).

    Args:
        n: batch size.
        im_size: spatial side length of the noise map.
        device: torch device for the result. Defaults to 'cuda' so existing
            callers keep the original GPU behaviour.
    """
    # torch.rand == FloatTensor(...).uniform_(0., 1.), allocated directly
    # on the requested device.
    return torch.rand(n, im_size, im_size, 1, device=device)
def evaluate_in_chunks(max_batch_size, model, *args):
    """Run ``model`` over ``args`` in batches of at most ``max_batch_size``.

    Every tensor in ``args`` is split along dim 0; the model is applied to
    each aligned group of chunks and the outputs are concatenated back
    along dim 0 (or returned directly when only one chunk was needed).
    """
    chunk_groups = zip(*(tensor.split(max_batch_size, dim=0) for tensor in args))
    outputs = [model(*group) for group in chunk_groups]
    if len(outputs) == 1:
        return outputs[0]
    return torch.cat(outputs, dim=0)
def evaluate_in_chunks_unet(max_batch_size, model, map_feats, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
split_map_feats = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), map_feats))))
chunked_outputs = [model(*i, j) for i, j in zip(split_args, split_map_feats)]