    # save a checkpoint whenever the test accuracy improves
    if acc > best_acc:
        state = {
            'net': net.state_dict(),
        }
        torch.save(state, saving_ckpt_name)
        rprint(f'Saving~ {saving_ckpt_name}', rank)
        # update the best accuracy seen so far
        best_acc = acc
def main_worker(rank, ngpus_per_node=ngpus_per_node):
    # print configuration
    print_configuration(args, rank)

    # set the GPU id of this process
    torch.cuda.set_device(rank)

    # DDP environment settings
    print(f'Use GPU: {gpu_list[rank]} for training')
    dist.init_process_group(backend='nccl', world_size=ngpus_per_node, rank=rank)
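    # NOTE: with no explicit init_method, init_process_group falls back to the
    # env:// rendezvous, so MASTER_ADDR and MASTER_PORT must be set in the
    # environment before the workers are spawned.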
    # init model and wrap it with DistributedDataParallel
    net = get_network(network=args.network,
                      depth=args.depth,
                      dataset=args.dataset,
                      tran_type=args.tran_type,
                      img_size=args.img_resize,
                      patch_size=args.patch_size,
                      pretrain=args.pretrain)
    net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
    net = net.to(memory_format=torch.channels_last).cuda()
    # output_device expects a device index, not a list
    net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[rank], output_device=rank)
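    # The SyncBatchNorm conversion must happen before the DDP wrapping so the
    # synchronized buffers are registered on the wrapped module; channels_last
    # typically speeds up convolution-heavy networks under mixed precision.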
    # transformer backbones expect the inputs upsampled to the ViT resolution
    upsample = args.network in transformer_list

    # fast dataloader
    trainloader, testloader, decoder = get_fast_dataloader(dataset=args.dataset,
                                                           train_batch_size=args.batch_size,
                                                           test_batch_size=args.test_batch_size,
                                                           upsample=upsample)
    # optimizer and learning-rate schedule
    optimizer = optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9,
                          weight_decay=args.weight_decay)
    if args.network in transformer_list:
        lr_scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps,
                                            t_total=args.num_steps)
    else:
        steps_up = int(round(args.epochs / 15)) * len(trainloader)
        lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0, max_lr=args.learning_rate,
                                                         step_size_up=steps_up,
                                                         step_size_down=args.epochs * len(trainloader) - steps_up)
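    # The cyclic schedule forms a single triangular cycle over the whole run:
    # the LR ramps up for roughly the first 1/15 of the training steps and
    # decays linearly for the remainder.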
    # training and testing
    scaler = torch.cuda.amp.GradScaler()  # assumed AMP scaler: 'scaler' is used below but was never created
    for epoch in range(args.epochs):
        rprint('\nEpoch: %d' % (epoch + 1), rank)
        if args.dataset == "imagenet":
            if args.network in transformer_list:
                res = args.img_resize
            else:
                # progressive resizing: ramp the train resolution from 160 to 192
                # between 50% and 70% of the epochs
                res = get_resolution(epoch=epoch, min_res=160, max_res=192,
                                     start_ramp=int(math.floor(args.epochs * 0.5)),
                                     end_ramp=int(math.floor(args.epochs * 0.7)))
            decoder.output_size = (res, res)
        train(net, trainloader, optimizer, lr_scheduler, scaler)
        test(net, testloader, rank)
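    # make sure every rank tears down the process group cleanly
    dist.destroy_process_group()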
def run():
    torch.multiprocessing.spawn(main_worker, nprocs=ngpus_per_node, join=True)


if __name__ == '__main__':
    run()
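# Example launch (hypothetical flag values; see the argparse setup in this repo):
#   CUDA_VISIBLE_DEVICES=0,1 MASTER_ADDR=127.0.0.1 MASTER_PORT=29500 \
#       python train.py --network resnet --depth 50 --dataset cifar10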
# ---------------------------------------------------------------------------
# Second file: point-cloud model components (6-D rotation-matrix decoder)
# ---------------------------------------------------------------------------
import os
import os.path as osp
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
from torch.autograd import Function
from model.network import Discriminator, PCN, Encoder, Decoder, Disentangler, Z_Mapper, Classifier, Generator, ViewPredictor
from utils.common_utils import *
from loss import *
from evaluation.pointnet import *
import time
from external.ChamferDistancePytorch.chamfer_python import distChamfer, distChamfer_raw
class RotMatDecoder(nn.Module):
    # Maps a 6-D vector to a 3x3 rotation matrix via Gram-Schmidt
    # orthogonalization (the continuous 6-D rotation representation
    # of Zhou et al., CVPR 2019).
    def __init__(self):
        super(RotMatDecoder, self).__init__()

    def forward(self, x):
        reshaped_x = x.view(-1, 3, 2)
        b1 = F.normalize(reshaped_x[:, :, 0], dim=1)
        dot_prod = torch.sum(b1 * reshaped_x[:, :, 1], dim=1, keepdim=True)
        b2 = F.normalize(reshaped_x[:, :, 1] - dot_prod * b1, dim=1)
        b3 = torch.cross(b1, b2, dim=1)  # completes a right-handed orthonormal frame
        return torch.stack([b1, b2, b3], dim=-1)
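# Usage sketch (hypothetical shapes): decode a batch of 6-D predictions.
#   decoder = RotMatDecoder()
#   R = decoder(torch.randn(8, 6))  # -> (8, 3, 3); each R is orthonormal with det(R) = 1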