text
stringlengths 1
93.6k
|
|---|
val_cifar_c()
|
if args.mode in ['v2']:
|
val_cifar10_1()
|
if args.mode in ['sta']:
|
val_cifar_worst_of_k_affine(args.k)
|
elif args.dataset == 'tin':
|
if args.mode in ['clean', 'all']:
|
val_tin()
|
if args.mode in ['c', 'all']:
|
val_tin_c()
|
elif args.dataset == 'IN':
|
if args.mode in ['clean', 'all']:
|
val_IN()
|
if args.mode in ['c', 'all']:
|
val_IN_c()
|
# <FILESEP>
|
import os
|
import time
|
import torch
|
import numpy as np
|
import torch.nn as nn
|
import torch.nn.parallel
|
import torch.optim as optim
|
import torch.backends.cudnn as cudnn
|
from torch.autograd import Variable
|
from tensorboardX import SummaryWriter
|
from utils import *
|
from options import get_args
|
from dataloader import nyudv2_dataloader
|
from models.loss import cal_spatial_loss, cal_temporal_loss
|
from models.backbone_dict import backbone_dict
|
from models import modules
|
from models import net
|
# Enable cuDNN autotuner: picks the fastest conv algorithms for the
# (fixed) input sizes used during training.
cudnn.benchmark = True

# Command-line configuration for the training run.
args = get_args('train')

# Restrict visible GPUs before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = args.devices

# Create folder(s) for checkpoints and logs.
makedir(args.checkpoint_dir)
makedir(args.logdir)

# creat summary logger (TensorBoard).
logger = SummaryWriter(args.logdir)

# dataset, dataloader: training data for NYU Depth V2.
TrainImgLoader = nyudv2_dataloader.getTrainingData_NYUDV2(args.batch_size, args.trainlist_path, args.root_path)

# model, optimizer
# NOTE(review): `device` is computed here but the model is moved with a
# hard-coded .cuda() below, so a CUDA machine is effectively required —
# confirm whether CPU-only runs are intended to work.
device = 'cuda' if torch.cuda.is_available() and args.use_cuda else 'cpu'
|
# Build the encoder backbone and the full depth-estimation network.
# The decoder's channel configuration depends on the ResNet variant:
# resnet50 uses bottleneck blocks (4x channel expansion), resnet18/34 do not.
backbone = backbone_dict[args.backbone]()
Encoder = modules.E_resnet(backbone)

if args.backbone in ['resnet50']:
    model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048], refinenet=args.refinenet)
elif args.backbone in ['resnet18', 'resnet34']:
    model = net.model(Encoder, num_features=512, block_channel=[64, 128, 256, 512], refinenet=args.refinenet)
else:
    # Fail fast with a clear message: previously an unsupported backbone
    # fell through and raised a confusing NameError on `model` below.
    raise ValueError("unsupported backbone: {}".format(args.backbone))

# Wrap for multi-GPU data parallelism and move to GPU, together with the
# 3D-conv discriminator used for the temporal loss.
model = nn.DataParallel(model).cuda()
disc = net.C_C3D_1().cuda()
|
# Optimizer for the depth network, assembled by the project helper from
# command-line options (the discriminator's optimizer, if any, is
# presumably created elsewhere — not visible in this file).
optimizer = build_optimizer(model = model,
                            learning_rate=args.lr,
                            optimizer_name=args.optimizer_name,
                            weight_decay = args.weight_decay,
                            epsilon=args.epsilon,
                            momentum=args.momentum
                            )
|
# Resolve the starting epoch and optionally restore model weights.
# Checkpoint files are assumed to be named ..._<epoch>.pth.tar.
# NOTE(review): only model weights are restored — the optimizer state is
# not saved/loaded; confirm this is intentional.
start_epoch = 0
if args.resume:
    # Resume from the newest checkpoint found in checkpoint_dir.
    all_saved_ckpts = [ckpt for ckpt in os.listdir(args.checkpoint_dir) if ckpt.endswith(".pth.tar")]
    print(all_saved_ckpts)
    # Sort numerically by the epoch embedded in the filename.
    all_saved_ckpts = sorted(all_saved_ckpts, key=lambda x: int(x.split('_')[-1].split('.')[0]))
    loadckpt = os.path.join(args.checkpoint_dir, all_saved_ckpts[-1])
    start_epoch = int(all_saved_ckpts[-1].split('_')[-1].split('.')[0])
    print("loading the lastest model in checkpoint_dir: {}".format(loadckpt))
    state_dict = torch.load(loadckpt)
    model.load_state_dict(state_dict)
elif args.loadckpt is not None:
    # Load an explicitly specified checkpoint path.
    print("loading model {}".format(args.loadckpt))
    # BUG FIX: the epoch parsed from the filename was left as a string,
    # which would break range(start_epoch, args.epochs) in train();
    # cast to int, consistent with the resume branch above.
    start_epoch = int(args.loadckpt.split('_')[-1].split('.')[0])
    state_dict = torch.load(args.loadckpt)
    model.load_state_dict(state_dict)
else:
    print("start at epoch {}".format(start_epoch))
|
def train():
|
for epoch in range(start_epoch, args.epochs):
|
adjust_learning_rate(optimizer, epoch, args.lr)
|
batch_time = AverageMeter()
|
losses = AverageMeter()
|
model.train()
|
end = time.time()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.