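# This fragment sits inside the training script's main routine; it relies
# on the following imports from the surrounding file (the repo-local names
# models, FileListDataset, DistSequentialSampler, ParameterClient,
# load_ckpt, and validate are inferred from their use below):
#
#   import os
#   import torch
#   import torch.nn as nn
#   import torch.backends.cudnn as cudnn
#   import torchvision.transforms as transforms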
# init data loader
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                 std=[0.25, 0.25, 0.25])
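# Note: these are generic per-channel statistics rather than the usual
# ImageNet values (mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]);
# they map ToTensor's [0, 1] output to roughly [-2, 2].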
train_dataset = FileListDataset(
    args.train_filelist, args.train_prefix,
    transforms.Compose([
        transforms.Resize(args.input_size),
        transforms.ToTensor(),
        normalize,
    ]))
val_dataset = FileListDataset(
    args.val_filelist, args.val_prefix,
    transforms.Compose([
        transforms.Resize(args.input_size),
        transforms.ToTensor(),
        normalize,
    ]))
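# FileListDataset is a repo-local Dataset; judging from the call sites it
# presumably reads "relative/path label" records from the filelist, loads
# each image under the given prefix, and applies the transform (an
# assumption -- its definition is not shown in this fragment).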

if args.distributed:
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset)
    val_sampler = DistSequentialSampler(val_dataset, args.world_size,
                                        args.rank)
else:
    train_sampler = None
    val_sampler = None
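# DistSequentialSampler is repo-local; by its name and arguments it
# presumably hands each rank a deterministic, non-overlapping slice of the
# validation set (e.g. indices[rank::world_size]) so every sample is
# evaluated exactly once (an assumption). Also note that when using
# DistributedSampler, train_sampler.set_epoch(epoch) should be called at
# the start of every epoch so shuffling differs between epochs.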

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=(train_sampler is None),
                                           num_workers=args.workers,
                                           pin_memory=True,
                                           sampler=train_sampler)

if args.test_batch_size is None:
    args.test_batch_size = 2 * args.batch_size
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.test_batch_size,
                                         shuffle=False,
                                         num_workers=args.workers,
                                         pin_memory=True,
                                         sampler=val_sampler)
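# The eval batch defaults to twice the training batch (above), presumably
# because validation runs without gradient buffers, so a larger batch
# typically fits in the same GPU memory.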

# create model
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](feature_dim=args.feature_dim)
if args.sampled:
    if args.rank > 0:
        assert args.distributed
    assert args.sample_num <= args.num_classes
model = models.build_classifier(args.classifier_type, model,
                                **args.__dict__)

if not args.distributed:
    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
else:
    model.cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, [args.rank])
    print('created DistributedDataParallel model successfully', args.rank)
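# Passing [args.rank] as device_ids assumes one process per GPU with the
# global rank equal to the local CUDA device index, which only holds on a
# single node; multi-node setups should use the local rank here.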

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(),
                            args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
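# CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model should
# emit raw, unnormalized logits.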

# optionally resume from a checkpoint
if args.resume:
    if os.path.isfile(args.resume):
        args.start_epoch, best_prec1 = load_ckpt(args.resume,
                                                 model,
                                                 optimizer=optimizer)
        if args.sampled:
            with ParameterClient(args.tmp_client_id) as client:
                cls_resume = args.resume.replace('.pth.tar', '_cls.h5')
                if os.path.isfile(cls_resume):
                    client.resume(cls_resume)
                    # the epoch was restored by load_ckpt above
                    print("=> loaded checkpoint '{}' (epoch {})".format(
                        cls_resume, args.start_epoch))
                else:
                    print("=> no checkpoint found at '{}'".format(
                        cls_resume))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

cudnn.benchmark = True
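# benchmark mode lets cuDNN autotune convolution algorithms, which pays
# off here because the input size is fixed.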

if args.evaluate:
    validate(val_loader, model, criterion, args.print_freq, args.rank,
             logger, args.sampled)
    return