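# Excerpt from the training script: the per-epoch loop from main(),
# followed by the train() helper. The surrounding file is assumed to
# import time and torch and to define AverageMeter, accuracy, validate,
# save_ckpt, and ParameterClient.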
assert max(args.lr_steps) < args.epochs
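
# Decay the learning rate by args.gamma at each milestone in
# args.lr_steps; the assert above guarantees every milestone is
# reached before training ends.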
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, args.lr_steps, args.gamma)

for epoch in range(args.start_epoch, args.epochs):
    if args.distributed:
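        # Reseed the DistributedSampler so every epoch draws a fresh
        # shuffle; without this each epoch replays the same order.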
        train_sampler.set_epoch(epoch)

    # train for one epoch
    train(train_loader, model, criterion, optimizer, epoch,
          args.print_freq, args.rank, logger, tb_logger, args.sampled)
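
    # PyTorch >= 1.1 expects scheduler.step() after the optimizer has
    # stepped, so the per-epoch decay happens once train() returns.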
    lr_scheduler.step()

    # evaluate on validation set
    prec1, loss = validate(val_loader, model, criterion, args.print_freq,
                           args.rank, logger, args.sampled)
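    # validate (defined elsewhere in this script) is assumed to return
    # the top-1 accuracy and the mean loss on the validation set.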

    # remember best prec@1 and save checkpoint
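    # Only rank 0 logs to TensorBoard and writes checkpoints, so that
    # multi-process runs do not emit duplicate files.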
    if args.rank == 0:
        if tb_logger is not None:
            tb_logger.add_scalar('test_acc', prec1, epoch)
            tb_logger.add_scalar('test_loss', loss, epoch)
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
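        # save_ckpt is a repository helper (defined elsewhere), assumed
        # to write this dict under args.save_path and to keep a separate
        # "best" copy when is_best is True.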
        save_ckpt(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, args.save_path, epoch + 1, is_best)
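        # With sampled softmax the full classifier weights live on a
        # parameter server rather than in model.state_dict(), so they
        # are presumably snapshotted to HDF5 separately here.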
        if args.sampled:
            with ParameterClient(args.tmp_client_id) as client:
                client.snapshot('{}_epoch_{}_cls.h5'.format(
                    args.save_path, epoch + 1))


def train(train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          print_freq,
          rank,
          logger,
          tb_logger=None,
          sampled=None):
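    """Train `model` for one epoch over `train_loader`.

    When `sampled` is truthy the model returns both the logits and a
    (possibly remapped) target; otherwise it returns logits only.
    `tb_logger` is accepted here but not used inside this function.
    """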
    batch_time = AverageMeter(10)
    data_time = AverageMeter(10)
    losses = AverageMeter(10)
    top1 = AverageMeter(10)
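    # AverageMeter is assumed to keep a sliding window (here the last
    # 10 updates) and expose .val and .avg, as the log line below uses.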

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # target = target.cuda(non_blocking=True)
        target = target.cuda()
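        # The commented-out non_blocking=True variant would overlap the
        # host-to-device copy with compute, but it only helps when the
        # DataLoader uses pinned memory.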

        # compute output
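        # In sampled mode the model appears to score only a sampled
        # subset of classes, so it also returns the target remapped
        # into that subset's index space.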
        if not sampled:
            output = model(input, target)
        else:
            output, target = model(input, target)
        loss = criterion(output, target)

        # measure accuracy and record loss
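        # accuracy() is assumed to match the standard PyTorch ImageNet
        # example helper, returning one tensor per requested top-k.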
        prec1, = accuracy(output, target, topk=(1, ))
        losses.update(loss.item())
        top1.update(prec1[0])

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0 and rank == 0 and logger is not None:
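            # Log the latest value (val) beside the windowed average
            # (avg) for each meter.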
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'LR: {3}\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                            epoch,
                            i,
                            len(train_loader),
                            optimizer.param_groups[0]['lr'],
                            batch_time=batch_time,
                            data_time=data_time,
                            loss=losses,
                            top1=top1))