    meta_loader = MetaLoader(train_dataloaders,
                             accum_steps=opts.gradient_accumulation_steps,
                             distributed=n_gpu > 1)
    meta_loader = PrefetchLoader(meta_loader)
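    # MetaLoader interleaves the per-task dataloaders and yields
    # (task_name, batch) pairs; PrefetchLoader wraps it to load the next
    # batch onto the GPU while the current one is being processed.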
    # Prepare model
    if opts.checkpoint:
        checkpoint = torch.load(opts.checkpoint)
    else:
        checkpoint = {}
    model = UniterForPretrainingForVCR.from_pretrained(
        opts.model_config, checkpoint,
        img_dim=IMG_DIM, img_label_dim=IMG_LABEL_DIM)
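    # VCR adds extra token-type ids and special tokens on top of the base
    # UNITER vocabulary, so the corresponding embedding tables are rebuilt
    # before training.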
    model.init_type_embedding()
    model.init_word_embedding(NUM_SPECIAL_TOKENS)
    model.to(device)
    model.train()
    # make sure every process has the same model parameters at the beginning
    broadcast_tensors([p.data for p in model.parameters()], 0)
    set_dropout(model, opts.dropout)
    # Prepare optimizer
    optimizer = build_optimizer(model, opts)
    task2scaler = {t: i for i, t in enumerate(train_dataloaders.keys())}
    model, optimizer = amp.initialize(model, optimizer,
                                      num_losses=len(task2scaler),
                                      enabled=opts.fp16, opt_level='O2')
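    # apex amp maintains one loss scaler per task (num_losses), so a scale
    # adjustment triggered by overflow in one task does not disturb the
    # others; task2scaler maps task names to scaler ids for amp.scale_loss.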
    global_step = 0
    LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
    LOGGER.info("  Batch size = %d", opts.train_batch_size)
    LOGGER.info("  Accumulate steps = %d", opts.gradient_accumulation_steps)
    LOGGER.info("  Num steps = %d", opts.num_train_steps)

    # to compute training statistics
    task2loss = {task: RunningMeter(f'loss/{task}')
                 for task in train_dataloaders.keys()}
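    # each RunningMeter tracks a running (smoothed) value of its task's loss,
    # read back through .name/.val for TensorBoard logging below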
    n_examples = defaultdict(int)
    n_in_units = defaultdict(int)
    n_loss_units = defaultdict(int)
    grad_norm = 0

    start = time()
    # quick hack for amp delay_unscale bug
    optimizer.zero_grad()
    optimizer.step()
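    # (the dummy zero_grad/step above presumably initializes amp's optimizer
    # state so that the first backward pass with delay_unscale=True does not
    # error out before any real step has been taken)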
    for step, (name, batch) in enumerate(meta_loader):
        # forward pass
        n_examples[name] += batch['input_ids'].size(0)
        n_in_units[name] += (batch['attn_masks'] == 1).sum().item()
        task = name.split('_')[0]
        loss = model(batch, task=task, compute_loss=True)
        n_loss_units[name] += loss.size(0)
        loss = loss.mean()  # loss is not normalized in model

        # backward pass
        delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
        with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale,
                            loss_id=task2scaler[name]) as scaled_loss:
            scaled_loss.backward()
            if not delay_unscale:
                # gather gradients from every process; do this before
                # unscaling to make sure every process uses the same
                # gradient scale
                grads = [p.grad.data for p in model.parameters()
                         if p.requires_grad and p.grad is not None]
                all_reduce_and_rescale_tensors(grads, float(1))
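        # NOTE: with delay_unscale=True amp leaves gradients in scaled form
        # across accumulation sub-steps; the all-reduce above therefore sees
        # the same gradient scale on every rank before amp finally unscales.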
        task2loss[name](loss.item())

        # optimizer update and logging
        if (step + 1) % opts.gradient_accumulation_steps == 0:
            global_step += 1

            # learning rate scheduling
            lr_this_step = get_lr_sched(global_step, opts)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_this_step
            TB_LOGGER.add_scalar('lr', lr_this_step, global_step)

            # log loss
            # NOTE: not gathered across GPUs for efficiency
            TB_LOGGER.log_scaler_dict({ll.name: ll.val
                                       for ll in task2loss.values()
                                       if ll.val is not None})
            TB_LOGGER.step()
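            # clip on amp's fp32 master copies of the parameters, not on the
            # (possibly fp16) model parameters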
            # update model params
            if opts.grad_norm != -1:
                grad_norm = clip_grad_norm_(amp.master_params(optimizer),
                                            opts.grad_norm)
                TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
            optimizer.step()
            optimizer.zero_grad()
            pbar.update(1)
            if global_step % 100 == 0:
                # monitor training throughput
                LOGGER.info(f'==============Step {global_step}===============')
                for t in train_dataloaders.keys():
                    assert all(tt == t for tt in all_gather_list(t))
                    tot_ex = sum(all_gather_list(n_examples[t]))