text stringlengths 1 93.6k |
|---|
ex_per_sec = int(tot_ex / (time()-start))
|
tot_in = sum(all_gather_list(n_in_units[t]))
|
in_per_sec = int(tot_in / (time()-start))
|
tot_l = sum(all_gather_list(n_loss_units[t]))
|
l_per_sec = int(tot_l / (time()-start))
|
LOGGER.info(f'{t}: {tot_ex} examples trained at '
|
f'{ex_per_sec} ex/s')
|
TB_LOGGER.add_scalar(f'perf/{t}_ex_per_s', ex_per_sec,
|
global_step)
|
TB_LOGGER.add_scalar(f'perf/{t}_in_per_s', in_per_sec,
|
global_step)
|
TB_LOGGER.add_scalar(f'perf/{t}_loss_per_s', l_per_sec,
|
global_step)
|
LOGGER.info('===============================================')
|
if global_step % opts.valid_steps == 0:
|
LOGGER.info(f'Step {global_step}: start validation')
|
validate(model, val_dataloaders)
|
model_saver.save(model, global_step)
|
if global_step >= opts.num_train_steps:
|
break
|
if global_step % opts.valid_steps != 0:
|
LOGGER.info(f'Step {global_step}: start validation')
|
validate(model, val_dataloaders)
|
model_saver.save(model, global_step)
|
def validate(model, val_dataloaders):
    """Run validation for every pretraining task and log the metrics.

    Dispatches each (task, dataloader) pair to the matching task-specific
    validator, prefixes the returned metric names with the task, and pushes
    them to the tensorboard logger. The model is switched to eval mode for
    the duration and restored to train mode afterwards.

    Raises:
        ValueError: if a task name matches none of the known prefixes.
    """
    model.eval()
    for task, loader in val_dataloaders.items():
        LOGGER.info(f"validate on {task} task")
        # dispatch on the task-name prefix
        if task.startswith('mlm'):
            task_log = validate_mlm(model, loader)
        elif task.startswith('mrfr'):
            task_log = validate_mrfr(model, loader)
        elif task.startswith('mrc'):
            task_log = validate_mrc(model, loader, task)
        else:
            raise ValueError(f'Undefined task {task}')
        # first namespace metric names by task, then by the 'valid_' scope
        task_log = {f'{task}_{k}': v for k, v in task_log.items()}
        scoped = {f'valid_{task}/{k}': v for k, v in task_log.items()}
        TB_LOGGER.log_scaler_dict(scoped)
    model.train()
|
@torch.no_grad()
def validate_mlm(model, val_loader):
    """Evaluate masked-language-modeling loss and accuracy on `val_loader`.

    Returns:
        dict with 'loss' (mean cross-entropy per masked token), 'acc'
        (token-level accuracy over masked positions) and 'tok_per_s'
        (validation throughput). Counters are summed across distributed
        workers via `all_gather_list` before the averages are taken.
    """
    LOGGER.info("start running MLM validation...")
    val_loss = 0
    n_correct = 0
    n_word = 0
    st = time()
    # NOTE: was `for i, batch in enumerate(...)` — the index was never used
    for batch in val_loader:
        # assumes the model returns scores only for masked positions,
        # aligned with the filtered labels below — TODO confirm
        scores = model(batch, task='mlm', compute_loss=False)
        labels = batch['txt_labels']
        # -1 marks tokens that carry no MLM target; drop them
        labels = labels[labels != -1]
        loss = F.cross_entropy(scores, labels, reduction='sum')
        val_loss += loss.item()
        n_correct += (scores.max(dim=-1)[1] == labels).sum().item()
        n_word += labels.numel()
    # reduce the raw counters across all distributed processes
    val_loss = sum(all_gather_list(val_loss))
    n_correct = sum(all_gather_list(n_correct))
    n_word = sum(all_gather_list(n_word))
    tot_time = time()-st
    val_loss /= n_word
    acc = n_correct / n_word
    val_log = {'loss': val_loss,
               'acc': acc,
               'tok_per_s': n_word/tot_time}
    LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
                f"acc: {acc*100:.2f}")
    return val_log
|
def accuracy_count(out, labels):
    """Count argmax predictions matching `labels`, ignoring -1 positions."""
    predictions = out.max(dim=-1)[1]
    valid = labels != -1
    # a position counts only when it is both valid and predicted correctly
    hits = (predictions == labels) & valid
    return hits.sum().item()
|
@torch.no_grad()
def validate_mrfr(model, val_loader):
    """Evaluate masked-region feature-regression loss on `val_loader`.

    Returns:
        dict with 'loss' (mean regression loss per masked image feature,
        normalized by IMG_DIM) and 'feat_per_s' (validation throughput).
        Counters are summed across distributed workers via
        `all_gather_list` before averaging.
    """
    LOGGER.info("start running MRFR validation...")
    val_loss = 0
    n_feat = 0
    st = time()
    # NOTE: was `for i, batch in enumerate(...)` — the index was never used
    for batch in val_loader:
        loss = model(batch, task='mrfr', compute_loss=True)
        # normalize by feature dimensionality so the loss is per feature
        val_loss += loss.sum().item() / IMG_DIM
        n_feat += batch['img_mask_tgt'].sum().item()
    # reduce the raw counters across all distributed processes
    val_loss = sum(all_gather_list(val_loss))
    n_feat = sum(all_gather_list(n_feat))
    tot_time = time()-st
    val_loss /= n_feat
    val_log = {'loss': val_loss,
               'feat_per_s': n_feat/tot_time}
    LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
                f"loss: {val_loss:.2f}")
    return val_log
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.