text stringlengths 1 93.6k |
|---|
@torch.no_grad()
def validate_mrc(model, val_loader, task):
    """Run MRC (masked region classification) validation.

    Args:
        model: model called as ``model(batch, task=task, compute_loss=False)``,
            returning per-region class scores.
        val_loader: iterable of batch dicts; each batch carries
            'label_targets' (soft targets over region classes) and
            'img_mask_tgt' (mask whose sum counts the masked regions).
        task: task name; if it contains "kl", score with KL divergence
            against the soft targets, otherwise with hard-label
            cross-entropy (background class 0 excluded).

    Returns:
        dict with 'loss' (per-feature), 'acc' (accuracy) and 'feat_per_s',
        all aggregated across distributed workers via all_gather_list.
    """
    LOGGER.info("start running MRC validation...")
    val_loss = 0
    n_feat = 0
    st = time()
    tot_score = 0
    for i, batch in enumerate(val_loader):
        prediction_soft_label = model(
            batch, task=task, compute_loss=False)
        # Fix: label_targets was previously assigned only inside the "kl"
        # branch, so the cross-entropy path raised NameError on first use.
        label_targets = batch['label_targets']
        if "kl" in task:
            prediction_soft_label = F.log_softmax(
                prediction_soft_label, dim=-1)
            loss = F.kl_div(
                prediction_soft_label, label_targets, reduction='sum')
            tot_score += compute_accuracy_for_soft_targets(
                prediction_soft_label, label_targets)
        else:
            # background class (index 0) should not be the target;
            # argmax over classes 1.. then shift back to original indexing
            cls_label_targets = label_targets[:, 1:].max(dim=-1)[1] + 1
            loss = F.cross_entropy(
                prediction_soft_label, cls_label_targets,
                ignore_index=0, reduction='sum')
            tot_score += compute_accuracy_for_soft_targets(
                prediction_soft_label[:, 1:], label_targets[:, 1:])
        val_loss += loss.item()
        n_feat += batch['img_mask_tgt'].sum().item()
    # reduce the scalar statistics across all distributed workers
    val_loss = sum(all_gather_list(val_loss))
    tot_score = sum(all_gather_list(tot_score))
    n_feat = sum(all_gather_list(n_feat))
    tot_time = time()-st
    val_loss /= n_feat
    val_acc = tot_score / n_feat
    val_log = {'loss': val_loss,
               'acc': val_acc,
               'feat_per_s': n_feat/tot_time}
    LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
                f"score: {val_acc*100:.2f}")
    return val_log
|
def compute_accuracy_for_soft_targets(out, labels):
    """Count rows where the predicted class matches the soft-target class.

    Both tensors hold class scores along the last dimension; the target
    class is taken as the argmax of the soft labels.

    Args:
        out: prediction scores/logits.
        labels: soft targets of the same shape.

    Returns:
        int: number of positions whose argmax agrees between the two.
    """
    predicted_cls = torch.argmax(out, dim=-1)
    target_cls = torch.argmax(labels, dim=-1)
    return (predicted_cls == target_cls).sum().item()
|
if __name__ == "__main__":
|
parser = argparse.ArgumentParser()
|
# Required parameters
|
# NOTE: train tasks and val tasks cannot take command line arguments
|
parser.add_argument('--compressed_db', action='store_true',
|
help='use compressed LMDB')
|
parser.add_argument("--model_config", type=str,
|
help="path to model structure config json")
|
parser.add_argument("--checkpoint", default=None, type=str,
|
help="path to model checkpoint (*.pt)")
|
parser.add_argument(
|
"--output_dir", default=None, type=str,
|
help="The output directory where the model checkpoints will be "
|
"written.")
|
parser.add_argument('--mrm_prob', default=0.15, type=float,
|
help='probability to mask in MRM training')
|
# Prepro parameters
|
parser.add_argument('--max_txt_len', type=int, default=60,
|
help='max number of tokens in text (BERT BPE)')
|
parser.add_argument('--conf_th', type=float, default=0.2,
|
help='threshold for dynamic bounding boxes '
|
'(-1 for fixed)')
|
parser.add_argument('--max_bb', type=int, default=100,
|
help='max number of bounding boxes')
|
parser.add_argument('--min_bb', type=int, default=10,
|
help='min number of bounding boxes')
|
parser.add_argument('--num_bb', type=int, default=36,
|
help='static number of bounding boxes')
|
# training parameters
|
parser.add_argument("--train_batch_size", default=4096, type=int,
|
help="Total batch size for training. "
|
"(batch by tokens)")
|
parser.add_argument("--val_batch_size", default=4096, type=int,
|
help="Total batch size for validation. "
|
"(batch by tokens)")
|
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
|
help="Number of updates steps to accumualte before "
|
"performing a backward/update pass.")
|
parser.add_argument("--learning_rate", default=3e-5, type=float,
|
help="The initial learning rate for Adam.")
|
parser.add_argument("--valid_steps", default=1000, type=int,
|
help="Run validation every X steps")
|
parser.add_argument("--num_train_steps", default=100000, type=int,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.