text stringlengths 0 93.6k |
|---|
if tb_logger is not None: |
_iter = epoch * len(train_loader) + i |
tb_logger.add_scalar('train_acc', top1.avg, _iter) |
tb_logger.add_scalar('train_loss', losses.avg, _iter) |
def validate(val_loader,
             model,
             criterion,
             print_freq,
             rank,
             logger,
             sampled=None):
    """Run one evaluation pass over ``val_loader`` and report top-1 accuracy.

    Args:
        val_loader: iterable of ``(input, target)`` batches (e.g. a DataLoader).
        model: network to evaluate; put into ``eval()`` mode for the pass.
        criterion: loss function applied to ``(output, target)``.
        print_freq: log progress every ``print_freq`` batches.
        rank: distributed rank; only rank 0 logs.
        logger: logger for progress messages, or ``None`` to disable logging.
        sampled: if truthy, the model call returns ``(output, target)``
            instead of just ``output`` — presumably a sampling/distillation
            wrapper; TODO confirm against the model implementation.

    Returns:
        Tuple ``(top1_avg, loss_avg)`` averaged over the whole pass.
    """
    n = len(val_loader)
    # Rolling window of 10 for timing; full-length windows for metrics so
    # .avg reflects the entire validation pass.
    batch_time = AverageMeter(10)
    losses = AverageMeter(n)
    top1 = AverageMeter(n)

    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            if not sampled:
                output = model(input, target)
            else:
                # Sampled mode: the model also returns the (possibly
                # re-sampled) target to score against.
                output, target = model(input, target)
            loss = criterion(output, target)
            prec1, = accuracy(output, target, topk=(1, ))
            losses.update(loss.item())
            top1.update(prec1[0])
            batch_time.update(time.time() - end)
            end = time.time()
            if i % print_freq == 0 and rank == 0 and logger is not None:
                logger.info(
                    'Test: [{0}/{1}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                        i,
                        len(val_loader),
                        batch_time=batch_time,
                        loss=losses,
                        top1=top1))
    # Bug fix: also require logger is not None here, matching the in-loop
    # guard — otherwise validate(..., logger=None) crashed on rank 0.
    if rank == 0 and logger is not None:
        logger.info(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg, losses.avg
# Script entry point: run training only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
# <FILESEP> |
"""GPT-like model in Mesh-Tensorflow""" |
from functools import partial |
import mesh_tensorflow as mtf |
import tensorflow.compat.v1 as tf |
from tensorflow.python.tpu import tpu_config, tpu_estimator |
from tensorflow_estimator.python.estimator import estimator as estimator_lib |
from utils import save_config, expand_attention_types_params, yes_or_no, remove_gs_or_filepath, setup_logging, \ |
check_dataset |
from inputs import sequential_input, pred_input, handle_pred_output, mlm_sample_text, generic_text |
from export import export_model |
from model_fns import model_fn |
from data.encoders import fetch_encoder |
from configs import fetch_model_params |
from tasks import task_descriptors |
import argparse |
import json |
import numpy |
def parse_args(): |
# Parse command line arguments |
parser = argparse.ArgumentParser() |
parser.add_argument("--tpu", type=str, help="Name of TPU to train on, if any.") |
parser.add_argument("--gpu_ids", nargs="+", type=str, default=["device:GPU:0"], |
help="If training on GPU, can specify your GPU names in a list - i.e 'device:GPU:0 device:GPU:1'") |
parser.add_argument("--model", type=str, default=None, help="JSON file that contains model parameters.") |
parser.add_argument("--steps_per_checkpoint", type=int, default=5000, help="Save a model checkpoint every X steps.") |
parser.add_argument("--auto_layout", action="store_true", help="If set, generates and prints the most memory " |
"efficient layout according to MTF auto layout.") |
parser.add_argument("--auto_layout_and_mesh_shape", action="store_true", |
help="If set, generates and prints the most memory efficient layout and mesh shape according to" |
" MTF auto layout.") |
parser.add_argument("--new", action="store_true", help="If set, deletes previous checkpoint, if it exists, and " |
"starts a new training run") |
parser.add_argument("--predict", action="store_true", help="If set, uses the model to predict rather than train.") |
parser.add_argument("--eval", action="store_true", help="If set, run model in evaluation mode.") |
parser.add_argument("--prompt", type=str, help="path to .txt file containing a prompt for prediction. If empty, " |
"defaults to unicorns.", |
default="") |
parser.add_argument("--check_dataset", action="store_true", |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.