# Extraction artifact (dataset-viewer column metadata), not part of this script:
# text
# stringlengths
# 1
# 93.6k
# (continuation of a RunConfig(...) construction that begins above this excerpt)
save_summary_steps=params["iterations"],  # write summaries once per host loop
tpu_config=tpu_config.TPUConfig(
    num_shards=mesh_shape.size,  # NOTE(review): presumably one shard per mesh entry — confirm
    iterations_per_loop=params["iterations"],  # steps run on-device per outer loop
    num_cores_per_replica=1,
    # BROADCAST: one input pipeline per host, replicated to all cores.
    per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST))
# Main estimator, used for training, the built-in validation eval, and prediction.
# NOTE(review): eval_batch_size is set to the *train* batch size here, while the
# per-task estimators below use params["eval_batch_size"] — looks intentional
# (validation eval shares the training input pipeline) but confirm.
estimator = tpu_estimator.TPUEstimator(
    use_tpu=params["use_tpu"],
    model_fn=model_fn,
    config=config,
    train_batch_size=params["train_batch_size"],
    eval_batch_size=params["train_batch_size"],  # see note above
    predict_batch_size=params["predict_batch_size"],
    params=params)
def _make_task_estimator(task):
    """Build a TPUEstimator whose params identify `task` as the eval task.

    The returned estimator shares the model function and run config with the
    main estimator; only params["eval_task"] differs, so the model_fn can
    select the right downstream-task evaluation pipeline.
    """
    task_params = params.copy()
    task_params.update(eval_task=task)
    return tpu_estimator.TPUEstimator(
        params=task_params,
        use_tpu=params["use_tpu"],
        model_fn=model_fn,
        config=config,
        train_batch_size=params["train_batch_size"],
        eval_batch_size=params["eval_batch_size"],
        predict_batch_size=params["predict_batch_size"])
# One dedicated estimator per downstream eval task (tagged via params["eval_task"]).
eval_task_estimators = {
    task: _make_task_estimator(task)
    for task in eval_tasks
}

# One-shot export mode: write a servable model and exit without training.
if args.export:
    export_model(estimator, "export", params)
    return

# One-shot predict mode: generate predictions from the current checkpoint and exit.
if args.predict:
    # Predict
    predictions = estimator.predict(input_fn=pred_input_fn)
    logger.info("Predictions generated")
    enc = fetch_encoder(params)  # encoder/tokenizer used to decode model output
    handle_pred_output_fn(predictions, logger, enc, params, out_name=f"predictions_{args.sacred_id}_{current_step}")
    return
def save_eval_results(task, eval_results):
    """Append one JSON line of eval metrics for `task` to this run's .jsonl log."""
    def as_python(x):
        # numpy scalars are not JSON-serializable; unwrap them to builtins.
        return x.item() if isinstance(x, numpy.generic) else x

    record = {'task': task, 'current_step': current_step}
    record.update((k, as_python(v)) for k, v in eval_results.items())
    with open(f'eval_{args.sacred_id}.jsonl', 'a') as fh:
        json.dump(record, fh)
        fh.write('\n')
def run_eval():
    """Run the validation-set evaluation and persist its metrics."""
    logger.info("Running evaluation...")
    validation_input_fn = partial(input_fn, eval=True)
    eval_results = estimator.evaluate(
        input_fn=validation_input_fn,
        steps=params["eval_steps"])
    logger.info(f"Eval results: {eval_results}")
    save_eval_results('validation', eval_results)
def run_eval_tasks():
    """Evaluate every configured downstream task on its dedicated estimator."""
    for task in eval_tasks:
        logger.info(f"Starting evaluation task '{task}'")
        descriptor = task_descriptors[task]
        info = descriptor["get_task_info_fn"](params)
        eval_results = eval_task_estimators[task].evaluate(
            name=task,
            input_fn=descriptor["input_fn"],
            steps=info["n_steps"])
        logger.info(f"Eval task '{task}' results: {eval_results}")
        save_eval_results(task, eval_results)
# Eval-only mode: run downstream tasks, then (optionally) validation eval, and exit.
if args.eval:
    run_eval_tasks()
    if params["eval_steps"] > 0:
        run_eval()
    return
elif has_predict_or_eval_steps_or_eval_tasks:
    # Eval and train - stop and predict and/or eval every checkpoint
    while current_step < params["train_steps"]:
        # Train up to the next checkpoint boundary, never past train_steps.
        next_checkpoint = min(current_step + args.steps_per_checkpoint,
        params["train_steps"])
        estimator.train(input_fn=partial(input_fn, global_step=current_step, eval=False), max_steps=next_checkpoint)
        current_step = next_checkpoint
        if params["predict_steps"] > 0:
            logger.info("Running prediction...")
            predictions = estimator.predict(input_fn=pred_input_fn)
            enc = fetch_encoder(params)  # encoder/tokenizer used to decode model output
            handle_pred_output_fn(predictions, logger, enc, params, out_name=f"predictions_{args.sacred_id}_{current_step}")
        if params["eval_steps"] > 0:
            run_eval()
        # NOTE(review): excerpt appears truncated here — the per-checkpoint
        # downstream-task evals (run_eval_tasks) and the plain-training else
        # branch are presumably below this point; confirm against the full file.