help="If set, outputs sample from the dataset and quits.") |
parser.add_argument("--sacred_id", type=str, default="nosacred", help="Sacred run id.") |
parser.add_argument("--entmax_sampling", action="store_true", help="(experimental) use entmax sampling") |
parser.add_argument("--export", action="store_true", help="If set, will export the model.") |
args = parser.parse_args() |
assert args.model is not None, "Model must be set" |
return args |
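
# Typical invocations, assuming the CLI defined above (script and config file
# names are illustrative, not taken from this file):
#   python main.py --model configs/my_model.json --tpu my-tpu-name
#   python main.py --model configs/my_model.json --predict --prompt prompt.txt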

def main(args):
    # Setup logging
    logger = setup_logging(args)

    # Read params of model
    params = fetch_model_params(args.model)

    # Fetch appropriate input functions
    input_fn = params.get("input_fn", "sequential_input")
    if input_fn == "sequential_input":
        input_fn = sequential_input
    elif input_fn == "generic_text":
        input_fn = generic_text
    pred_input_fn = pred_input
    handle_pred_output_fn = handle_pred_output

    # get current step
    current_step = int(estimator_lib._load_global_step_from_checkpoint_dir(params["model_path"]))
    logger.info(f"Current step {current_step}")
if params["mlm_training"]: |
mlm_sample_text_fn = partial(mlm_sample_text, params) |
input_fn = partial(generic_text, sample_text_fn=mlm_sample_text_fn) |
if args.check_dataset: |
check_dataset(input_fn, params) |
# Fetch encoder per params |
encoder = fetch_encoder(params) |
pred_input_fn = partial(pred_input_fn, path_to_prompt=args.prompt, logger=logger, enc=encoder) |
# Sample from Dataset if check dataset flag is on |
if args.check_dataset: |
check_dataset(input_fn, params, global_step=current_step) |
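
    # (Per the --check_dataset help text above, check_dataset outputs decoded
    # samples from the input pipeline and then quits rather than continuing on
    # to training.)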

    # Confirm deletion of checkpoint files if --new flag is set
    if args.new:
        if yes_or_no(f"Are you sure you want to remove '{params['model_path']}' to start afresh?"):
            remove_gs_or_filepath(params["model_path"])
        else:
            exit()

    # Save config to logdir for experiment management
    save_config(params, params["model_path"])

    # Add to params: auto_layout, auto_layout_and_mesh_shape, use_tpu, num_cores
    mesh_shape = mtf.convert_to_shape(params["mesh_shape"])
    params["num_cores"] = mesh_shape.size
    params["auto_layout"] = args.auto_layout
    params["auto_layout_and_mesh_shape"] = args.auto_layout_and_mesh_shape
    params["use_tpu"] = args.tpu is not None
    params["gpu_ids"] = args.gpu_ids
    params["steps_per_checkpoint"] = args.steps_per_checkpoint

    # Expand attention types param
    params["attention_types"] = expand_attention_types_params(params["attention_types"])
    assert len(params["attention_types"]) == params["n_layer"]  # Assert that the length of expanded list = num layers
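
    # Illustrative expansion (hypothetical config value): attention_types
    # [[["global", "local"], 2]] expands layer-wise to
    # ["global", "local", "global", "local"], one entry per layer, which is
    # what the assert above checks against n_layer.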
params["predict_batch_size"] = params.get("predict_batch_size", 1) # Default to 1 |
params["predict"] = args.predict |
params['model'] = params.get("model", "GPT") # Default model selection to GPT since it's the only option for now |
params["export"] = args.export |

    # Set sampling parameters
    params["sampling_use_entmax"] = args.entmax_sampling

    # Sample quality of MoE models suffers when using the faster sampling method,
    # so default to slow_sampling if MoE layers are present
    params["slow_sampling"] = params["moe_layers"] is not None

    logger.info(f"params = {params}")

    # Get eval tasks from params
    eval_tasks = params.get("eval_tasks", [])
    has_predict_or_eval_steps_or_eval_tasks = (
        params["predict_steps"] > 0 or params["eval_steps"] > 0 or len(eval_tasks) > 0
    )

    for t in eval_tasks:
        assert t in task_descriptors, f"Eval task '{t}' is not known"
        task_descriptors[t]["init_fn"](params)
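
    # Each eval_tasks entry must name a key in task_descriptors; its init_fn hook
    # presumably prepares whatever data the task needs before evaluation runs.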

    # Set up TPUs and Estimator
    if args.tpu == "colab":
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver() if params["use_tpu"] else None
    else:
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(args.tpu) if params["use_tpu"] else None
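
    # When --tpu is "colab", TPUClusterResolver() is called with no arguments,
    # which lets TensorFlow auto-detect the TPU attached to a Colab runtime;
    # otherwise the named TPU is resolved explicitly.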

    config = tpu_config.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=params["model_path"],
        save_checkpoints_steps=None,  # Disable the default saver
        save_checkpoints_secs=None,  # Disable the default saver
        log_step_count_steps=params["iterations"],