{
  "best_global_step": 2476,
  "best_metric": 0.03474666550755501,
  "best_model_checkpoint": "./multilingual-e5-large-instruct-tuned/checkpoint-2476",
  "epoch": 1.0,
  "eval_steps": 2476,
  "global_step": 49503,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.050017170676524655,
      "grad_norm": 2.024930715560913,
      "learning_rate": 1.4826e-05,
      "loss": 0.1209,
      "step": 2476
    },
    {
      "epoch": 0.050017170676524655,
      "eval_loss": 0.03474666550755501,
      "eval_runtime": 4.009,
      "eval_samples_per_second": 249.438,
      "eval_steps_per_second": 20.953,
      "step": 2476
    },
    {
      "epoch": 0.10003434135304931,
      "grad_norm": 20.096084594726562,
      "learning_rate": 2.9681999999999998e-05,
      "loss": 0.0737,
      "step": 4952
    },
    {
      "epoch": 0.10003434135304931,
      "eval_loss": 0.04586857557296753,
      "eval_runtime": 3.9771,
      "eval_samples_per_second": 251.442,
      "eval_steps_per_second": 21.121,
      "step": 4952
    },
    {
      "epoch": 0.15005151202957395,
      "grad_norm": 28.073986053466797,
      "learning_rate": 2.836797519268364e-05,
      "loss": 0.087,
      "step": 7428
    },
    {
      "epoch": 0.15005151202957395,
      "eval_loss": 0.07319504022598267,
      "eval_runtime": 4.0741,
      "eval_samples_per_second": 245.454,
      "eval_steps_per_second": 20.618,
      "step": 7428
    },
    {
      "epoch": 0.20006868270609862,
      "grad_norm": 17.107769012451172,
      "learning_rate": 2.6700222456913017e-05,
      "loss": 0.0825,
      "step": 9904
    },
    {
      "epoch": 0.20006868270609862,
      "eval_loss": 0.12089800834655762,
      "eval_runtime": 3.9723,
      "eval_samples_per_second": 251.744,
      "eval_steps_per_second": 21.146,
      "step": 9904
    },
    {
      "epoch": 0.2500858533826233,
      "grad_norm": 3.858909845352173,
      "learning_rate": 2.503112149742714e-05,
      "loss": 0.0783,
      "step": 12380
    },
    {
      "epoch": 0.2500858533826233,
      "eval_loss": 0.0933559387922287,
      "eval_runtime": 4.0558,
      "eval_samples_per_second": 246.558,
      "eval_steps_per_second": 20.711,
      "step": 12380
    },
    {
      "epoch": 0.3001030240591479,
      "grad_norm": 14.209930419921875,
      "learning_rate": 2.3362694649798892e-05,
      "loss": 0.071,
      "step": 14856
    },
    {
      "epoch": 0.3001030240591479,
      "eval_loss": 0.07928071916103363,
      "eval_runtime": 4.0248,
      "eval_samples_per_second": 248.461,
      "eval_steps_per_second": 20.871,
      "step": 14856
    },
    {
      "epoch": 0.3501201947356726,
      "grad_norm": 10.779126167297363,
      "learning_rate": 2.1693593690313013e-05,
      "loss": 0.0661,
      "step": 17332
    },
    {
      "epoch": 0.3501201947356726,
      "eval_loss": 0.08546418696641922,
      "eval_runtime": 4.043,
      "eval_samples_per_second": 247.343,
      "eval_steps_per_second": 20.777,
      "step": 17332
    },
    {
      "epoch": 0.40013736541219724,
      "grad_norm": 1.4697766304016113,
      "learning_rate": 2.002584095454239e-05,
      "loss": 0.0652,
      "step": 19808
    },
    {
      "epoch": 0.40013736541219724,
      "eval_loss": 0.09644335508346558,
      "eval_runtime": 4.0609,
      "eval_samples_per_second": 246.249,
      "eval_steps_per_second": 20.685,
      "step": 19808
    },
    {
      "epoch": 0.4501545360887219,
      "grad_norm": 2.2271530628204346,
      "learning_rate": 1.8356739995056515e-05,
      "loss": 0.063,
      "step": 22284
    },
    {
      "epoch": 0.4501545360887219,
      "eval_loss": 0.08915343880653381,
      "eval_runtime": 4.0402,
      "eval_samples_per_second": 247.51,
      "eval_steps_per_second": 20.791,
      "step": 22284
    },
    {
      "epoch": 0.5001717067652466,
      "grad_norm": 1.1830430030822754,
      "learning_rate": 1.668966137114352e-05,
      "loss": 0.056,
      "step": 24760
    },
    {
      "epoch": 0.5001717067652466,
      "eval_loss": 0.09230654686689377,
      "eval_runtime": 3.9998,
      "eval_samples_per_second": 250.015,
      "eval_steps_per_second": 21.001,
      "step": 24760
    },
    {
      "epoch": 0.5501888774417713,
      "grad_norm": 1.6715331077575684,
      "learning_rate": 1.5020560411657642e-05,
      "loss": 0.0509,
      "step": 27236
    },
    {
      "epoch": 0.5501888774417713,
      "eval_loss": 0.10158851742744446,
      "eval_runtime": 4.0427,
      "eval_samples_per_second": 247.359,
      "eval_steps_per_second": 20.778,
      "step": 27236
    },
    {
      "epoch": 0.6002060481182958,
      "grad_norm": 12.792489051818848,
      "learning_rate": 1.3351459452171763e-05,
      "loss": 0.045,
      "step": 29712
    },
    {
      "epoch": 0.6002060481182958,
      "eval_loss": 0.09177897125482559,
      "eval_runtime": 4.0062,
      "eval_samples_per_second": 249.616,
      "eval_steps_per_second": 20.968,
      "step": 29712
    },
    {
      "epoch": 0.6502232187948205,
      "grad_norm": 2.540958881378174,
      "learning_rate": 1.1683032604543514e-05,
      "loss": 0.0472,
      "step": 32188
    },
    {
      "epoch": 0.6502232187948205,
      "eval_loss": 0.08962409943342209,
      "eval_runtime": 3.9987,
      "eval_samples_per_second": 250.082,
      "eval_steps_per_second": 21.007,
      "step": 32188
    },
    {
      "epoch": 0.7002403894713451,
      "grad_norm": 1.4448179006576538,
      "learning_rate": 1.0015279868772891e-05,
      "loss": 0.0396,
      "step": 34664
    },
    {
      "epoch": 0.7002403894713451,
      "eval_loss": 0.09593009948730469,
      "eval_runtime": 4.0564,
      "eval_samples_per_second": 246.525,
      "eval_steps_per_second": 20.708,
      "step": 34664
    },
    {
      "epoch": 0.7502575601478698,
      "grad_norm": 0.11427940428256989,
      "learning_rate": 8.346178909287014e-06,
      "loss": 0.0371,
      "step": 37140
    },
    {
      "epoch": 0.7502575601478698,
      "eval_loss": 0.08187365531921387,
      "eval_runtime": 4.0715,
      "eval_samples_per_second": 245.607,
      "eval_steps_per_second": 20.631,
      "step": 37140
    },
    {
      "epoch": 0.8002747308243945,
      "grad_norm": 0.4649958312511444,
      "learning_rate": 6.677752061658764e-06,
      "loss": 0.0341,
      "step": 39616
    },
    {
      "epoch": 0.8002747308243945,
      "eval_loss": 0.08447403460741043,
      "eval_runtime": 3.9953,
      "eval_samples_per_second": 250.296,
      "eval_steps_per_second": 21.025,
      "step": 39616
    },
    {
      "epoch": 0.8502919015009192,
      "grad_norm": 0.39804375171661377,
      "learning_rate": 5.009999325888142e-06,
      "loss": 0.0344,
      "step": 42092
    },
    {
      "epoch": 0.8502919015009192,
      "eval_loss": 0.07903166115283966,
      "eval_runtime": 3.9825,
      "eval_samples_per_second": 251.1,
      "eval_steps_per_second": 21.092,
      "step": 42092
    },
    {
      "epoch": 0.9003090721774438,
      "grad_norm": 0.6405961513519287,
      "learning_rate": 3.341572478259893e-06,
      "loss": 0.0288,
      "step": 44568
    },
    {
      "epoch": 0.9003090721774438,
      "eval_loss": 0.08632776886224747,
      "eval_runtime": 4.0332,
      "eval_samples_per_second": 247.94,
      "eval_steps_per_second": 20.827,
      "step": 44568
    },
    {
      "epoch": 0.9503262428539685,
      "grad_norm": 0.09592943638563156,
      "learning_rate": 1.6724715187740154e-06,
      "loss": 0.03,
      "step": 47044
    },
    {
      "epoch": 0.9503262428539685,
      "eval_loss": 0.07667936384677887,
      "eval_runtime": 4.0126,
      "eval_samples_per_second": 249.217,
      "eval_steps_per_second": 20.934,
      "step": 47044
    }
  ],
  "logging_steps": 2476,
  "max_steps": 49503,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 2476,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}