{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.736842105263158,
"eval_steps": 500,
"global_step": 81,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10526315789473684,
"grad_norm": 14.31118392944336,
"learning_rate": 2e-05,
"loss": 2.4434,
"step": 1
},
{
"epoch": 0.21052631578947367,
"grad_norm": 10.385307312011719,
"learning_rate": 4e-05,
"loss": 2.5855,
"step": 2
},
{
"epoch": 0.3157894736842105,
"grad_norm": 18.257061004638672,
"learning_rate": 6e-05,
"loss": 2.5327,
"step": 3
},
{
"epoch": 0.42105263157894735,
"grad_norm": 9.803607940673828,
"learning_rate": 8e-05,
"loss": 2.401,
"step": 4
},
{
"epoch": 0.5263157894736842,
"grad_norm": 10.1410493850708,
"learning_rate": 0.0001,
"loss": 2.3054,
"step": 5
},
{
"epoch": 0.631578947368421,
"grad_norm": 8.388815879821777,
"learning_rate": 0.00012,
"loss": 2.3298,
"step": 6
},
{
"epoch": 0.7368421052631579,
"grad_norm": 7.938385963439941,
"learning_rate": 0.00014,
"loss": 2.0323,
"step": 7
},
{
"epoch": 0.8421052631578947,
"grad_norm": 7.110404968261719,
"learning_rate": 0.00016,
"loss": 2.0579,
"step": 8
},
{
"epoch": 0.9473684210526315,
"grad_norm": 3.487077474594116,
"learning_rate": 0.00018,
"loss": 1.6501,
"step": 9
},
{
"epoch": 1.0789473684210527,
"grad_norm": 5.468748092651367,
"learning_rate": 0.0002,
"loss": 2.8277,
"step": 10
},
{
"epoch": 1.1842105263157894,
"grad_norm": 2.3874928951263428,
"learning_rate": 0.0001999229036240723,
"loss": 1.2231,
"step": 11
},
{
"epoch": 1.2894736842105263,
"grad_norm": 5.447896480560303,
"learning_rate": 0.0001996917333733128,
"loss": 1.5471,
"step": 12
},
{
"epoch": 1.3947368421052633,
"grad_norm": 3.8631601333618164,
"learning_rate": 0.00019930684569549264,
"loss": 1.1826,
"step": 13
},
{
"epoch": 1.5,
"grad_norm": 12.651008605957031,
"learning_rate": 0.00019876883405951377,
"loss": 1.4406,
"step": 14
},
{
"epoch": 1.6052631578947367,
"grad_norm": 2.3405802249908447,
"learning_rate": 0.00019807852804032305,
"loss": 1.0181,
"step": 15
},
{
"epoch": 1.7105263157894737,
"grad_norm": 6.5888671875,
"learning_rate": 0.00019723699203976766,
"loss": 1.2595,
"step": 16
},
{
"epoch": 1.8157894736842106,
"grad_norm": 4.1633758544921875,
"learning_rate": 0.00019624552364536473,
"loss": 1.0053,
"step": 17
},
{
"epoch": 1.9210526315789473,
"grad_norm": 2.3248441219329834,
"learning_rate": 0.00019510565162951537,
"loss": 0.9642,
"step": 18
},
{
"epoch": 2.0526315789473686,
"grad_norm": 5.530248641967773,
"learning_rate": 0.00019381913359224842,
"loss": 2.2521,
"step": 19
},
{
"epoch": 2.1578947368421053,
"grad_norm": 1.3485472202301025,
"learning_rate": 0.0001923879532511287,
"loss": 0.7636,
"step": 20
},
{
"epoch": 2.263157894736842,
"grad_norm": 2.785285472869873,
"learning_rate": 0.00019081431738250814,
"loss": 1.0499,
"step": 21
},
{
"epoch": 2.3684210526315788,
"grad_norm": 2.2571582794189453,
"learning_rate": 0.0001891006524188368,
"loss": 0.8719,
"step": 22
},
{
"epoch": 2.473684210526316,
"grad_norm": 1.8401918411254883,
"learning_rate": 0.00018724960070727972,
"loss": 0.8511,
"step": 23
},
{
"epoch": 2.5789473684210527,
"grad_norm": 2.386539936065674,
"learning_rate": 0.00018526401643540922,
"loss": 0.8703,
"step": 24
},
{
"epoch": 2.6842105263157894,
"grad_norm": 1.384438157081604,
"learning_rate": 0.00018314696123025454,
"loss": 0.6987,
"step": 25
},
{
"epoch": 2.7894736842105265,
"grad_norm": 1.705210566520691,
"learning_rate": 0.00018090169943749476,
"loss": 0.8401,
"step": 26
},
{
"epoch": 2.8947368421052633,
"grad_norm": 1.8555241823196411,
"learning_rate": 0.00017853169308807448,
"loss": 1.023,
"step": 27
},
{
"epoch": 3.026315789473684,
"grad_norm": 3.781007766723633,
"learning_rate": 0.0001760405965600031,
"loss": 1.4192,
"step": 28
},
{
"epoch": 3.1315789473684212,
"grad_norm": 1.589735507965088,
"learning_rate": 0.00017343225094356855,
"loss": 0.7493,
"step": 29
},
{
"epoch": 3.236842105263158,
"grad_norm": 2.163848638534546,
"learning_rate": 0.00017071067811865476,
"loss": 1.0892,
"step": 30
},
{
"epoch": 3.3421052631578947,
"grad_norm": 1.8226677179336548,
"learning_rate": 0.0001678800745532942,
"loss": 0.7154,
"step": 31
},
{
"epoch": 3.4473684210526314,
"grad_norm": 1.5850944519042969,
"learning_rate": 0.00016494480483301836,
"loss": 0.8619,
"step": 32
},
{
"epoch": 3.5526315789473686,
"grad_norm": 1.5230872631072998,
"learning_rate": 0.00016190939493098344,
"loss": 0.6566,
"step": 33
},
{
"epoch": 3.6578947368421053,
"grad_norm": 1.870915174484253,
"learning_rate": 0.00015877852522924732,
"loss": 0.8506,
"step": 34
},
{
"epoch": 3.763157894736842,
"grad_norm": 1.698755145072937,
"learning_rate": 0.00015555702330196023,
"loss": 0.762,
"step": 35
},
{
"epoch": 3.8684210526315788,
"grad_norm": 1.6367840766906738,
"learning_rate": 0.0001522498564715949,
"loss": 0.7136,
"step": 36
},
{
"epoch": 3.973684210526316,
"grad_norm": 2.9744303226470947,
"learning_rate": 0.00014886212414969553,
"loss": 0.9568,
"step": 37
},
{
"epoch": 4.105263157894737,
"grad_norm": 1.3073623180389404,
"learning_rate": 0.00014539904997395468,
"loss": 0.6397,
"step": 38
},
{
"epoch": 4.2105263157894735,
"grad_norm": 1.2846301794052124,
"learning_rate": 0.0001418659737537428,
"loss": 0.4418,
"step": 39
},
{
"epoch": 4.315789473684211,
"grad_norm": 1.6083595752716064,
"learning_rate": 0.000138268343236509,
"loss": 0.7064,
"step": 40
},
{
"epoch": 4.421052631578947,
"grad_norm": 1.2033262252807617,
"learning_rate": 0.0001346117057077493,
"loss": 0.5957,
"step": 41
},
{
"epoch": 4.526315789473684,
"grad_norm": 1.3466635942459106,
"learning_rate": 0.00013090169943749476,
"loss": 0.7566,
"step": 42
},
{
"epoch": 4.631578947368421,
"grad_norm": 1.616913914680481,
"learning_rate": 0.00012714404498650743,
"loss": 0.7539,
"step": 43
},
{
"epoch": 4.7368421052631575,
"grad_norm": 1.4130221605300903,
"learning_rate": 0.00012334453638559057,
"loss": 0.4622,
"step": 44
},
{
"epoch": 4.842105263157895,
"grad_norm": 1.3309547901153564,
"learning_rate": 0.00011950903220161285,
"loss": 0.7263,
"step": 45
},
{
"epoch": 4.947368421052632,
"grad_norm": 1.354387879371643,
"learning_rate": 0.0001156434465040231,
"loss": 0.5625,
"step": 46
},
{
"epoch": 5.078947368421052,
"grad_norm": 3.008500337600708,
"learning_rate": 0.00011175373974578378,
"loss": 0.7619,
"step": 47
},
{
"epoch": 5.184210526315789,
"grad_norm": 1.5682655572891235,
"learning_rate": 0.0001078459095727845,
"loss": 0.3858,
"step": 48
},
{
"epoch": 5.2894736842105265,
"grad_norm": 1.8495001792907715,
"learning_rate": 0.00010392598157590688,
"loss": 0.4573,
"step": 49
},
{
"epoch": 5.394736842105263,
"grad_norm": 2.635056972503662,
"learning_rate": 0.0001,
"loss": 0.7921,
"step": 50
},
{
"epoch": 5.5,
"grad_norm": 1.5783675909042358,
"learning_rate": 9.607401842409317e-05,
"loss": 0.3204,
"step": 51
},
{
"epoch": 5.605263157894737,
"grad_norm": 2.0661301612854004,
"learning_rate": 9.215409042721552e-05,
"loss": 0.6708,
"step": 52
},
{
"epoch": 5.7105263157894735,
"grad_norm": 1.4969799518585205,
"learning_rate": 8.824626025421626e-05,
"loss": 0.4052,
"step": 53
},
{
"epoch": 5.815789473684211,
"grad_norm": 1.6124238967895508,
"learning_rate": 8.435655349597689e-05,
"loss": 0.4829,
"step": 54
},
{
"epoch": 5.921052631578947,
"grad_norm": 1.431242823600769,
"learning_rate": 8.049096779838719e-05,
"loss": 0.4273,
"step": 55
},
{
"epoch": 6.052631578947368,
"grad_norm": 3.2095937728881836,
"learning_rate": 7.66554636144095e-05,
"loss": 0.8437,
"step": 56
},
{
"epoch": 6.157894736842105,
"grad_norm": 0.8892129063606262,
"learning_rate": 7.285595501349258e-05,
"loss": 0.2127,
"step": 57
},
{
"epoch": 6.2631578947368425,
"grad_norm": 1.6543368101119995,
"learning_rate": 6.909830056250527e-05,
"loss": 0.4305,
"step": 58
},
{
"epoch": 6.368421052631579,
"grad_norm": 1.4523223638534546,
"learning_rate": 6.538829429225069e-05,
"loss": 0.3762,
"step": 59
},
{
"epoch": 6.473684210526316,
"grad_norm": 0.9503556489944458,
"learning_rate": 6.173165676349103e-05,
"loss": 0.3628,
"step": 60
},
{
"epoch": 6.578947368421053,
"grad_norm": 1.4268410205841064,
"learning_rate": 5.8134026246257225e-05,
"loss": 0.4751,
"step": 61
},
{
"epoch": 6.684210526315789,
"grad_norm": 0.9671533703804016,
"learning_rate": 5.4600950026045326e-05,
"loss": 0.2934,
"step": 62
},
{
"epoch": 6.7894736842105265,
"grad_norm": 1.1906907558441162,
"learning_rate": 5.113787585030454e-05,
"loss": 0.2345,
"step": 63
},
{
"epoch": 6.894736842105263,
"grad_norm": 1.5449588298797607,
"learning_rate": 4.7750143528405126e-05,
"loss": 0.4283,
"step": 64
},
{
"epoch": 7.026315789473684,
"grad_norm": 2.223735809326172,
"learning_rate": 4.444297669803981e-05,
"loss": 0.4928,
"step": 65
},
{
"epoch": 7.131578947368421,
"grad_norm": 0.9985809326171875,
"learning_rate": 4.12214747707527e-05,
"loss": 0.3185,
"step": 66
},
{
"epoch": 7.2368421052631575,
"grad_norm": 1.0797033309936523,
"learning_rate": 3.8090605069016595e-05,
"loss": 0.4603,
"step": 67
},
{
"epoch": 7.342105263157895,
"grad_norm": 1.0436688661575317,
"learning_rate": 3.5055195166981645e-05,
"loss": 0.4009,
"step": 68
},
{
"epoch": 7.447368421052632,
"grad_norm": 1.1368814706802368,
"learning_rate": 3.211992544670582e-05,
"loss": 0.2486,
"step": 69
},
{
"epoch": 7.552631578947368,
"grad_norm": 1.0792099237442017,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.3308,
"step": 70
},
{
"epoch": 7.657894736842105,
"grad_norm": 0.9220963716506958,
"learning_rate": 2.6567749056431467e-05,
"loss": 0.3044,
"step": 71
},
{
"epoch": 7.7631578947368425,
"grad_norm": 1.1355466842651367,
"learning_rate": 2.3959403439996907e-05,
"loss": 0.2657,
"step": 72
},
{
"epoch": 7.868421052631579,
"grad_norm": 0.9300779104232788,
"learning_rate": 2.146830691192553e-05,
"loss": 0.2587,
"step": 73
},
{
"epoch": 7.973684210526316,
"grad_norm": 2.0543935298919678,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.5869,
"step": 74
},
{
"epoch": 8.105263157894736,
"grad_norm": 1.0328178405761719,
"learning_rate": 1.6853038769745467e-05,
"loss": 0.3424,
"step": 75
},
{
"epoch": 8.210526315789474,
"grad_norm": 1.0409759283065796,
"learning_rate": 1.4735983564590783e-05,
"loss": 0.3577,
"step": 76
},
{
"epoch": 8.31578947368421,
"grad_norm": 1.2581232786178589,
"learning_rate": 1.2750399292720283e-05,
"loss": 0.354,
"step": 77
},
{
"epoch": 8.421052631578947,
"grad_norm": 1.073712706565857,
"learning_rate": 1.0899347581163221e-05,
"loss": 0.3332,
"step": 78
},
{
"epoch": 8.526315789473685,
"grad_norm": 1.0400298833847046,
"learning_rate": 9.185682617491863e-06,
"loss": 0.3112,
"step": 79
},
{
"epoch": 8.631578947368421,
"grad_norm": 1.7903478145599365,
"learning_rate": 7.612046748871327e-06,
"loss": 0.4138,
"step": 80
},
{
"epoch": 8.736842105263158,
"grad_norm": 1.1479917764663696,
"learning_rate": 6.180866407751595e-06,
"loss": 0.2799,
"step": 81
}
],
"logging_steps": 1,
"max_steps": 90,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0747062240097075e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}