{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.530250481695568,
  "eval_steps": 500,
  "global_step": 16600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "clip_ratio": 0.0,
      "completion_length": 27.6790625,
      "epoch": 0.10276172125883108,
      "grad_norm": 5.0809830592989,
      "kl": 2.854698256850243,
      "learning_rate": 9.897225077081192e-07,
      "loss": 0.1145,
      "reward": 0.7617031238693744,
      "reward_std": 0.08005153443926247,
      "rewards/iqa_reward_score": 0.7617031238693744,
      "step": 200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.8296875,
      "epoch": 0.20552344251766216,
      "grad_norm": 9.623104019454459,
      "kl": 0.835672607421875,
      "learning_rate": 9.794450154162384e-07,
      "loss": 0.0334,
      "reward": 0.8317257825657726,
      "reward_std": 0.05232097678628634,
      "rewards/iqa_reward_score": 0.8317257825657726,
      "step": 400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.5725,
      "epoch": 0.25690430314707774,
      "grad_norm": 13.777697334252704,
      "kl": 1.6955078125,
      "learning_rate": 9.74306269270298e-07,
      "loss": 0.0678,
      "reward": 0.8289125004410743,
      "reward_std": 0.054873663182370365,
      "rewards/iqa_reward_score": 0.8289125004410743,
      "step": 500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 25.366875,
      "epoch": 0.30828516377649323,
      "grad_norm": 4.847558824602628,
      "kl": 0.994892578125,
      "learning_rate": 9.691675231243576e-07,
      "loss": 0.0398,
      "reward": 0.8202585917711258,
      "reward_std": 0.07689082582248374,
      "rewards/iqa_reward_score": 0.8202585917711258,
      "step": 600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.56625,
      "epoch": 0.4110468850353243,
      "grad_norm": 4.395243940209246,
      "kl": 0.655250244140625,
      "learning_rate": 9.58890030832477e-07,
      "loss": 0.0262,
      "reward": 0.8299445299804211,
      "reward_std": 0.06683978972781915,
      "rewards/iqa_reward_score": 0.8299445299804211,
      "step": 800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.8171875,
      "epoch": 0.5138086062941555,
      "grad_norm": 16.05160688969383,
      "kl": 0.8154248046875,
      "learning_rate": 9.486125385405961e-07,
      "loss": 0.0326,
      "reward": 0.8257433583214879,
      "reward_std": 0.06838053988220054,
      "rewards/iqa_reward_score": 0.8257433583214879,
      "step": 1000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.175,
      "epoch": 0.6165703275529865,
      "grad_norm": 27.21633422585466,
      "kl": 0.713743896484375,
      "learning_rate": 9.383350462487153e-07,
      "loss": 0.0286,
      "reward": 0.8305933582037688,
      "reward_std": 0.06543343772966181,
      "rewards/iqa_reward_score": 0.8305933582037688,
      "step": 1200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.2303125,
      "epoch": 0.7193320488118176,
      "grad_norm": 3.4545070827612996,
      "kl": 0.751297607421875,
      "learning_rate": 9.280575539568345e-07,
      "loss": 0.03,
      "reward": 0.8306282886117696,
      "reward_std": 0.06594495058219764,
      "rewards/iqa_reward_score": 0.8306282886117696,
      "step": 1400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.125625,
      "epoch": 0.7707129094412332,
      "grad_norm": 7.5528485456603915,
      "kl": 0.73730712890625,
      "learning_rate": 9.229188078108942e-07,
      "loss": 0.0295,
      "reward": 0.8229234362393618,
      "reward_std": 0.055542698715289586,
      "rewards/iqa_reward_score": 0.8229234362393618,
      "step": 1500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.258125,
      "epoch": 0.8220937700706487,
      "grad_norm": 23.45674070812895,
      "kl": 0.69072021484375,
      "learning_rate": 9.177800616649537e-07,
      "loss": 0.0276,
      "reward": 0.832437502220273,
      "reward_std": 0.06442531289503677,
      "rewards/iqa_reward_score": 0.832437502220273,
      "step": 1600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.23,
      "epoch": 0.9248554913294798,
      "grad_norm": 12.52502343446259,
      "kl": 0.839024658203125,
      "learning_rate": 9.075025693730729e-07,
      "loss": 0.0336,
      "reward": 0.8224937494471669,
      "reward_std": 0.08065707363217371,
      "rewards/iqa_reward_score": 0.8224937494471669,
      "step": 1800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.955625,
      "epoch": 1.0277456647398844,
      "grad_norm": 5.6579978426164885,
      "kl": 0.96297119140625,
      "learning_rate": 8.972250770811921e-07,
      "loss": 0.0385,
      "reward": 0.832288283109665,
      "reward_std": 0.06895677681895904,
      "rewards/iqa_reward_score": 0.832288283109665,
      "step": 2000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.1115625,
      "epoch": 1.1305073859987154,
      "grad_norm": 12.39659796004838,
      "kl": 0.695694580078125,
      "learning_rate": 8.869475847893114e-07,
      "loss": 0.0278,
      "reward": 0.8560353901609778,
      "reward_std": 0.06492814173863735,
      "rewards/iqa_reward_score": 0.8560353901609778,
      "step": 2200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.026875,
      "epoch": 1.2332691072575466,
      "grad_norm": 7.918243884203464,
      "kl": 0.668778076171875,
      "learning_rate": 8.766700924974306e-07,
      "loss": 0.0268,
      "reward": 0.8575455713272094,
      "reward_std": 0.05481337786943186,
      "rewards/iqa_reward_score": 0.8575455713272094,
      "step": 2400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.2875,
      "epoch": 1.284649967886962,
      "grad_norm": 8.951939513541785,
      "kl": 0.63311767578125,
      "learning_rate": 8.715313463514901e-07,
      "loss": 0.0253,
      "reward": 0.8540877342224121,
      "reward_std": 0.05867271815717686,
      "rewards/iqa_reward_score": 0.8540877342224121,
      "step": 2500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.24,
      "epoch": 1.3360308285163778,
      "grad_norm": 61.12484483215961,
      "kl": 0.57617919921875,
      "learning_rate": 8.663926002055498e-07,
      "loss": 0.023,
      "reward": 0.8668073464185,
      "reward_std": 0.04293165822920855,
      "rewards/iqa_reward_score": 0.8668073464185,
      "step": 2600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.4415625,
      "epoch": 1.4387925497752088,
      "grad_norm": 9.595515127565994,
      "kl": 0.912845458984375,
      "learning_rate": 8.56115107913669e-07,
      "loss": 0.0365,
      "reward": 1.8409671924263238,
      "reward_std": 0.10609373373445123,
      "rewards/format_reward": 0.98,
      "rewards/iqa_reward_score": 0.8609671889990568,
      "step": 2800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.39375,
      "epoch": 1.5415542710340397,
      "grad_norm": 14.47016329543208,
      "kl": 0.811949462890625,
      "learning_rate": 8.458376156217882e-07,
      "loss": 0.0325,
      "reward": 1.8501656317710877,
      "reward_std": 0.08619433440471766,
      "rewards/format_reward": 0.9828125,
      "rewards/iqa_reward_score": 0.8673531262949109,
      "step": 3000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.2296875,
      "epoch": 1.644315992292871,
      "grad_norm": 38.07779098696265,
      "kl": 0.782005615234375,
      "learning_rate": 8.355601233299074e-07,
      "loss": 0.0313,
      "reward": 1.8367290687561035,
      "reward_std": 0.10177788778877585,
      "rewards/format_reward": 0.980625,
      "rewards/iqa_reward_score": 0.8561040629819036,
      "step": 3200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.8190625,
      "epoch": 1.747077713551702,
      "grad_norm": 7.776135628712392,
      "kl": 0.9329833984375,
      "learning_rate": 8.252826310380266e-07,
      "loss": 0.0373,
      "reward": 1.84249222189188,
      "reward_std": 0.09717632832071103,
      "rewards/format_reward": 0.98125,
      "rewards/iqa_reward_score": 0.8612422209605575,
      "step": 3400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.059375,
      "epoch": 1.7984585741811174,
      "grad_norm": 14.07929192854843,
      "kl": 0.868226318359375,
      "learning_rate": 8.201438848920863e-07,
      "loss": 0.0347,
      "reward": 1.837091253399849,
      "reward_std": 0.08139224100508727,
      "rewards/format_reward": 0.98875,
      "rewards/iqa_reward_score": 0.8483412505686283,
      "step": 3500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.99375,
      "epoch": 1.8498394348105331,
      "grad_norm": 11.735278559157656,
      "kl": 0.7970263671875,
      "learning_rate": 8.150051387461459e-07,
      "loss": 0.0319,
      "reward": 1.849564844816923,
      "reward_std": 0.07906727260153275,
      "rewards/format_reward": 0.9875,
      "rewards/iqa_reward_score": 0.8620648480206728,
      "step": 3600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.0225,
      "epoch": 1.9526011560693641,
      "grad_norm": 7.583634891952906,
      "kl": 0.7811505126953125,
      "learning_rate": 8.047276464542652e-07,
      "loss": 0.0312,
      "reward": 1.8461687500029802,
      "reward_std": 0.08467546943458729,
      "rewards/format_reward": 0.9859375,
      "rewards/iqa_reward_score": 0.8602312506362796,
      "step": 3800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.99625,
      "epoch": 2.0554913294797688,
      "grad_norm": 14.625364812925723,
      "kl": 0.786697998046875,
      "learning_rate": 7.944501541623844e-07,
      "loss": 0.0315,
      "reward": 1.8452205635607242,
      "reward_std": 0.09883114330208627,
      "rewards/format_reward": 0.980625,
      "rewards/iqa_reward_score": 0.8645955645292998,
      "step": 4000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.9753125,
      "epoch": 2.1582530507385997,
      "grad_norm": 33.54392125627147,
      "kl": 0.760584716796875,
      "learning_rate": 7.841726618705036e-07,
      "loss": 0.0304,
      "reward": 1.8701025630533694,
      "reward_std": 0.08290246042713989,
      "rewards/format_reward": 0.985,
      "rewards/iqa_reward_score": 0.8851025623455644,
      "step": 4200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.9215625,
      "epoch": 2.2610147719974307,
      "grad_norm": 47.05045527355845,
      "kl": 0.8736767578125,
      "learning_rate": 7.738951695786228e-07,
      "loss": 0.0349,
      "reward": 1.8553532859683037,
      "reward_std": 0.10405993272172054,
      "rewards/format_reward": 0.980625,
      "rewards/iqa_reward_score": 0.8747282821312546,
      "step": 4400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.350625,
      "epoch": 2.3123956326268464,
      "grad_norm": 17.410628447499427,
      "kl": 0.89654296875,
      "learning_rate": 7.687564234326824e-07,
      "loss": 0.0359,
      "reward": 1.8479799877107144,
      "reward_std": 0.12358368890389101,
      "rewards/format_reward": 0.976875,
      "rewards/iqa_reward_score": 0.871104982867837,
      "step": 4500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.386875,
      "epoch": 2.363776493256262,
      "grad_norm": 8.447575577812339,
      "kl": 0.8929931640625,
      "learning_rate": 7.63617677286742e-07,
      "loss": 0.0357,
      "reward": 1.8610911390185356,
      "reward_std": 0.08579985730873886,
      "rewards/format_reward": 0.985,
      "rewards/iqa_reward_score": 0.8760911403596401,
      "step": 4600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.0375,
      "epoch": 2.466538214515093,
      "grad_norm": 143.05962527749097,
      "kl": 0.93417236328125,
      "learning_rate": 7.533401849948612e-07,
      "loss": 0.0374,
      "reward": 1.8311420308053494,
      "reward_std": 0.11677662628731923,
      "rewards/format_reward": 0.9784375,
      "rewards/iqa_reward_score": 0.8527045329660177,
      "step": 4800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.01125,
      "epoch": 2.569299935773924,
      "grad_norm": 7.1542045396705,
      "kl": 0.94165771484375,
      "learning_rate": 7.430626927029804e-07,
      "loss": 0.0377,
      "reward": 1.8192646995186805,
      "reward_std": 0.14661665820371128,
      "rewards/format_reward": 0.9696875,
      "rewards/iqa_reward_score": 0.8495772035047412,
      "step": 5000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.18375,
      "epoch": 2.6720616570327556,
      "grad_norm": 6.8796582113899865,
      "kl": 0.9063818359375,
      "learning_rate": 7.327852004110997e-07,
      "loss": 0.0363,
      "reward": 1.8397980490326882,
      "reward_std": 0.1039076264774485,
      "rewards/format_reward": 0.97875,
      "rewards/iqa_reward_score": 0.8610480467230082,
      "step": 5200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.974375,
      "epoch": 2.7748233782915865,
      "grad_norm": 21.679690712318255,
      "kl": 1.140872802734375,
      "learning_rate": 7.225077081192189e-07,
      "loss": 0.0456,
      "reward": 1.843569800555706,
      "reward_std": 0.10958875504176831,
      "rewards/format_reward": 0.97875,
      "rewards/iqa_reward_score": 0.8648197993636131,
      "step": 5400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.874375,
      "epoch": 2.826204238921002,
      "grad_norm": 15.562815242952489,
      "kl": 0.83869384765625,
      "learning_rate": 7.173689619732784e-07,
      "loss": 0.0335,
      "reward": 1.8646468751132488,
      "reward_std": 0.07459007016877876,
      "rewards/format_reward": 0.989375,
      "rewards/iqa_reward_score": 0.8752718791365623,
      "step": 5500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.930625,
      "epoch": 2.8775850995504175,
      "grad_norm": 6.830195802437781,
      "kl": 0.790654296875,
      "learning_rate": 7.122302158273381e-07,
      "loss": 0.0316,
      "reward": 1.8246960963308811,
      "reward_std": 0.14376337262161543,
      "rewards/format_reward": 0.970625,
      "rewards/iqa_reward_score": 0.8540710960328579,
      "step": 5600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.023125,
      "epoch": 2.9803468208092485,
      "grad_norm": 14.937325884677351,
      "kl": 0.72754638671875,
      "learning_rate": 7.019527235354573e-07,
      "loss": 0.0291,
      "reward": 1.8573167976737022,
      "reward_std": 0.07879514415035374,
      "rewards/format_reward": 0.986875,
      "rewards/iqa_reward_score": 0.8704417966306209,
      "step": 5800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.8778125,
      "epoch": 3.083236994219653,
      "grad_norm": 20.657599431765522,
      "kl": 0.769075927734375,
      "learning_rate": 6.916752312435765e-07,
      "loss": 0.0308,
      "reward": 1.8757967218756675,
      "reward_std": 0.07059297368963598,
      "rewards/format_reward": 0.989375,
      "rewards/iqa_reward_score": 0.8864217212796212,
      "step": 6000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.2796875,
      "epoch": 3.185998715478484,
      "grad_norm": 78.23537527732513,
      "kl": 0.89517333984375,
      "learning_rate": 6.813977389516957e-07,
      "loss": 0.0358,
      "reward": 1.8553570313751697,
      "reward_std": 0.09863820670958376,
      "rewards/format_reward": 0.9796875,
      "rewards/iqa_reward_score": 0.8756695335730911,
      "step": 6200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.8103125,
      "epoch": 3.2887604367373156,
      "grad_norm": 17.956803144421166,
      "kl": 0.8780908203125,
      "learning_rate": 6.711202466598149e-07,
      "loss": 0.0351,
      "reward": 1.8629054698348044,
      "reward_std": 0.0895115670125233,
      "rewards/format_reward": 0.9828125,
      "rewards/iqa_reward_score": 0.880092968903482,
      "step": 6400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.025625,
      "epoch": 3.340141297366731,
      "grad_norm": 14.577257269858018,
      "kl": 0.918896484375,
      "learning_rate": 6.659815005138745e-07,
      "loss": 0.0368,
      "reward": 1.8509218773245812,
      "reward_std": 0.128468633540906,
      "rewards/format_reward": 0.97375,
      "rewards/iqa_reward_score": 0.8771718776226044,
      "step": 6500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.506875,
      "epoch": 3.3915221579961465,
      "grad_norm": 11.431877019480062,
      "kl": 0.9472998046875,
      "learning_rate": 6.608427543679342e-07,
      "loss": 0.0379,
      "reward": 1.857479686588049,
      "reward_std": 0.1099585933983326,
      "rewards/format_reward": 0.975625,
      "rewards/iqa_reward_score": 0.8818546874821186,
      "step": 6600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.7065625,
      "epoch": 3.4942838792549775,
      "grad_norm": 6.9566554996708,
      "kl": 0.9450537109375,
      "learning_rate": 6.505652620760534e-07,
      "loss": 0.0378,
      "reward": 1.870063279643655,
      "reward_std": 0.08640069031709573,
      "rewards/format_reward": 0.9828125,
      "rewards/iqa_reward_score": 0.8872507839277387,
      "step": 6800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.3125,
      "epoch": 3.5970456005138085,
      "grad_norm": 9.466798266539996,
      "kl": 1.00124755859375,
      "learning_rate": 6.402877697841727e-07,
      "loss": 0.0401,
      "reward": 1.8952510181069373,
      "reward_std": 0.032708497148996686,
      "rewards/format_reward": 1.0,
      "rewards/iqa_reward_score": 0.8952510149776935,
      "step": 7000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.36875,
      "epoch": 3.6998073217726395,
      "grad_norm": 2557.560579743778,
      "kl": 1.13185791015625,
      "learning_rate": 6.300102774922919e-07,
      "loss": 0.0453,
      "reward": 1.892985941618681,
      "reward_std": 0.034503267100517404,
      "rewards/format_reward": 0.999375,
      "rewards/iqa_reward_score": 0.8936109407246113,
      "step": 7200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.926875,
      "epoch": 3.802569043031471,
      "grad_norm": 10.085679643415842,
      "kl": 1.2185986328125,
      "learning_rate": 6.197327852004111e-07,
      "loss": 0.0487,
      "reward": 1.8877808587253093,
      "reward_std": 0.04178271594835678,
      "rewards/format_reward": 0.9965625,
      "rewards/iqa_reward_score": 0.891218360774219,
      "step": 7400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 30.4325,
      "epoch": 3.853949903660886,
      "grad_norm": 4.810497524915146,
      "kl": 1.437158203125,
      "learning_rate": 6.145940390544707e-07,
      "loss": 0.0575,
      "reward": 1.8972664028406143,
      "reward_std": 0.04059907855524216,
      "rewards/format_reward": 0.995625,
      "rewards/iqa_reward_score": 0.9016414076089859,
      "step": 7500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 29.053125,
      "epoch": 3.905330764290302,
      "grad_norm": 25.880547489302362,
      "kl": 1.35017578125,
      "learning_rate": 6.094552929085303e-07,
      "loss": 0.054,
      "reward": 1.8683781233429908,
      "reward_std": 0.07233420302451123,
      "rewards/format_reward": 0.98625,
      "rewards/iqa_reward_score": 0.8821281273663044,
      "step": 7600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.375,
      "epoch": 4.008220937700706,
      "grad_norm": 66.69984916070555,
      "kl": 1.363798828125,
      "learning_rate": 5.991778006166495e-07,
      "loss": 0.0545,
      "reward": 1.83771797016263,
      "reward_std": 0.13687979618785903,
      "rewards/format_reward": 0.97,
      "rewards/iqa_reward_score": 0.8677179708704352,
      "step": 7800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.53,
      "epoch": 4.1109826589595375,
      "grad_norm": 165.49295535810293,
      "kl": 1.10419921875,
      "learning_rate": 5.889003083247688e-07,
      "loss": 0.0442,
      "reward": 1.8460748683661223,
      "reward_std": 0.1447469698698842,
      "rewards/format_reward": 0.9675,
      "rewards/iqa_reward_score": 0.8785748684033752,
      "step": 8000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 29.9115625,
      "epoch": 4.213744380218369,
      "grad_norm": 8.447151915362078,
      "kl": 1.172177734375,
      "learning_rate": 5.78622816032888e-07,
      "loss": 0.0469,
      "reward": 1.8300597659498452,
      "reward_std": 0.17003738911065738,
      "rewards/format_reward": 0.95875,
      "rewards/iqa_reward_score": 0.8713097679056228,
      "step": 8200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 29.536875,
      "epoch": 4.3165061014771995,
      "grad_norm": 139.19766910371885,
      "kl": 1.204990234375,
      "learning_rate": 5.683453237410072e-07,
      "loss": 0.0482,
      "reward": 1.8459081883728503,
      "reward_std": 0.1452132201299537,
      "rewards/format_reward": 0.966875,
      "rewards/iqa_reward_score": 0.8790331880003214,
      "step": 8400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.16125,
      "epoch": 4.367886962106615,
      "grad_norm": 67.10745002633337,
      "kl": 1.1150390625,
      "learning_rate": 5.632065775950667e-07,
      "loss": 0.0446,
      "reward": 1.8599339020252228,
      "reward_std": 0.11604845726978966,
      "rewards/format_reward": 0.9775,
      "rewards/iqa_reward_score": 0.882433907315135,
      "step": 8500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 27.22,
      "epoch": 4.419267822736031,
      "grad_norm": 7.122275917364418,
      "kl": 1.02694091796875,
      "learning_rate": 5.580678314491264e-07,
      "loss": 0.0411,
      "reward": 1.8712499970197678,
      "reward_std": 0.11151634595706128,
      "rewards/format_reward": 0.98,
      "rewards/iqa_reward_score": 0.8912500011175871,
      "step": 8600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.8384375,
      "epoch": 4.5220295439948615,
      "grad_norm": 36.76062498057171,
      "kl": 1.05457763671875,
      "learning_rate": 5.477903391572456e-07,
      "loss": 0.0422,
      "reward": 1.8598616697639228,
      "reward_std": 0.1202874470622919,
      "rewards/format_reward": 0.9740625,
      "rewards/iqa_reward_score": 0.8857991739735007,
      "step": 8800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 29.40375,
      "epoch": 4.624791265253693,
      "grad_norm": 11.169409676243108,
      "kl": 1.180712890625,
      "learning_rate": 5.375128468653648e-07,
      "loss": 0.0472,
      "reward": 1.8395798132568597,
      "reward_std": 0.1512143662823655,
      "rewards/format_reward": 0.96625,
      "rewards/iqa_reward_score": 0.8733298133686185,
      "step": 9000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 29.4003125,
      "epoch": 4.727552986512524,
      "grad_norm": 7.381446678871813,
      "kl": 1.0400537109375,
      "learning_rate": 5.27235354573484e-07,
      "loss": 0.0416,
      "reward": 1.8565457689017058,
      "reward_std": 0.13243299395922803,
      "rewards/format_reward": 0.973125,
      "rewards/iqa_reward_score": 0.8834207731485367,
      "step": 9200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 28.5815625,
      "epoch": 4.830314707771355,
      "grad_norm": 13.559802380662985,
      "kl": 1.03513427734375,
      "learning_rate": 5.169578622816033e-07,
      "loss": 0.0414,
      "reward": 1.864812575764954,
      "reward_std": 0.12085459077716224,
      "rewards/format_reward": 0.9753125,
      "rewards/iqa_reward_score": 0.8895000796020031,
      "step": 9400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 13.53,
      "epoch": 4.881695568400771,
      "grad_norm": 42.48053012611243,
      "kl": 1.8257421875,
      "learning_rate": 5.118191161356628e-07,
      "loss": 0.073,
      "reward": 1.8785359343886376,
      "reward_std": 0.08481779177935095,
      "rewards/format_reward": 0.98875,
      "rewards/iqa_reward_score": 0.8897859378904104,
      "step": 9500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 18.813125,
      "epoch": 4.933076429030186,
      "grad_norm": 47.10713474426497,
      "kl": 2.1516796875,
      "learning_rate": 5.066803699897225e-07,
      "loss": 0.0861,
      "reward": 1.8353367167711259,
      "reward_std": 0.18400538112502546,
      "rewards/format_reward": 0.956875,
      "rewards/iqa_reward_score": 0.8784617200121283,
      "step": 9600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.98125,
      "epoch": 5.035966602440591,
      "grad_norm": 29.850445718834017,
      "kl": 2.100009765625,
      "learning_rate": 4.964028776978417e-07,
      "loss": 0.084,
      "reward": 1.8705265574902297,
      "reward_std": 0.10621097847208148,
      "rewards/format_reward": 0.9790625,
      "rewards/iqa_reward_score": 0.8914640637487173,
      "step": 9800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.6721875,
      "epoch": 5.138728323699422,
      "grad_norm": 24.790938831821737,
      "kl": 2.140517578125,
      "learning_rate": 4.861253854059609e-07,
      "loss": 0.0856,
      "reward": 1.8829230420291423,
      "reward_std": 0.10707416723642382,
      "rewards/format_reward": 0.9803125,
      "rewards/iqa_reward_score": 0.9026105474308133,
      "step": 10000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 19.6746875,
      "epoch": 5.241490044958253,
      "grad_norm": 102.41178221797948,
      "kl": 2.09017578125,
      "learning_rate": 4.7584789311408014e-07,
      "loss": 0.0836,
      "reward": 1.8718234330415726,
      "reward_std": 0.12345175031165127,
      "rewards/format_reward": 0.97375,
      "rewards/iqa_reward_score": 0.8980734394118189,
      "step": 10200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.186875,
      "epoch": 5.344251766217084,
      "grad_norm": 10.598097581329563,
      "kl": 1.971005859375,
      "learning_rate": 4.6557040082219935e-07,
      "loss": 0.0788,
      "reward": 1.8742554614692926,
      "reward_std": 0.10413653948286082,
      "rewards/format_reward": 0.9796875,
      "rewards/iqa_reward_score": 0.8945679685845971,
      "step": 10400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 19.570625,
      "epoch": 5.3956326268465,
      "grad_norm": 15.65196245891987,
      "kl": 1.966328125,
      "learning_rate": 4.60431654676259e-07,
      "loss": 0.0786,
      "reward": 1.873755464553833,
      "reward_std": 0.1017152764700586,
      "rewards/format_reward": 0.98125,
      "rewards/iqa_reward_score": 0.8925054696947337,
      "step": 10500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 16.39375,
      "epoch": 5.447013487475915,
      "grad_norm": 17.05777069418545,
      "kl": 2.1209765625,
      "learning_rate": 4.552929085303186e-07,
      "loss": 0.0848,
      "reward": 1.8873523423075675,
      "reward_std": 0.07782931060733972,
      "rewards/format_reward": 0.988125,
      "rewards/iqa_reward_score": 0.8992273437976838,
      "step": 10600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 19.811875,
      "epoch": 5.549775208734746,
      "grad_norm": 31.92408805805376,
      "kl": 2.054736328125,
      "learning_rate": 4.450154162384378e-07,
      "loss": 0.0822,
      "reward": 1.870059761852026,
      "reward_std": 0.10109247839223827,
      "rewards/format_reward": 0.9815625,
      "rewards/iqa_reward_score": 0.8884972659498453,
      "step": 10800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 21.2728125,
      "epoch": 5.652536929993578,
      "grad_norm": 50.24699385323065,
      "kl": 1.977314453125,
      "learning_rate": 4.34737923946557e-07,
      "loss": 0.0791,
      "reward": 1.880501558482647,
      "reward_std": 0.09021925082866801,
      "rewards/format_reward": 0.984375,
      "rewards/iqa_reward_score": 0.8961265632137656,
      "step": 11000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 18.17,
      "epoch": 5.755298651252408,
      "grad_norm": 16.003754754439743,
      "kl": 2.043388671875,
      "learning_rate": 4.2446043165467627e-07,
      "loss": 0.0817,
      "reward": 1.879718798995018,
      "reward_std": 0.09361302960853209,
      "rewards/format_reward": 0.9821875,
      "rewards/iqa_reward_score": 0.8975313076004386,
      "step": 11200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 19.6184375,
      "epoch": 5.85806037251124,
      "grad_norm": 42.24292206080392,
      "kl": 1.938994140625,
      "learning_rate": 4.141829393627955e-07,
      "loss": 0.0776,
      "reward": 1.8871759340167045,
      "reward_std": 0.08852186787073152,
      "rewards/format_reward": 0.9846875,
      "rewards/iqa_reward_score": 0.902488438487053,
      "step": 11400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.74625,
      "epoch": 5.909441233140655,
      "grad_norm": 147.2735619120744,
      "kl": 1.90677734375,
      "learning_rate": 4.090441932168551e-07,
      "loss": 0.0763,
      "reward": 1.9003723841905593,
      "reward_std": 0.06933337734662928,
      "rewards/format_reward": 0.98875,
      "rewards/iqa_reward_score": 0.9116223914176226,
      "step": 11500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.711875,
      "epoch": 5.960822093770071,
      "grad_norm": 69.36856845472299,
      "kl": 1.9169140625,
      "learning_rate": 4.039054470709147e-07,
      "loss": 0.0767,
      "reward": 1.8851546820998193,
      "reward_std": 0.09332547818601597,
      "rewards/format_reward": 0.983125,
      "rewards/iqa_reward_score": 0.9020296874642372,
      "step": 11600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.3178125,
      "epoch": 6.063712267180475,
      "grad_norm": 20.48968236042837,
      "kl": 1.988623046875,
      "learning_rate": 3.936279547790339e-07,
      "loss": 0.0796,
      "reward": 1.8889730402827263,
      "reward_std": 0.08705994199059205,
      "rewards/format_reward": 0.9859375,
      "rewards/iqa_reward_score": 0.9030355482548476,
      "step": 11800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.341875,
      "epoch": 6.166473988439306,
      "grad_norm": 17.947997271086912,
      "kl": 1.970908203125,
      "learning_rate": 3.8335046248715314e-07,
      "loss": 0.0788,
      "reward": 1.8987746007740498,
      "reward_std": 0.07162293007655535,
      "rewards/format_reward": 0.9884375,
      "rewards/iqa_reward_score": 0.9103371092304587,
      "step": 12000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.67625,
      "epoch": 6.269235709698138,
      "grad_norm": 222.44736959018252,
      "kl": 1.915966796875,
      "learning_rate": 3.7307297019527235e-07,
      "loss": 0.0766,
      "reward": 1.8926163981109858,
      "reward_std": 0.07730939569650218,
      "rewards/format_reward": 0.9865625,
      "rewards/iqa_reward_score": 0.9060539072006941,
      "step": 12200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.5784375,
      "epoch": 6.371997430956968,
      "grad_norm": 13.356785918610642,
      "kl": 1.961318359375,
      "learning_rate": 3.6279547790339155e-07,
      "loss": 0.0784,
      "reward": 1.8869195252656936,
      "reward_std": 0.09981291456278996,
      "rewards/format_reward": 0.979375,
      "rewards/iqa_reward_score": 0.9075445308163762,
      "step": 12400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 22.4525,
      "epoch": 6.423378291586384,
      "grad_norm": 30.8756155693914,
      "kl": 1.8610546875,
      "learning_rate": 3.5765673175745115e-07,
      "loss": 0.0744,
      "reward": 1.9010210880637168,
      "reward_std": 0.07242356748203747,
      "rewards/format_reward": 0.98875,
      "rewards/iqa_reward_score": 0.9122710923105478,
      "step": 12500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 24.38,
      "epoch": 6.4747591522158,
      "grad_norm": 7.829345980386679,
      "kl": 1.74701171875,
      "learning_rate": 3.5251798561151076e-07,
      "loss": 0.0699,
      "reward": 1.8803515616059303,
      "reward_std": 0.10600787395669613,
      "rewards/format_reward": 0.979375,
      "rewards/iqa_reward_score": 0.9009765643626452,
      "step": 12600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 23.6565625,
      "epoch": 6.577520873474631,
      "grad_norm": 20.29606750367699,
      "kl": 1.82380859375,
      "learning_rate": 3.4224049331963e-07,
      "loss": 0.0729,
      "reward": 1.8765503884851933,
      "reward_std": 0.10835037114477018,
      "rewards/format_reward": 0.9771875,
      "rewards/iqa_reward_score": 0.8993628909811378,
      "step": 12800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 21.75625,
      "epoch": 6.680282594733462,
      "grad_norm": 13.068159937640354,
      "kl": 1.868056640625,
      "learning_rate": 3.319630010277492e-07,
      "loss": 0.0747,
      "reward": 1.8851296819746495,
      "reward_std": 0.09881990624518949,
      "rewards/format_reward": 0.9815625,
      "rewards/iqa_reward_score": 0.9035671878978611,
      "step": 13000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 18.6534375,
      "epoch": 6.783044315992293,
      "grad_norm": 43.271540826002,
      "kl": 1.896494140625,
      "learning_rate": 3.216855087358684e-07,
      "loss": 0.0759,
      "reward": 1.8839531208574771,
      "reward_std": 0.10083580034945044,
      "rewards/format_reward": 0.980625,
      "rewards/iqa_reward_score": 0.9033281239122153,
      "step": 13200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.59375,
      "epoch": 6.885806037251124,
      "grad_norm": 51.02501405917888,
      "kl": 1.973603515625,
      "learning_rate": 3.114080164439877e-07,
      "loss": 0.0789,
      "reward": 1.8915468671917914,
      "reward_std": 0.08674899358520634,
      "rewards/format_reward": 0.9840625,
      "rewards/iqa_reward_score": 0.9074843747541308,
      "step": 13400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.875,
      "epoch": 6.937186897880539,
      "grad_norm": 101.7576095129992,
      "kl": 1.999921875,
      "learning_rate": 3.062692702980473e-07,
      "loss": 0.08,
      "reward": 1.8977671816945076,
      "reward_std": 0.07746992219588719,
      "rewards/format_reward": 0.9875,
      "rewards/iqa_reward_score": 0.9102671888470649,
      "step": 13500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 19.418125,
      "epoch": 6.988567758509955,
      "grad_norm": 31.809990191429367,
      "kl": 1.88990234375,
      "learning_rate": 3.011305241521069e-07,
      "loss": 0.0756,
      "reward": 1.8850296795368195,
      "reward_std": 0.08940189514425584,
      "rewards/format_reward": 0.983125,
      "rewards/iqa_reward_score": 0.9019046877324581,
      "step": 13600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 20.1825,
      "epoch": 7.09145793192036,
      "grad_norm": 13.227429266121407,
      "kl": 1.850634765625,
      "learning_rate": 2.908530318602261e-07,
      "loss": 0.074,
      "reward": 1.8925906175374985,
      "reward_std": 0.09375914162839763,
      "rewards/format_reward": 0.9828125,
      "rewards/iqa_reward_score": 0.9097781255841255,
      "step": 13800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 18.1321875,
      "epoch": 7.194219653179191,
      "grad_norm": 10.599748254211335,
      "kl": 1.89337890625,
      "learning_rate": 2.805755395683453e-07,
      "loss": 0.0757,
      "reward": 1.899733590334654,
      "reward_std": 0.08313908010721206,
      "rewards/format_reward": 0.984375,
      "rewards/iqa_reward_score": 0.9153585939109325,
      "step": 14000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 19.83375,
      "epoch": 7.296981374438022,
      "grad_norm": 103.30184204894014,
      "kl": 1.8382421875,
      "learning_rate": 2.7029804727646455e-07,
      "loss": 0.0735,
      "reward": 1.8822011659294366,
      "reward_std": 0.1061160612545791,
      "rewards/format_reward": 0.9790625,
      "rewards/iqa_reward_score": 0.9031386712566019,
      "step": 14200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 18.7328125,
      "epoch": 7.399743095696853,
      "grad_norm": 21.624795896150882,
      "kl": 1.8940625,
      "learning_rate": 2.6002055498458376e-07,
      "loss": 0.0758,
      "reward": 1.8954588214308024,
      "reward_std": 0.08989560850925045,
      "rewards/format_reward": 0.985,
      "rewards/iqa_reward_score": 0.9104588272795081,
      "step": 14400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 18.505625,
      "epoch": 7.451123956326269,
      "grad_norm": 35.276748712299856,
      "kl": 1.89419921875,
      "learning_rate": 2.5488180883864336e-07,
      "loss": 0.0758,
      "reward": 1.884902337193489,
      "reward_std": 0.10493175498064375,
      "rewards/format_reward": 0.979375,
      "rewards/iqa_reward_score": 0.905527343377471,
      "step": 14500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 16.82625,
      "epoch": 7.502504816955684,
      "grad_norm": 84.53198174061639,
      "kl": 2.06251953125,
      "learning_rate": 2.4974306269270296e-07,
      "loss": 0.0825,
      "reward": 1.8909136363863945,
      "reward_std": 0.10835257644532248,
      "rewards/format_reward": 0.978125,
      "rewards/iqa_reward_score": 0.9127886415272951,
      "step": 14600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.5196875,
      "epoch": 7.605266538214515,
      "grad_norm": 10.589764450261264,
      "kl": 2.156630859375,
      "learning_rate": 2.3946557040082217e-07,
      "loss": 0.0863,
      "reward": 1.8696386666595937,
      "reward_std": 0.12423644962807884,
      "rewards/format_reward": 0.9715625,
      "rewards/iqa_reward_score": 0.8980761723965407,
      "step": 14800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 16.7325,
      "epoch": 7.7080282594733465,
      "grad_norm": 10.913131914126758,
      "kl": 2.070146484375,
      "learning_rate": 2.2918807810894143e-07,
      "loss": 0.0828,
      "reward": 1.8887575732171535,
      "reward_std": 0.11135890247678618,
      "rewards/format_reward": 0.9775,
      "rewards/iqa_reward_score": 0.9112575780972838,
      "step": 15000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 15.9571875,
      "epoch": 7.810789980732177,
      "grad_norm": 45.002154023589725,
      "kl": 2.196806640625,
      "learning_rate": 2.1891058581706063e-07,
      "loss": 0.0879,
      "reward": 1.892361634373665,
      "reward_std": 0.1102386135366396,
      "rewards/format_reward": 0.9771875,
      "rewards/iqa_reward_score": 0.9151741417497397,
      "step": 15200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 16.425625,
      "epoch": 7.913551701991008,
      "grad_norm": 15.653934750546377,
      "kl": 2.142041015625,
      "learning_rate": 2.0863309352517986e-07,
      "loss": 0.0857,
      "reward": 1.8734804604202508,
      "reward_std": 0.12232352063700092,
      "rewards/format_reward": 0.97375,
      "rewards/iqa_reward_score": 0.8997304691933096,
      "step": 15400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.148125,
      "epoch": 7.964932562620424,
      "grad_norm": 97.67216959654685,
      "kl": 2.11296875,
      "learning_rate": 2.0349434737923946e-07,
      "loss": 0.0845,
      "reward": 1.873739056289196,
      "reward_std": 0.12005866145074834,
      "rewards/format_reward": 0.976875,
      "rewards/iqa_reward_score": 0.8968640621006488,
      "step": 15500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.506875,
      "epoch": 8.016441875401412,
      "grad_norm": 105.09965514480388,
      "kl": 2.1259375,
      "learning_rate": 1.9835560123329907e-07,
      "loss": 0.085,
      "reward": 1.876248899102211,
      "reward_std": 0.11951107540109661,
      "rewards/format_reward": 0.976875,
      "rewards/iqa_reward_score": 0.8993739089369774,
      "step": 15600
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.133125,
      "epoch": 8.119203596660244,
      "grad_norm": 6.128097746188641,
      "kl": 2.223037109375,
      "learning_rate": 1.880781089414183e-07,
      "loss": 0.0889,
      "reward": 1.880941744968295,
      "reward_std": 0.12711579945113044,
      "rewards/format_reward": 0.9734375,
      "rewards/iqa_reward_score": 0.9075042496249079,
      "step": 15800
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.825,
      "epoch": 8.221965317919075,
      "grad_norm": 21.00097459983442,
      "kl": 2.175458984375,
      "learning_rate": 1.778006166495375e-07,
      "loss": 0.087,
      "reward": 1.8709558783471585,
      "reward_std": 0.14294033588666935,
      "rewards/format_reward": 0.9684375,
      "rewards/iqa_reward_score": 0.9025183825194836,
      "step": 16000
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.31625,
      "epoch": 8.324727039177906,
      "grad_norm": 32.175418316210305,
      "kl": 2.24296875,
      "learning_rate": 1.6752312435765673e-07,
      "loss": 0.0897,
      "reward": 1.8707203871756792,
      "reward_std": 0.14462180348142284,
      "rewards/format_reward": 0.9675,
      "rewards/iqa_reward_score": 0.9032203914225101,
      "step": 16200
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.0921875,
      "epoch": 8.427488760436738,
      "grad_norm": 19.585104477991443,
      "kl": 2.394765625,
      "learning_rate": 1.5724563206577597e-07,
      "loss": 0.0958,
      "reward": 1.865453900322318,
      "reward_std": 0.16036579100909876,
      "rewards/format_reward": 0.9640625,
      "rewards/iqa_reward_score": 0.9013914061337709,
      "step": 16400
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 18.23625,
      "epoch": 8.478869621066153,
      "grad_norm": 52.50729568021014,
      "kl": 2.45123046875,
      "learning_rate": 1.5210688591983557e-07,
      "loss": 0.098,
      "reward": 1.8535247252881526,
      "reward_std": 0.1715104495617561,
      "rewards/format_reward": 0.96125,
      "rewards/iqa_reward_score": 0.892274733260274,
      "step": 16500
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 17.33125,
      "epoch": 8.530250481695568,
      "grad_norm": 16.036354362208947,
      "kl": 2.4987109375,
      "learning_rate": 1.4696813977389517e-07,
      "loss": 0.1,
      "reward": 1.8759004624187947,
      "reward_std": 0.12627161706855985,
      "rewards/format_reward": 0.97375,
      "rewards/iqa_reward_score": 0.9021504689753056,
      "step": 16600
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 19460,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}