{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.887459807073955,
"eval_steps": 500,
"global_step": 190,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02572347266881029,
"grad_norm": 612.1110229492188,
"learning_rate": 5.000000000000001e-07,
"loss": 14.1364,
"num_input_tokens_seen": 6512,
"step": 1
},
{
"epoch": 0.05144694533762058,
"grad_norm": 608.3916625976562,
"learning_rate": 1.0000000000000002e-06,
"loss": 13.7804,
"num_input_tokens_seen": 12848,
"step": 2
},
{
"epoch": 0.07717041800643087,
"grad_norm": 622.3192138671875,
"learning_rate": 1.5e-06,
"loss": 13.4871,
"num_input_tokens_seen": 19248,
"step": 3
},
{
"epoch": 0.10289389067524116,
"grad_norm": 611.6488647460938,
"learning_rate": 2.0000000000000003e-06,
"loss": 12.79,
"num_input_tokens_seen": 25344,
"step": 4
},
{
"epoch": 0.12861736334405144,
"grad_norm": 508.9140319824219,
"learning_rate": 2.5e-06,
"loss": 9.2748,
"num_input_tokens_seen": 31968,
"step": 5
},
{
"epoch": 0.15434083601286175,
"grad_norm": 630.509521484375,
"learning_rate": 3e-06,
"loss": 6.5585,
"num_input_tokens_seen": 38160,
"step": 6
},
{
"epoch": 0.18006430868167203,
"grad_norm": 512.7638549804688,
"learning_rate": 3.5e-06,
"loss": 5.3984,
"num_input_tokens_seen": 44144,
"step": 7
},
{
"epoch": 0.2057877813504823,
"grad_norm": 220.47471618652344,
"learning_rate": 4.000000000000001e-06,
"loss": 1.9363,
"num_input_tokens_seen": 50432,
"step": 8
},
{
"epoch": 0.2315112540192926,
"grad_norm": 114.83863830566406,
"learning_rate": 4.5e-06,
"loss": 0.6783,
"num_input_tokens_seen": 56928,
"step": 9
},
{
"epoch": 0.2572347266881029,
"grad_norm": 383.7873229980469,
"learning_rate": 5e-06,
"loss": 2.9945,
"num_input_tokens_seen": 63344,
"step": 10
},
{
"epoch": 0.2829581993569132,
"grad_norm": 53.58182907104492,
"learning_rate": 4.9996192378909785e-06,
"loss": 0.2916,
"num_input_tokens_seen": 69568,
"step": 11
},
{
"epoch": 0.3086816720257235,
"grad_norm": 200.5736083984375,
"learning_rate": 4.99847706754774e-06,
"loss": 2.2775,
"num_input_tokens_seen": 75824,
"step": 12
},
{
"epoch": 0.33440514469453375,
"grad_norm": 89.13434600830078,
"learning_rate": 4.9965738368864345e-06,
"loss": 0.3757,
"num_input_tokens_seen": 82160,
"step": 13
},
{
"epoch": 0.36012861736334406,
"grad_norm": 149.89199829101562,
"learning_rate": 4.993910125649561e-06,
"loss": 1.9543,
"num_input_tokens_seen": 88624,
"step": 14
},
{
"epoch": 0.3858520900321543,
"grad_norm": 93.02680206298828,
"learning_rate": 4.990486745229364e-06,
"loss": 0.7398,
"num_input_tokens_seen": 94800,
"step": 15
},
{
"epoch": 0.4115755627009646,
"grad_norm": 274.1518249511719,
"learning_rate": 4.986304738420684e-06,
"loss": 1.1868,
"num_input_tokens_seen": 101360,
"step": 16
},
{
"epoch": 0.43729903536977494,
"grad_norm": 75.59544372558594,
"learning_rate": 4.981365379103306e-06,
"loss": 0.5418,
"num_input_tokens_seen": 107488,
"step": 17
},
{
"epoch": 0.4630225080385852,
"grad_norm": 21.915510177612305,
"learning_rate": 4.975670171853926e-06,
"loss": 0.2263,
"num_input_tokens_seen": 113920,
"step": 18
},
{
"epoch": 0.4887459807073955,
"grad_norm": 12.356107711791992,
"learning_rate": 4.9692208514878445e-06,
"loss": 0.1612,
"num_input_tokens_seen": 120352,
"step": 19
},
{
"epoch": 0.5144694533762058,
"grad_norm": 49.00923538208008,
"learning_rate": 4.962019382530521e-06,
"loss": 0.3299,
"num_input_tokens_seen": 126880,
"step": 20
},
{
"epoch": 0.5401929260450161,
"grad_norm": 35.158695220947266,
"learning_rate": 4.9540679586191605e-06,
"loss": 0.2013,
"num_input_tokens_seen": 133344,
"step": 21
},
{
"epoch": 0.5659163987138264,
"grad_norm": 30.501089096069336,
"learning_rate": 4.9453690018345144e-06,
"loss": 0.2446,
"num_input_tokens_seen": 139744,
"step": 22
},
{
"epoch": 0.5916398713826366,
"grad_norm": 29.609901428222656,
"learning_rate": 4.935925161963089e-06,
"loss": 0.2235,
"num_input_tokens_seen": 146144,
"step": 23
},
{
"epoch": 0.617363344051447,
"grad_norm": 7.487663745880127,
"learning_rate": 4.925739315689991e-06,
"loss": 0.116,
"num_input_tokens_seen": 152240,
"step": 24
},
{
"epoch": 0.6430868167202572,
"grad_norm": 37.3343620300293,
"learning_rate": 4.914814565722671e-06,
"loss": 0.2179,
"num_input_tokens_seen": 158512,
"step": 25
},
{
"epoch": 0.6688102893890675,
"grad_norm": 19.37504768371582,
"learning_rate": 4.903154239845798e-06,
"loss": 0.1414,
"num_input_tokens_seen": 164784,
"step": 26
},
{
"epoch": 0.6945337620578779,
"grad_norm": 7.562462329864502,
"learning_rate": 4.890761889907589e-06,
"loss": 0.1181,
"num_input_tokens_seen": 171312,
"step": 27
},
{
"epoch": 0.7202572347266881,
"grad_norm": 45.42784881591797,
"learning_rate": 4.8776412907378845e-06,
"loss": 0.2753,
"num_input_tokens_seen": 177856,
"step": 28
},
{
"epoch": 0.7459807073954984,
"grad_norm": 42.35463333129883,
"learning_rate": 4.863796438998293e-06,
"loss": 0.3255,
"num_input_tokens_seen": 184368,
"step": 29
},
{
"epoch": 0.7717041800643086,
"grad_norm": 29.56382179260254,
"learning_rate": 4.849231551964771e-06,
"loss": 0.2352,
"num_input_tokens_seen": 190928,
"step": 30
},
{
"epoch": 0.797427652733119,
"grad_norm": 10.221027374267578,
"learning_rate": 4.833951066243004e-06,
"loss": 0.063,
"num_input_tokens_seen": 197232,
"step": 31
},
{
"epoch": 0.8231511254019293,
"grad_norm": 19.372211456298828,
"learning_rate": 4.817959636416969e-06,
"loss": 0.2042,
"num_input_tokens_seen": 203584,
"step": 32
},
{
"epoch": 0.8488745980707395,
"grad_norm": 15.362005233764648,
"learning_rate": 4.801262133631101e-06,
"loss": 0.1364,
"num_input_tokens_seen": 210128,
"step": 33
},
{
"epoch": 0.8745980707395499,
"grad_norm": 17.515932083129883,
"learning_rate": 4.783863644106502e-06,
"loss": 0.0934,
"num_input_tokens_seen": 216512,
"step": 34
},
{
"epoch": 0.9003215434083601,
"grad_norm": 10.493622779846191,
"learning_rate": 4.765769467591626e-06,
"loss": 0.1332,
"num_input_tokens_seen": 222928,
"step": 35
},
{
"epoch": 0.9260450160771704,
"grad_norm": 21.616741180419922,
"learning_rate": 4.746985115747918e-06,
"loss": 0.1595,
"num_input_tokens_seen": 229232,
"step": 36
},
{
"epoch": 0.9517684887459807,
"grad_norm": 18.24679946899414,
"learning_rate": 4.72751631047092e-06,
"loss": 0.1528,
"num_input_tokens_seen": 235680,
"step": 37
},
{
"epoch": 0.977491961414791,
"grad_norm": 8.862295150756836,
"learning_rate": 4.707368982147318e-06,
"loss": 0.1342,
"num_input_tokens_seen": 242192,
"step": 38
},
{
"epoch": 1.0032154340836013,
"grad_norm": 25.429838180541992,
"learning_rate": 4.68654926784849e-06,
"loss": 0.1586,
"num_input_tokens_seen": 248672,
"step": 39
},
{
"epoch": 1.0289389067524115,
"grad_norm": 9.540814399719238,
"learning_rate": 4.665063509461098e-06,
"loss": 0.1072,
"num_input_tokens_seen": 255072,
"step": 40
},
{
"epoch": 1.0546623794212218,
"grad_norm": 3.9775660037994385,
"learning_rate": 4.642918251755281e-06,
"loss": 0.0357,
"num_input_tokens_seen": 261584,
"step": 41
},
{
"epoch": 1.0803858520900322,
"grad_norm": 8.192290306091309,
"learning_rate": 4.620120240391065e-06,
"loss": 0.06,
"num_input_tokens_seen": 268032,
"step": 42
},
{
"epoch": 1.1061093247588425,
"grad_norm": 6.468256950378418,
"learning_rate": 4.596676419863561e-06,
"loss": 0.0902,
"num_input_tokens_seen": 274560,
"step": 43
},
{
"epoch": 1.1318327974276527,
"grad_norm": 4.978611469268799,
"learning_rate": 4.572593931387604e-06,
"loss": 0.0202,
"num_input_tokens_seen": 280960,
"step": 44
},
{
"epoch": 1.157556270096463,
"grad_norm": 7.448028564453125,
"learning_rate": 4.54788011072248e-06,
"loss": 0.038,
"num_input_tokens_seen": 287104,
"step": 45
},
{
"epoch": 1.1832797427652733,
"grad_norm": 10.028037071228027,
"learning_rate": 4.522542485937369e-06,
"loss": 0.0379,
"num_input_tokens_seen": 293520,
"step": 46
},
{
"epoch": 1.2090032154340835,
"grad_norm": 13.968883514404297,
"learning_rate": 4.496588775118232e-06,
"loss": 0.0742,
"num_input_tokens_seen": 299936,
"step": 47
},
{
"epoch": 1.234726688102894,
"grad_norm": 8.629486083984375,
"learning_rate": 4.470026884016805e-06,
"loss": 0.0658,
"num_input_tokens_seen": 305936,
"step": 48
},
{
"epoch": 1.2604501607717042,
"grad_norm": 11.009612083435059,
"learning_rate": 4.442864903642428e-06,
"loss": 0.0336,
"num_input_tokens_seen": 312528,
"step": 49
},
{
"epoch": 1.2861736334405145,
"grad_norm": 7.794949531555176,
"learning_rate": 4.415111107797445e-06,
"loss": 0.1021,
"num_input_tokens_seen": 318752,
"step": 50
},
{
"epoch": 1.3118971061093248,
"grad_norm": 12.327733039855957,
"learning_rate": 4.386773950556931e-06,
"loss": 0.1312,
"num_input_tokens_seen": 325088,
"step": 51
},
{
"epoch": 1.337620578778135,
"grad_norm": 8.146430969238281,
"learning_rate": 4.357862063693486e-06,
"loss": 0.0665,
"num_input_tokens_seen": 331424,
"step": 52
},
{
"epoch": 1.3633440514469453,
"grad_norm": 6.919386386871338,
"learning_rate": 4.328384254047927e-06,
"loss": 0.0679,
"num_input_tokens_seen": 337776,
"step": 53
},
{
"epoch": 1.3890675241157555,
"grad_norm": 2.923135280609131,
"learning_rate": 4.2983495008466285e-06,
"loss": 0.0579,
"num_input_tokens_seen": 344000,
"step": 54
},
{
"epoch": 1.414790996784566,
"grad_norm": 5.682593822479248,
"learning_rate": 4.267766952966369e-06,
"loss": 0.0542,
"num_input_tokens_seen": 350528,
"step": 55
},
{
"epoch": 1.4405144694533762,
"grad_norm": 5.936847686767578,
"learning_rate": 4.236645926147493e-06,
"loss": 0.0476,
"num_input_tokens_seen": 356896,
"step": 56
},
{
"epoch": 1.4662379421221865,
"grad_norm": 3.671848773956299,
"learning_rate": 4.204995900156247e-06,
"loss": 0.0613,
"num_input_tokens_seen": 363376,
"step": 57
},
{
"epoch": 1.4919614147909968,
"grad_norm": 6.306807994842529,
"learning_rate": 4.172826515897146e-06,
"loss": 0.0995,
"num_input_tokens_seen": 369680,
"step": 58
},
{
"epoch": 1.517684887459807,
"grad_norm": 7.488581657409668,
"learning_rate": 4.140147572476269e-06,
"loss": 0.0532,
"num_input_tokens_seen": 375904,
"step": 59
},
{
"epoch": 1.5434083601286175,
"grad_norm": 8.461729049682617,
"learning_rate": 4.106969024216348e-06,
"loss": 0.0824,
"num_input_tokens_seen": 382304,
"step": 60
},
{
"epoch": 1.5691318327974275,
"grad_norm": 4.186163902282715,
"learning_rate": 4.073300977624594e-06,
"loss": 0.0499,
"num_input_tokens_seen": 388688,
"step": 61
},
{
"epoch": 1.594855305466238,
"grad_norm": 4.451269149780273,
"learning_rate": 4.039153688314146e-06,
"loss": 0.0413,
"num_input_tokens_seen": 395152,
"step": 62
},
{
"epoch": 1.6205787781350482,
"grad_norm": 7.160181999206543,
"learning_rate": 4.0045375578801216e-06,
"loss": 0.0637,
"num_input_tokens_seen": 401728,
"step": 63
},
{
"epoch": 1.6463022508038585,
"grad_norm": 8.231119155883789,
"learning_rate": 3.969463130731183e-06,
"loss": 0.0529,
"num_input_tokens_seen": 407904,
"step": 64
},
{
"epoch": 1.6720257234726688,
"grad_norm": 5.919021129608154,
"learning_rate": 3.933941090877615e-06,
"loss": 0.0474,
"num_input_tokens_seen": 414240,
"step": 65
},
{
"epoch": 1.697749196141479,
"grad_norm": 5.378300189971924,
"learning_rate": 3.897982258676867e-06,
"loss": 0.0649,
"num_input_tokens_seen": 420448,
"step": 66
},
{
"epoch": 1.7234726688102895,
"grad_norm": 9.992879867553711,
"learning_rate": 3.861597587537568e-06,
"loss": 0.0505,
"num_input_tokens_seen": 426784,
"step": 67
},
{
"epoch": 1.7491961414790995,
"grad_norm": 5.876017093658447,
"learning_rate": 3.824798160583012e-06,
"loss": 0.0621,
"num_input_tokens_seen": 432816,
"step": 68
},
{
"epoch": 1.77491961414791,
"grad_norm": 8.401307106018066,
"learning_rate": 3.787595187275136e-06,
"loss": 0.0769,
"num_input_tokens_seen": 439232,
"step": 69
},
{
"epoch": 1.8006430868167203,
"grad_norm": 8.439587593078613,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0435,
"num_input_tokens_seen": 445792,
"step": 70
},
{
"epoch": 1.8263665594855305,
"grad_norm": 6.123202323913574,
"learning_rate": 3.7120240506158433e-06,
"loss": 0.0673,
"num_input_tokens_seen": 452400,
"step": 71
},
{
"epoch": 1.852090032154341,
"grad_norm": 12.049100875854492,
"learning_rate": 3.6736789069647273e-06,
"loss": 0.1316,
"num_input_tokens_seen": 458528,
"step": 72
},
{
"epoch": 1.877813504823151,
"grad_norm": 6.853906631469727,
"learning_rate": 3.634976249348867e-06,
"loss": 0.0531,
"num_input_tokens_seen": 464976,
"step": 73
},
{
"epoch": 1.9035369774919615,
"grad_norm": 3.6796371936798096,
"learning_rate": 3.595927866972694e-06,
"loss": 0.0287,
"num_input_tokens_seen": 471440,
"step": 74
},
{
"epoch": 1.9292604501607717,
"grad_norm": 11.764636039733887,
"learning_rate": 3.556545654351749e-06,
"loss": 0.0648,
"num_input_tokens_seen": 477776,
"step": 75
},
{
"epoch": 1.954983922829582,
"grad_norm": 15.143081665039062,
"learning_rate": 3.516841607689501e-06,
"loss": 0.1211,
"num_input_tokens_seen": 484096,
"step": 76
},
{
"epoch": 1.9807073954983923,
"grad_norm": 13.158181190490723,
"learning_rate": 3.476827821223184e-06,
"loss": 0.0879,
"num_input_tokens_seen": 490176,
"step": 77
},
{
"epoch": 2.0064308681672025,
"grad_norm": 5.389456748962402,
"learning_rate": 3.436516483539781e-06,
"loss": 0.0227,
"num_input_tokens_seen": 496672,
"step": 78
},
{
"epoch": 2.032154340836013,
"grad_norm": 1.3878227472305298,
"learning_rate": 3.39591987386325e-06,
"loss": 0.0228,
"num_input_tokens_seen": 503088,
"step": 79
},
{
"epoch": 2.057877813504823,
"grad_norm": 4.809704303741455,
"learning_rate": 3.3550503583141726e-06,
"loss": 0.036,
"num_input_tokens_seen": 509472,
"step": 80
},
{
"epoch": 2.0836012861736335,
"grad_norm": 4.475839614868164,
"learning_rate": 3.313920386142892e-06,
"loss": 0.0138,
"num_input_tokens_seen": 515728,
"step": 81
},
{
"epoch": 2.1093247588424435,
"grad_norm": 9.445964813232422,
"learning_rate": 3.272542485937369e-06,
"loss": 0.0697,
"num_input_tokens_seen": 521904,
"step": 82
},
{
"epoch": 2.135048231511254,
"grad_norm": 5.295349597930908,
"learning_rate": 3.230929261806842e-06,
"loss": 0.0508,
"num_input_tokens_seen": 527952,
"step": 83
},
{
"epoch": 2.1607717041800645,
"grad_norm": 5.535093307495117,
"learning_rate": 3.189093389542498e-06,
"loss": 0.0088,
"num_input_tokens_seen": 534624,
"step": 84
},
{
"epoch": 2.1864951768488745,
"grad_norm": 1.4181060791015625,
"learning_rate": 3.147047612756302e-06,
"loss": 0.0158,
"num_input_tokens_seen": 541168,
"step": 85
},
{
"epoch": 2.212218649517685,
"grad_norm": 4.84325647354126,
"learning_rate": 3.1048047389991693e-06,
"loss": 0.006,
"num_input_tokens_seen": 547488,
"step": 86
},
{
"epoch": 2.237942122186495,
"grad_norm": 7.397033214569092,
"learning_rate": 3.062377635859663e-06,
"loss": 0.038,
"num_input_tokens_seen": 554272,
"step": 87
},
{
"epoch": 2.2636655948553055,
"grad_norm": 0.08836190402507782,
"learning_rate": 3.019779227044398e-06,
"loss": 0.0004,
"num_input_tokens_seen": 560528,
"step": 88
},
{
"epoch": 2.289389067524116,
"grad_norm": 2.0779523849487305,
"learning_rate": 2.9770224884413625e-06,
"loss": 0.0111,
"num_input_tokens_seen": 566784,
"step": 89
},
{
"epoch": 2.315112540192926,
"grad_norm": 1.6369861364364624,
"learning_rate": 2.9341204441673267e-06,
"loss": 0.0008,
"num_input_tokens_seen": 573344,
"step": 90
},
{
"epoch": 2.3408360128617365,
"grad_norm": 4.53826904296875,
"learning_rate": 2.8910861626005774e-06,
"loss": 0.0182,
"num_input_tokens_seen": 579776,
"step": 91
},
{
"epoch": 2.3665594855305465,
"grad_norm": 5.374476909637451,
"learning_rate": 2.847932752400164e-06,
"loss": 0.0491,
"num_input_tokens_seen": 586096,
"step": 92
},
{
"epoch": 2.392282958199357,
"grad_norm": 1.1637392044067383,
"learning_rate": 2.804673358512869e-06,
"loss": 0.004,
"num_input_tokens_seen": 592528,
"step": 93
},
{
"epoch": 2.418006430868167,
"grad_norm": 1.8136231899261475,
"learning_rate": 2.761321158169134e-06,
"loss": 0.0176,
"num_input_tokens_seen": 598960,
"step": 94
},
{
"epoch": 2.4437299035369775,
"grad_norm": 11.001687049865723,
"learning_rate": 2.717889356869146e-06,
"loss": 0.019,
"num_input_tokens_seen": 605232,
"step": 95
},
{
"epoch": 2.469453376205788,
"grad_norm": 4.748091697692871,
"learning_rate": 2.6743911843603134e-06,
"loss": 0.027,
"num_input_tokens_seen": 611344,
"step": 96
},
{
"epoch": 2.495176848874598,
"grad_norm": 4.990153789520264,
"learning_rate": 2.6308398906073603e-06,
"loss": 0.0354,
"num_input_tokens_seen": 617712,
"step": 97
},
{
"epoch": 2.5209003215434085,
"grad_norm": 10.616851806640625,
"learning_rate": 2.587248741756253e-06,
"loss": 0.0741,
"num_input_tokens_seen": 624208,
"step": 98
},
{
"epoch": 2.5466237942122185,
"grad_norm": 6.855626106262207,
"learning_rate": 2.543631016093209e-06,
"loss": 0.0582,
"num_input_tokens_seen": 630496,
"step": 99
},
{
"epoch": 2.572347266881029,
"grad_norm": 4.2168426513671875,
"learning_rate": 2.5e-06,
"loss": 0.0096,
"num_input_tokens_seen": 637040,
"step": 100
},
{
"epoch": 2.598070739549839,
"grad_norm": 6.813520908355713,
"learning_rate": 2.4563689839067913e-06,
"loss": 0.0263,
"num_input_tokens_seen": 643472,
"step": 101
},
{
"epoch": 2.6237942122186495,
"grad_norm": 0.9154559373855591,
"learning_rate": 2.4127512582437486e-06,
"loss": 0.0121,
"num_input_tokens_seen": 649760,
"step": 102
},
{
"epoch": 2.64951768488746,
"grad_norm": 6.223565101623535,
"learning_rate": 2.3691601093926406e-06,
"loss": 0.0204,
"num_input_tokens_seen": 656096,
"step": 103
},
{
"epoch": 2.67524115755627,
"grad_norm": 2.3013968467712402,
"learning_rate": 2.325608815639687e-06,
"loss": 0.0325,
"num_input_tokens_seen": 662624,
"step": 104
},
{
"epoch": 2.7009646302250805,
"grad_norm": 2.195141077041626,
"learning_rate": 2.2821106431308546e-06,
"loss": 0.0076,
"num_input_tokens_seen": 668768,
"step": 105
},
{
"epoch": 2.7266881028938905,
"grad_norm": 14.679264068603516,
"learning_rate": 2.238678841830867e-06,
"loss": 0.0485,
"num_input_tokens_seen": 674992,
"step": 106
},
{
"epoch": 2.752411575562701,
"grad_norm": 2.058547258377075,
"learning_rate": 2.195326641487132e-06,
"loss": 0.007,
"num_input_tokens_seen": 681120,
"step": 107
},
{
"epoch": 2.778135048231511,
"grad_norm": 2.056870460510254,
"learning_rate": 2.1520672475998374e-06,
"loss": 0.0347,
"num_input_tokens_seen": 687376,
"step": 108
},
{
"epoch": 2.8038585209003215,
"grad_norm": 3.1806418895721436,
"learning_rate": 2.1089138373994226e-06,
"loss": 0.0142,
"num_input_tokens_seen": 693984,
"step": 109
},
{
"epoch": 2.829581993569132,
"grad_norm": 4.262523651123047,
"learning_rate": 2.0658795558326745e-06,
"loss": 0.0414,
"num_input_tokens_seen": 700352,
"step": 110
},
{
"epoch": 2.855305466237942,
"grad_norm": 4.998467445373535,
"learning_rate": 2.022977511558638e-06,
"loss": 0.0419,
"num_input_tokens_seen": 706768,
"step": 111
},
{
"epoch": 2.8810289389067525,
"grad_norm": 4.63454532623291,
"learning_rate": 1.9802207729556023e-06,
"loss": 0.043,
"num_input_tokens_seen": 713248,
"step": 112
},
{
"epoch": 2.906752411575563,
"grad_norm": 2.7104055881500244,
"learning_rate": 1.937622364140338e-06,
"loss": 0.0192,
"num_input_tokens_seen": 719568,
"step": 113
},
{
"epoch": 2.932475884244373,
"grad_norm": 5.943557262420654,
"learning_rate": 1.895195261000831e-06,
"loss": 0.0427,
"num_input_tokens_seen": 725984,
"step": 114
},
{
"epoch": 2.958199356913183,
"grad_norm": 1.0689281225204468,
"learning_rate": 1.852952387243698e-06,
"loss": 0.0116,
"num_input_tokens_seen": 732224,
"step": 115
},
{
"epoch": 2.9839228295819935,
"grad_norm": 3.041832208633423,
"learning_rate": 1.8109066104575023e-06,
"loss": 0.0135,
"num_input_tokens_seen": 738496,
"step": 116
},
{
"epoch": 3.009646302250804,
"grad_norm": 2.22163462638855,
"learning_rate": 1.7690707381931585e-06,
"loss": 0.0128,
"num_input_tokens_seen": 744880,
"step": 117
},
{
"epoch": 3.035369774919614,
"grad_norm": 0.32683318853378296,
"learning_rate": 1.7274575140626318e-06,
"loss": 0.0021,
"num_input_tokens_seen": 751408,
"step": 118
},
{
"epoch": 3.0610932475884245,
"grad_norm": 0.8898715972900391,
"learning_rate": 1.686079613857109e-06,
"loss": 0.0057,
"num_input_tokens_seen": 757632,
"step": 119
},
{
"epoch": 3.0868167202572345,
"grad_norm": 2.082993984222412,
"learning_rate": 1.6449496416858285e-06,
"loss": 0.0197,
"num_input_tokens_seen": 763936,
"step": 120
},
{
"epoch": 3.112540192926045,
"grad_norm": 0.7368833422660828,
"learning_rate": 1.6040801261367494e-06,
"loss": 0.0017,
"num_input_tokens_seen": 770064,
"step": 121
},
{
"epoch": 3.1382636655948555,
"grad_norm": 1.7324296236038208,
"learning_rate": 1.56348351646022e-06,
"loss": 0.0068,
"num_input_tokens_seen": 776176,
"step": 122
},
{
"epoch": 3.1639871382636655,
"grad_norm": 2.4004085063934326,
"learning_rate": 1.5231721787768162e-06,
"loss": 0.0022,
"num_input_tokens_seen": 782464,
"step": 123
},
{
"epoch": 3.189710610932476,
"grad_norm": 3.6328651905059814,
"learning_rate": 1.4831583923105e-06,
"loss": 0.0162,
"num_input_tokens_seen": 789072,
"step": 124
},
{
"epoch": 3.215434083601286,
"grad_norm": 0.8305163979530334,
"learning_rate": 1.443454345648252e-06,
"loss": 0.0014,
"num_input_tokens_seen": 795536,
"step": 125
},
{
"epoch": 3.2411575562700965,
"grad_norm": 6.007119178771973,
"learning_rate": 1.4040721330273063e-06,
"loss": 0.0063,
"num_input_tokens_seen": 801888,
"step": 126
},
{
"epoch": 3.266881028938907,
"grad_norm": 1.4245920181274414,
"learning_rate": 1.3650237506511333e-06,
"loss": 0.0282,
"num_input_tokens_seen": 808432,
"step": 127
},
{
"epoch": 3.292604501607717,
"grad_norm": 0.36083319783210754,
"learning_rate": 1.3263210930352737e-06,
"loss": 0.0003,
"num_input_tokens_seen": 814896,
"step": 128
},
{
"epoch": 3.3183279742765275,
"grad_norm": 0.05347484350204468,
"learning_rate": 1.2879759493841577e-06,
"loss": 0.0002,
"num_input_tokens_seen": 821200,
"step": 129
},
{
"epoch": 3.3440514469453375,
"grad_norm": 0.16537031531333923,
"learning_rate": 1.2500000000000007e-06,
"loss": 0.0004,
"num_input_tokens_seen": 827584,
"step": 130
},
{
"epoch": 3.369774919614148,
"grad_norm": 1.4132962226867676,
"learning_rate": 1.2124048127248644e-06,
"loss": 0.0169,
"num_input_tokens_seen": 834048,
"step": 131
},
{
"epoch": 3.395498392282958,
"grad_norm": 11.695096969604492,
"learning_rate": 1.1752018394169882e-06,
"loss": 0.0127,
"num_input_tokens_seen": 840240,
"step": 132
},
{
"epoch": 3.4212218649517685,
"grad_norm": 2.03340482711792,
"learning_rate": 1.1384024124624324e-06,
"loss": 0.0045,
"num_input_tokens_seen": 846448,
"step": 133
},
{
"epoch": 3.446945337620579,
"grad_norm": 15.712054252624512,
"learning_rate": 1.1020177413231334e-06,
"loss": 0.0924,
"num_input_tokens_seen": 852928,
"step": 134
},
{
"epoch": 3.472668810289389,
"grad_norm": 3.9320309162139893,
"learning_rate": 1.0660589091223854e-06,
"loss": 0.0067,
"num_input_tokens_seen": 859312,
"step": 135
},
{
"epoch": 3.4983922829581995,
"grad_norm": 1.599960207939148,
"learning_rate": 1.0305368692688175e-06,
"loss": 0.003,
"num_input_tokens_seen": 865440,
"step": 136
},
{
"epoch": 3.5241157556270095,
"grad_norm": 8.650961875915527,
"learning_rate": 9.95462442119879e-07,
"loss": 0.0164,
"num_input_tokens_seen": 871968,
"step": 137
},
{
"epoch": 3.54983922829582,
"grad_norm": 2.1372265815734863,
"learning_rate": 9.608463116858544e-07,
"loss": 0.0018,
"num_input_tokens_seen": 878352,
"step": 138
},
{
"epoch": 3.57556270096463,
"grad_norm": 1.8475613594055176,
"learning_rate": 9.266990223754069e-07,
"loss": 0.0226,
"num_input_tokens_seen": 884736,
"step": 139
},
{
"epoch": 3.6012861736334405,
"grad_norm": 0.2472313642501831,
"learning_rate": 8.930309757836517e-07,
"loss": 0.0008,
"num_input_tokens_seen": 891008,
"step": 140
},
{
"epoch": 3.627009646302251,
"grad_norm": 0.13111230731010437,
"learning_rate": 8.598524275237321e-07,
"loss": 0.0004,
"num_input_tokens_seen": 897120,
"step": 141
},
{
"epoch": 3.652733118971061,
"grad_norm": 0.23976323008537292,
"learning_rate": 8.271734841028553e-07,
"loss": 0.0008,
"num_input_tokens_seen": 903536,
"step": 142
},
{
"epoch": 3.6784565916398715,
"grad_norm": 4.404489517211914,
"learning_rate": 7.950040998437541e-07,
"loss": 0.0256,
"num_input_tokens_seen": 909824,
"step": 143
},
{
"epoch": 3.7041800643086815,
"grad_norm": 0.16712801158428192,
"learning_rate": 7.633540738525066e-07,
"loss": 0.0005,
"num_input_tokens_seen": 916112,
"step": 144
},
{
"epoch": 3.729903536977492,
"grad_norm": 0.4478089213371277,
"learning_rate": 7.322330470336314e-07,
"loss": 0.0045,
"num_input_tokens_seen": 922592,
"step": 145
},
{
"epoch": 3.755627009646302,
"grad_norm": 0.14608968794345856,
"learning_rate": 7.016504991533727e-07,
"loss": 0.0005,
"num_input_tokens_seen": 928640,
"step": 146
},
{
"epoch": 3.7813504823151125,
"grad_norm": 1.1640571355819702,
"learning_rate": 6.716157459520739e-07,
"loss": 0.0069,
"num_input_tokens_seen": 935488,
"step": 147
},
{
"epoch": 3.807073954983923,
"grad_norm": 5.529272079467773,
"learning_rate": 6.421379363065142e-07,
"loss": 0.015,
"num_input_tokens_seen": 941952,
"step": 148
},
{
"epoch": 3.832797427652733,
"grad_norm": 2.3761372566223145,
"learning_rate": 6.1322604944307e-07,
"loss": 0.0012,
"num_input_tokens_seen": 948368,
"step": 149
},
{
"epoch": 3.8585209003215435,
"grad_norm": 2.560185670852661,
"learning_rate": 5.848888922025553e-07,
"loss": 0.0095,
"num_input_tokens_seen": 954752,
"step": 150
},
{
"epoch": 3.884244372990354,
"grad_norm": 1.6769373416900635,
"learning_rate": 5.571350963575728e-07,
"loss": 0.0271,
"num_input_tokens_seen": 961248,
"step": 151
},
{
"epoch": 3.909967845659164,
"grad_norm": 1.4693336486816406,
"learning_rate": 5.299731159831953e-07,
"loss": 0.0201,
"num_input_tokens_seen": 967424,
"step": 152
},
{
"epoch": 3.935691318327974,
"grad_norm": 1.2330920696258545,
"learning_rate": 5.034112248817685e-07,
"loss": 0.012,
"num_input_tokens_seen": 973696,
"step": 153
},
{
"epoch": 3.9614147909967845,
"grad_norm": 8.100526809692383,
"learning_rate": 4.774575140626317e-07,
"loss": 0.023,
"num_input_tokens_seen": 980144,
"step": 154
},
{
"epoch": 3.987138263665595,
"grad_norm": 1.439001202583313,
"learning_rate": 4.5211988927752026e-07,
"loss": 0.0156,
"num_input_tokens_seen": 986352,
"step": 155
},
{
"epoch": 4.012861736334405,
"grad_norm": 0.2862737476825714,
"learning_rate": 4.27406068612396e-07,
"loss": 0.0009,
"num_input_tokens_seen": 992880,
"step": 156
},
{
"epoch": 4.038585209003215,
"grad_norm": 0.575637936592102,
"learning_rate": 4.033235801364402e-07,
"loss": 0.0017,
"num_input_tokens_seen": 999200,
"step": 157
},
{
"epoch": 4.064308681672026,
"grad_norm": 0.47944220900535583,
"learning_rate": 3.798797596089351e-07,
"loss": 0.0015,
"num_input_tokens_seen": 1005568,
"step": 158
},
{
"epoch": 4.090032154340836,
"grad_norm": 0.6407404541969299,
"learning_rate": 3.5708174824471947e-07,
"loss": 0.0035,
"num_input_tokens_seen": 1011728,
"step": 159
},
{
"epoch": 4.115755627009646,
"grad_norm": 0.29835206270217896,
"learning_rate": 3.3493649053890325e-07,
"loss": 0.0016,
"num_input_tokens_seen": 1018272,
"step": 160
},
{
"epoch": 4.141479099678457,
"grad_norm": 0.8626136183738708,
"learning_rate": 3.134507321515107e-07,
"loss": 0.0028,
"num_input_tokens_seen": 1024688,
"step": 161
},
{
"epoch": 4.167202572347267,
"grad_norm": 0.19084709882736206,
"learning_rate": 2.9263101785268253e-07,
"loss": 0.0006,
"num_input_tokens_seen": 1031152,
"step": 162
},
{
"epoch": 4.192926045016077,
"grad_norm": 0.34228870272636414,
"learning_rate": 2.7248368952908055e-07,
"loss": 0.0013,
"num_input_tokens_seen": 1037632,
"step": 163
},
{
"epoch": 4.218649517684887,
"grad_norm": 0.19209067523479462,
"learning_rate": 2.53014884252083e-07,
"loss": 0.0006,
"num_input_tokens_seen": 1043936,
"step": 164
},
{
"epoch": 4.244372990353698,
"grad_norm": 0.5416057705879211,
"learning_rate": 2.3423053240837518e-07,
"loss": 0.0017,
"num_input_tokens_seen": 1050048,
"step": 165
},
{
"epoch": 4.270096463022508,
"grad_norm": 0.12837135791778564,
"learning_rate": 2.1613635589349756e-07,
"loss": 0.0004,
"num_input_tokens_seen": 1056496,
"step": 166
},
{
"epoch": 4.295819935691318,
"grad_norm": 3.4782867431640625,
"learning_rate": 1.9873786636889908e-07,
"loss": 0.0049,
"num_input_tokens_seen": 1062848,
"step": 167
},
{
"epoch": 4.321543408360129,
"grad_norm": 4.008510112762451,
"learning_rate": 1.8204036358303173e-07,
"loss": 0.0071,
"num_input_tokens_seen": 1069136,
"step": 168
},
{
"epoch": 4.347266881028939,
"grad_norm": 0.38916894793510437,
"learning_rate": 1.6604893375699594e-07,
"loss": 0.0011,
"num_input_tokens_seen": 1075328,
"step": 169
},
{
"epoch": 4.372990353697749,
"grad_norm": 0.11064338684082031,
"learning_rate": 1.507684480352292e-07,
"loss": 0.0004,
"num_input_tokens_seen": 1081696,
"step": 170
},
{
"epoch": 4.39871382636656,
"grad_norm": 0.20267654955387115,
"learning_rate": 1.362035610017079e-07,
"loss": 0.0007,
"num_input_tokens_seen": 1088112,
"step": 171
},
{
"epoch": 4.42443729903537,
"grad_norm": 0.35677772760391235,
"learning_rate": 1.223587092621162e-07,
"loss": 0.0017,
"num_input_tokens_seen": 1094512,
"step": 172
},
{
"epoch": 4.45016077170418,
"grad_norm": 0.3713955581188202,
"learning_rate": 1.0923811009241142e-07,
"loss": 0.0007,
"num_input_tokens_seen": 1101040,
"step": 173
},
{
"epoch": 4.47588424437299,
"grad_norm": 0.1514904946088791,
"learning_rate": 9.684576015420277e-08,
"loss": 0.0003,
"num_input_tokens_seen": 1107168,
"step": 174
},
{
"epoch": 4.501607717041801,
"grad_norm": 4.804702281951904,
"learning_rate": 8.518543427732951e-08,
"loss": 0.0046,
"num_input_tokens_seen": 1113408,
"step": 175
},
{
"epoch": 4.527331189710611,
"grad_norm": 0.9991558194160461,
"learning_rate": 7.426068431000883e-08,
"loss": 0.0038,
"num_input_tokens_seen": 1119568,
"step": 176
},
{
"epoch": 4.553054662379421,
"grad_norm": 2.726783037185669,
"learning_rate": 6.407483803691216e-08,
"loss": 0.0036,
"num_input_tokens_seen": 1126048,
"step": 177
},
{
"epoch": 4.578778135048232,
"grad_norm": 1.2259963750839233,
"learning_rate": 5.463099816548578e-08,
"loss": 0.0056,
"num_input_tokens_seen": 1132400,
"step": 178
},
{
"epoch": 4.604501607717042,
"grad_norm": 3.010671615600586,
"learning_rate": 4.593204138084006e-08,
"loss": 0.0057,
"num_input_tokens_seen": 1138832,
"step": 179
},
{
"epoch": 4.630225080385852,
"grad_norm": 0.9592411518096924,
"learning_rate": 3.798061746947995e-08,
"loss": 0.002,
"num_input_tokens_seen": 1145344,
"step": 180
},
{
"epoch": 4.655948553054662,
"grad_norm": 0.09295938909053802,
"learning_rate": 3.077914851215585e-08,
"loss": 0.0003,
"num_input_tokens_seen": 1151536,
"step": 181
},
{
"epoch": 4.681672025723473,
"grad_norm": 0.06213412061333656,
"learning_rate": 2.4329828146074096e-08,
"loss": 0.0002,
"num_input_tokens_seen": 1157696,
"step": 182
},
{
"epoch": 4.707395498392283,
"grad_norm": 1.2592134475708008,
"learning_rate": 1.8634620896695044e-08,
"loss": 0.0043,
"num_input_tokens_seen": 1163712,
"step": 183
},
{
"epoch": 4.733118971061093,
"grad_norm": 0.05733427032828331,
"learning_rate": 1.3695261579316776e-08,
"loss": 0.0002,
"num_input_tokens_seen": 1169888,
"step": 184
},
{
"epoch": 4.758842443729904,
"grad_norm": 0.38288363814353943,
"learning_rate": 9.513254770636138e-09,
"loss": 0.0013,
"num_input_tokens_seen": 1176272,
"step": 185
},
{
"epoch": 4.784565916398714,
"grad_norm": 0.6540606617927551,
"learning_rate": 6.089874350439507e-09,
"loss": 0.0023,
"num_input_tokens_seen": 1182688,
"step": 186
},
{
"epoch": 4.810289389067524,
"grad_norm": 0.0432482548058033,
"learning_rate": 3.4261631135654174e-09,
"loss": 0.0002,
"num_input_tokens_seen": 1189360,
"step": 187
},
{
"epoch": 4.836012861736334,
"grad_norm": 0.8218325972557068,
"learning_rate": 1.5229324522605949e-09,
"loss": 0.0015,
"num_input_tokens_seen": 1195632,
"step": 188
},
{
"epoch": 4.861736334405145,
"grad_norm": 0.05609305202960968,
"learning_rate": 3.8076210902182607e-10,
"loss": 0.0002,
"num_input_tokens_seen": 1202016,
"step": 189
},
{
"epoch": 4.887459807073955,
"grad_norm": 3.791782855987549,
"learning_rate": 0.0,
"loss": 0.0028,
"num_input_tokens_seen": 1208400,
"step": 190
},
{
"epoch": 4.887459807073955,
"num_input_tokens_seen": 1208400,
"step": 190,
"total_flos": 5.441370708980531e+16,
"train_loss": 0.5078434096239538,
"train_runtime": 2571.1298,
"train_samples_per_second": 9.661,
"train_steps_per_second": 0.074
}
],
"logging_steps": 1,
"max_steps": 190,
"num_input_tokens_seen": 1208400,
"num_train_epochs": 5,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.441370708980531e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}