zephyr-7b-sft-safe-hhrlhf / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 25,
"global_step": 812,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 33.25565995239216,
"learning_rate": 2.439024390243903e-07,
"loss": 1.8579,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 20.62131418379264,
"learning_rate": 1.2195121951219514e-06,
"loss": 1.8594,
"step": 5
},
{
"epoch": 0.02,
"grad_norm": 9.116745598504249,
"learning_rate": 2.4390243902439027e-06,
"loss": 1.6368,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 4.845477457562938,
"learning_rate": 3.6585365853658537e-06,
"loss": 1.5273,
"step": 15
},
{
"epoch": 0.05,
"grad_norm": 4.064982240065612,
"learning_rate": 4.8780487804878055e-06,
"loss": 1.4879,
"step": 20
},
{
"epoch": 0.06,
"grad_norm": 3.842051647757176,
"learning_rate": 6.0975609756097564e-06,
"loss": 1.4854,
"step": 25
},
{
"epoch": 0.06,
"eval_loss": 1.4539871215820312,
"eval_runtime": 37.3786,
"eval_samples_per_second": 35.047,
"eval_steps_per_second": 1.097,
"step": 25
},
{
"epoch": 0.07,
"grad_norm": 3.5736405142429235,
"learning_rate": 7.317073170731707e-06,
"loss": 1.4689,
"step": 30
},
{
"epoch": 0.09,
"grad_norm": 3.601301646883329,
"learning_rate": 8.536585365853658e-06,
"loss": 1.4492,
"step": 35
},
{
"epoch": 0.1,
"grad_norm": 3.5795153985197454,
"learning_rate": 9.756097560975611e-06,
"loss": 1.4464,
"step": 40
},
{
"epoch": 0.11,
"grad_norm": 3.547211967993725,
"learning_rate": 1.0975609756097562e-05,
"loss": 1.4118,
"step": 45
},
{
"epoch": 0.12,
"grad_norm": 3.8241967984903127,
"learning_rate": 1.2195121951219513e-05,
"loss": 1.43,
"step": 50
},
{
"epoch": 0.12,
"eval_loss": 1.4291119575500488,
"eval_runtime": 37.1035,
"eval_samples_per_second": 35.307,
"eval_steps_per_second": 1.105,
"step": 50
},
{
"epoch": 0.14,
"grad_norm": 6.65980179405396,
"learning_rate": 1.3414634146341466e-05,
"loss": 1.4254,
"step": 55
},
{
"epoch": 0.15,
"grad_norm": 4.155175626685716,
"learning_rate": 1.4634146341463415e-05,
"loss": 1.4337,
"step": 60
},
{
"epoch": 0.16,
"grad_norm": 3.6031251352197695,
"learning_rate": 1.585365853658537e-05,
"loss": 1.4482,
"step": 65
},
{
"epoch": 0.17,
"grad_norm": 3.9000471804568297,
"learning_rate": 1.7073170731707317e-05,
"loss": 1.452,
"step": 70
},
{
"epoch": 0.18,
"grad_norm": 3.6748440024674487,
"learning_rate": 1.829268292682927e-05,
"loss": 1.4188,
"step": 75
},
{
"epoch": 0.18,
"eval_loss": 1.4451020956039429,
"eval_runtime": 36.3992,
"eval_samples_per_second": 35.99,
"eval_steps_per_second": 1.126,
"step": 75
},
{
"epoch": 0.2,
"grad_norm": 4.784684166408002,
"learning_rate": 1.9512195121951222e-05,
"loss": 1.422,
"step": 80
},
{
"epoch": 0.21,
"grad_norm": 3.777533605838261,
"learning_rate": 1.9999166586547382e-05,
"loss": 1.4567,
"step": 85
},
{
"epoch": 0.22,
"grad_norm": 3.551321168481626,
"learning_rate": 1.999407400739705e-05,
"loss": 1.4674,
"step": 90
},
{
"epoch": 0.23,
"grad_norm": 3.625908306378223,
"learning_rate": 1.9984354211555646e-05,
"loss": 1.4554,
"step": 95
},
{
"epoch": 0.25,
"grad_norm": 4.243745995028721,
"learning_rate": 1.997001169925015e-05,
"loss": 1.4465,
"step": 100
},
{
"epoch": 0.25,
"eval_loss": 1.4564708471298218,
"eval_runtime": 35.8982,
"eval_samples_per_second": 36.492,
"eval_steps_per_second": 1.142,
"step": 100
},
{
"epoch": 0.26,
"grad_norm": 3.7056910011856434,
"learning_rate": 1.995105311100698e-05,
"loss": 1.435,
"step": 105
},
{
"epoch": 0.27,
"grad_norm": 4.028708939881987,
"learning_rate": 1.9927487224577402e-05,
"loss": 1.4554,
"step": 110
},
{
"epoch": 0.28,
"grad_norm": 3.4936872675120205,
"learning_rate": 1.9899324950873532e-05,
"loss": 1.4371,
"step": 115
},
{
"epoch": 0.3,
"grad_norm": 5.446138884571553,
"learning_rate": 1.986657932891657e-05,
"loss": 1.4213,
"step": 120
},
{
"epoch": 0.31,
"grad_norm": 3.5762471286009836,
"learning_rate": 1.9829265519799824e-05,
"loss": 1.4499,
"step": 125
},
{
"epoch": 0.31,
"eval_loss": 1.4525413513183594,
"eval_runtime": 36.7696,
"eval_samples_per_second": 35.627,
"eval_steps_per_second": 1.115,
"step": 125
},
{
"epoch": 0.32,
"grad_norm": 3.651881589784425,
"learning_rate": 1.9787400799669155e-05,
"loss": 1.4406,
"step": 130
},
{
"epoch": 0.33,
"grad_norm": 3.5390077509830626,
"learning_rate": 1.9741004551724208e-05,
"loss": 1.4683,
"step": 135
},
{
"epoch": 0.34,
"grad_norm": 3.654802825210285,
"learning_rate": 1.9690098257244063e-05,
"loss": 1.46,
"step": 140
},
{
"epoch": 0.36,
"grad_norm": 4.828779092366187,
"learning_rate": 1.963470548564149e-05,
"loss": 1.457,
"step": 145
},
{
"epoch": 0.37,
"grad_norm": 3.221793591458839,
"learning_rate": 1.9574851883550395e-05,
"loss": 1.4387,
"step": 150
},
{
"epoch": 0.37,
"eval_loss": 1.4409288167953491,
"eval_runtime": 36.7378,
"eval_samples_per_second": 35.658,
"eval_steps_per_second": 1.116,
"step": 150
},
{
"epoch": 0.38,
"grad_norm": 4.429483358899047,
"learning_rate": 1.9510565162951538e-05,
"loss": 1.4227,
"step": 155
},
{
"epoch": 0.39,
"grad_norm": 3.3883915102781494,
"learning_rate": 1.9441875088342e-05,
"loss": 1.4335,
"step": 160
},
{
"epoch": 0.41,
"grad_norm": 3.4947168397325368,
"learning_rate": 1.9368813462954316e-05,
"loss": 1.4165,
"step": 165
},
{
"epoch": 0.42,
"grad_norm": 3.498600082458413,
"learning_rate": 1.9291414114031744e-05,
"loss": 1.411,
"step": 170
},
{
"epoch": 0.43,
"grad_norm": 3.540671355408263,
"learning_rate": 1.9209712877166347e-05,
"loss": 1.4368,
"step": 175
},
{
"epoch": 0.43,
"eval_loss": 1.4226408004760742,
"eval_runtime": 36.4019,
"eval_samples_per_second": 35.987,
"eval_steps_per_second": 1.126,
"step": 175
},
{
"epoch": 0.44,
"grad_norm": 324.64947934206117,
"learning_rate": 1.9123747579707275e-05,
"loss": 1.3938,
"step": 180
},
{
"epoch": 0.46,
"grad_norm": 9.509687913297793,
"learning_rate": 1.9033558023246845e-05,
"loss": 1.4201,
"step": 185
},
{
"epoch": 0.47,
"grad_norm": 5.133252955012539,
"learning_rate": 1.8939185965192572e-05,
"loss": 1.413,
"step": 190
},
{
"epoch": 0.48,
"grad_norm": 5.023875370016508,
"learning_rate": 1.8840675099433637e-05,
"loss": 1.4426,
"step": 195
},
{
"epoch": 0.49,
"grad_norm": 4.144939646556894,
"learning_rate": 1.873807103611081e-05,
"loss": 1.4095,
"step": 200
},
{
"epoch": 0.49,
"eval_loss": 1.422728419303894,
"eval_runtime": 36.3022,
"eval_samples_per_second": 36.086,
"eval_steps_per_second": 1.129,
"step": 200
},
{
"epoch": 0.5,
"grad_norm": 3.8094743377142,
"learning_rate": 1.8631421280499114e-05,
"loss": 1.4219,
"step": 205
},
{
"epoch": 0.52,
"grad_norm": 3.6484435225782885,
"learning_rate": 1.8520775211013094e-05,
"loss": 1.399,
"step": 210
},
{
"epoch": 0.53,
"grad_norm": 3.8418704373836454,
"learning_rate": 1.8406184056344784e-05,
"loss": 1.424,
"step": 215
},
{
"epoch": 0.54,
"grad_norm": 3.5369039139594354,
"learning_rate": 1.8287700871745036e-05,
"loss": 1.3809,
"step": 220
},
{
"epoch": 0.55,
"grad_norm": 4.306679118424655,
"learning_rate": 1.816538051445916e-05,
"loss": 1.4065,
"step": 225
},
{
"epoch": 0.55,
"eval_loss": 1.4043676853179932,
"eval_runtime": 36.1499,
"eval_samples_per_second": 36.238,
"eval_steps_per_second": 1.134,
"step": 225
},
{
"epoch": 0.57,
"grad_norm": 3.793497887600387,
"learning_rate": 1.8039279618328215e-05,
"loss": 1.4393,
"step": 230
},
{
"epoch": 0.58,
"grad_norm": 3.5806655996080217,
"learning_rate": 1.7909456567567774e-05,
"loss": 1.3877,
"step": 235
},
{
"epoch": 0.59,
"grad_norm": 4.335034005215255,
"learning_rate": 1.777597146973627e-05,
"loss": 1.3992,
"step": 240
},
{
"epoch": 0.6,
"grad_norm": 3.5829112355976407,
"learning_rate": 1.7638886127905427e-05,
"loss": 1.3966,
"step": 245
},
{
"epoch": 0.62,
"grad_norm": 3.380252848613958,
"learning_rate": 1.7498264012045686e-05,
"loss": 1.3747,
"step": 250
},
{
"epoch": 0.62,
"eval_loss": 1.3881479501724243,
"eval_runtime": 36.7171,
"eval_samples_per_second": 35.678,
"eval_steps_per_second": 1.117,
"step": 250
},
{
"epoch": 0.63,
"grad_norm": 3.610102942308515,
"learning_rate": 1.7354170229639856e-05,
"loss": 1.3726,
"step": 255
},
{
"epoch": 0.64,
"grad_norm": 3.3439020430777155,
"learning_rate": 1.720667149553861e-05,
"loss": 1.3682,
"step": 260
},
{
"epoch": 0.65,
"grad_norm": 3.4956412955508402,
"learning_rate": 1.705583610107178e-05,
"loss": 1.3552,
"step": 265
},
{
"epoch": 0.67,
"grad_norm": 3.4985126593068165,
"learning_rate": 1.690173388242972e-05,
"loss": 1.3895,
"step": 270
},
{
"epoch": 0.68,
"grad_norm": 3.7382334626759746,
"learning_rate": 1.6744436188329455e-05,
"loss": 1.3847,
"step": 275
},
{
"epoch": 0.68,
"eval_loss": 1.3719505071640015,
"eval_runtime": 36.7391,
"eval_samples_per_second": 35.657,
"eval_steps_per_second": 1.116,
"step": 275
},
{
"epoch": 0.69,
"grad_norm": 3.7522691462559363,
"learning_rate": 1.658401584698049e-05,
"loss": 1.3954,
"step": 280
},
{
"epoch": 0.7,
"grad_norm": 3.599034550869163,
"learning_rate": 1.6420547132365637e-05,
"loss": 1.3558,
"step": 285
},
{
"epoch": 0.71,
"grad_norm": 3.780355027101858,
"learning_rate": 1.6254105729852466e-05,
"loss": 1.3836,
"step": 290
},
{
"epoch": 0.73,
"grad_norm": 3.723697013821863,
"learning_rate": 1.6084768701151263e-05,
"loss": 1.3753,
"step": 295
},
{
"epoch": 0.74,
"grad_norm": 3.285217439584574,
"learning_rate": 1.5912614448635784e-05,
"loss": 1.3526,
"step": 300
},
{
"epoch": 0.74,
"eval_loss": 1.358110785484314,
"eval_runtime": 36.5535,
"eval_samples_per_second": 35.838,
"eval_steps_per_second": 1.122,
"step": 300
},
{
"epoch": 0.75,
"grad_norm": 4.389266580261243,
"learning_rate": 1.573772267904325e-05,
"loss": 1.3432,
"step": 305
},
{
"epoch": 0.76,
"grad_norm": 3.562805344105124,
"learning_rate": 1.5560174366570448e-05,
"loss": 1.3479,
"step": 310
},
{
"epoch": 0.78,
"grad_norm": 3.557282541178432,
"learning_rate": 1.5380051715383e-05,
"loss": 1.3482,
"step": 315
},
{
"epoch": 0.79,
"grad_norm": 3.171518329728872,
"learning_rate": 1.5197438121555159e-05,
"loss": 1.3627,
"step": 320
},
{
"epoch": 0.8,
"grad_norm": 3.4241008103077006,
"learning_rate": 1.5012418134457756e-05,
"loss": 1.3072,
"step": 325
},
{
"epoch": 0.8,
"eval_loss": 1.3410331010818481,
"eval_runtime": 36.5162,
"eval_samples_per_second": 35.874,
"eval_steps_per_second": 1.123,
"step": 325
},
{
"epoch": 0.81,
"grad_norm": 3.4417136894356077,
"learning_rate": 1.4825077417612187e-05,
"loss": 1.3713,
"step": 330
},
{
"epoch": 0.83,
"grad_norm": 3.55225659028301,
"learning_rate": 1.4635502709028511e-05,
"loss": 1.3492,
"step": 335
},
{
"epoch": 0.84,
"grad_norm": 4.864004021362561,
"learning_rate": 1.4443781781046135e-05,
"loss": 1.3297,
"step": 340
},
{
"epoch": 0.85,
"grad_norm": 3.714231728878515,
"learning_rate": 1.425000339969554e-05,
"loss": 1.292,
"step": 345
},
{
"epoch": 0.86,
"grad_norm": 3.499541537796859,
"learning_rate": 1.4054257283599974e-05,
"loss": 1.3146,
"step": 350
},
{
"epoch": 0.86,
"eval_loss": 1.3238551616668701,
"eval_runtime": 35.9276,
"eval_samples_per_second": 36.462,
"eval_steps_per_second": 1.141,
"step": 350
},
{
"epoch": 0.87,
"grad_norm": 3.3766322552695374,
"learning_rate": 1.3856634062436073e-05,
"loss": 1.2782,
"step": 355
},
{
"epoch": 0.89,
"grad_norm": 3.430094949177884,
"learning_rate": 1.3657225234972695e-05,
"loss": 1.291,
"step": 360
},
{
"epoch": 0.9,
"grad_norm": 3.7053693958311964,
"learning_rate": 1.3456123126707334e-05,
"loss": 1.3275,
"step": 365
},
{
"epoch": 0.91,
"grad_norm": 3.528099228472523,
"learning_rate": 1.3253420847119804e-05,
"loss": 1.2867,
"step": 370
},
{
"epoch": 0.92,
"grad_norm": 3.6078436890974706,
"learning_rate": 1.304921224656289e-05,
"loss": 1.2783,
"step": 375
},
{
"epoch": 0.92,
"eval_loss": 1.310630440711975,
"eval_runtime": 36.5504,
"eval_samples_per_second": 35.841,
"eval_steps_per_second": 1.122,
"step": 375
},
{
"epoch": 0.94,
"grad_norm": 3.523684230098565,
"learning_rate": 1.2843591872810039e-05,
"loss": 1.2782,
"step": 380
},
{
"epoch": 0.95,
"grad_norm": 3.7267322056062144,
"learning_rate": 1.2636654927280074e-05,
"loss": 1.2761,
"step": 385
},
{
"epoch": 0.96,
"grad_norm": 3.65799573792828,
"learning_rate": 1.2428497220959359e-05,
"loss": 1.2864,
"step": 390
},
{
"epoch": 0.97,
"grad_norm": 3.520887899492278,
"learning_rate": 1.2219215130041656e-05,
"loss": 1.2936,
"step": 395
},
{
"epoch": 0.99,
"grad_norm": 3.878015265725218,
"learning_rate": 1.2008905551306356e-05,
"loss": 1.2773,
"step": 400
},
{
"epoch": 0.99,
"eval_loss": 1.2895601987838745,
"eval_runtime": 36.4176,
"eval_samples_per_second": 35.972,
"eval_steps_per_second": 1.126,
"step": 400
},
{
"epoch": 1.0,
"grad_norm": 6.1260498556243155,
"learning_rate": 1.1797665857255622e-05,
"loss": 1.2861,
"step": 405
},
{
"epoch": 1.01,
"grad_norm": 4.466084749533314,
"learning_rate": 1.1585593851031346e-05,
"loss": 0.8568,
"step": 410
},
{
"epoch": 1.02,
"grad_norm": 4.230541766753645,
"learning_rate": 1.1372787721132648e-05,
"loss": 0.7304,
"step": 415
},
{
"epoch": 1.03,
"grad_norm": 4.050955020609926,
"learning_rate": 1.1159345995955007e-05,
"loss": 0.7438,
"step": 420
},
{
"epoch": 1.05,
"grad_norm": 3.8978479248153,
"learning_rate": 1.0945367498171992e-05,
"loss": 0.7303,
"step": 425
},
{
"epoch": 1.05,
"eval_loss": 1.3326823711395264,
"eval_runtime": 37.9893,
"eval_samples_per_second": 34.483,
"eval_steps_per_second": 1.079,
"step": 425
},
{
"epoch": 1.06,
"grad_norm": 3.7185440445464963,
"learning_rate": 1.0730951298980776e-05,
"loss": 0.7407,
"step": 430
},
{
"epoch": 1.07,
"grad_norm": 3.7568671101121303,
"learning_rate": 1.0516196672232538e-05,
"loss": 0.7147,
"step": 435
},
{
"epoch": 1.08,
"grad_norm": 4.175956955241419,
"learning_rate": 1.0301203048469084e-05,
"loss": 0.7187,
"step": 440
},
{
"epoch": 1.1,
"grad_norm": 3.5713973562394794,
"learning_rate": 1.0086069968886885e-05,
"loss": 0.6899,
"step": 445
},
{
"epoch": 1.11,
"grad_norm": 3.926229796886718,
"learning_rate": 9.87089703924991e-06,
"loss": 0.6911,
"step": 450
},
{
"epoch": 1.11,
"eval_loss": 1.3080617189407349,
"eval_runtime": 36.6218,
"eval_samples_per_second": 35.771,
"eval_steps_per_second": 1.12,
"step": 450
},
{
"epoch": 1.12,
"grad_norm": 4.041765832088674,
"learning_rate": 9.655783883772546e-06,
"loss": 0.6783,
"step": 455
},
{
"epoch": 1.13,
"grad_norm": 3.822198052373504,
"learning_rate": 9.440830098993969e-06,
"loss": 0.7047,
"step": 460
},
{
"epoch": 1.15,
"grad_norm": 3.74103783619035,
"learning_rate": 9.22613520766537e-06,
"loss": 0.7148,
"step": 465
},
{
"epoch": 1.16,
"grad_norm": 3.863229052811563,
"learning_rate": 9.011798612671286e-06,
"loss": 0.6882,
"step": 470
},
{
"epoch": 1.17,
"grad_norm": 3.7184780256426233,
"learning_rate": 8.797919551006477e-06,
"loss": 0.7044,
"step": 475
},
{
"epoch": 1.17,
"eval_loss": 1.3011983633041382,
"eval_runtime": 36.1296,
"eval_samples_per_second": 36.258,
"eval_steps_per_second": 1.135,
"step": 475
},
{
"epoch": 1.18,
"grad_norm": 3.61644498812297,
"learning_rate": 8.58459704782957e-06,
"loss": 0.7014,
"step": 480
},
{
"epoch": 1.19,
"grad_norm": 3.7252005213479427,
"learning_rate": 8.371929870614834e-06,
"loss": 0.689,
"step": 485
},
{
"epoch": 1.21,
"grad_norm": 3.6457105180525105,
"learning_rate": 8.1600164834232e-06,
"loss": 0.6913,
"step": 490
},
{
"epoch": 1.22,
"grad_norm": 7.856843188311362,
"learning_rate": 7.948955001313812e-06,
"loss": 0.6851,
"step": 495
},
{
"epoch": 1.23,
"grad_norm": 3.583455067918794,
"learning_rate": 7.738843144917119e-06,
"loss": 0.6597,
"step": 500
},
{
"epoch": 1.23,
"eval_loss": 1.2844963073730469,
"eval_runtime": 36.2065,
"eval_samples_per_second": 36.181,
"eval_steps_per_second": 1.132,
"step": 500
},
{
"epoch": 1.24,
"grad_norm": 6.459020130486219,
"learning_rate": 7.529778195190644e-06,
"loss": 0.6643,
"step": 505
},
{
"epoch": 1.26,
"grad_norm": 3.8211583260556905,
"learning_rate": 7.321856948378259e-06,
"loss": 0.6848,
"step": 510
},
{
"epoch": 1.27,
"grad_norm": 3.7126647718977095,
"learning_rate": 7.115175671193913e-06,
"loss": 0.6894,
"step": 515
},
{
"epoch": 1.28,
"grad_norm": 3.659148206814036,
"learning_rate": 6.909830056250527e-06,
"loss": 0.6846,
"step": 520
},
{
"epoch": 1.29,
"grad_norm": 3.5912318784335757,
"learning_rate": 6.7059151777547e-06,
"loss": 0.6717,
"step": 525
},
{
"epoch": 1.29,
"eval_loss": 1.2701396942138672,
"eval_runtime": 36.3576,
"eval_samples_per_second": 36.031,
"eval_steps_per_second": 1.128,
"step": 525
},
{
"epoch": 1.31,
"grad_norm": 4.43495688718006,
"learning_rate": 6.503525447487717e-06,
"loss": 0.6644,
"step": 530
},
{
"epoch": 1.32,
"grad_norm": 3.754517876550191,
"learning_rate": 6.3027545710932715e-06,
"loss": 0.6531,
"step": 535
},
{
"epoch": 1.33,
"grad_norm": 3.745749569347864,
"learning_rate": 6.103695504692122e-06,
"loss": 0.6528,
"step": 540
},
{
"epoch": 1.34,
"grad_norm": 3.724963177872727,
"learning_rate": 5.906440411843787e-06,
"loss": 0.6679,
"step": 545
},
{
"epoch": 1.35,
"grad_norm": 3.5172682214444926,
"learning_rate": 5.711080620875165e-06,
"loss": 0.6538,
"step": 550
},
{
"epoch": 1.35,
"eval_loss": 1.2666361331939697,
"eval_runtime": 36.5896,
"eval_samples_per_second": 35.802,
"eval_steps_per_second": 1.121,
"step": 550
},
{
"epoch": 1.37,
"grad_norm": 3.6339406360777096,
"learning_rate": 5.517706582595896e-06,
"loss": 0.6794,
"step": 555
},
{
"epoch": 1.38,
"grad_norm": 3.7173322180156116,
"learning_rate": 5.32640782841998e-06,
"loss": 0.6479,
"step": 560
},
{
"epoch": 1.39,
"grad_norm": 3.661315456496335,
"learning_rate": 5.137272928913097e-06,
"loss": 0.6582,
"step": 565
},
{
"epoch": 1.4,
"grad_norm": 4.487184920335217,
"learning_rate": 4.950389452784796e-06,
"loss": 0.6697,
"step": 570
},
{
"epoch": 1.42,
"grad_norm": 3.5318063600935776,
"learning_rate": 4.7658439263445015e-06,
"loss": 0.6484,
"step": 575
},
{
"epoch": 1.42,
"eval_loss": 1.2480911016464233,
"eval_runtime": 36.7469,
"eval_samples_per_second": 35.649,
"eval_steps_per_second": 1.116,
"step": 575
},
{
"epoch": 1.43,
"grad_norm": 3.6935613568961463,
"learning_rate": 4.583721793440188e-06,
"loss": 0.6621,
"step": 580
},
{
"epoch": 1.44,
"grad_norm": 3.6221958065559203,
"learning_rate": 4.404107375898234e-06,
"loss": 0.6633,
"step": 585
},
{
"epoch": 1.45,
"grad_norm": 3.9034034514689533,
"learning_rate": 4.2270838344827285e-06,
"loss": 0.6561,
"step": 590
},
{
"epoch": 1.47,
"grad_norm": 3.7752277109004733,
"learning_rate": 4.052733130392367e-06,
"loss": 0.65,
"step": 595
},
{
"epoch": 1.48,
"grad_norm": 3.63792060813426,
"learning_rate": 3.881135987312758e-06,
"loss": 0.633,
"step": 600
},
{
"epoch": 1.48,
"eval_loss": 1.2404245138168335,
"eval_runtime": 36.9,
"eval_samples_per_second": 35.501,
"eval_steps_per_second": 1.111,
"step": 600
},
{
"epoch": 1.49,
"grad_norm": 3.629431521288044,
"learning_rate": 3.712371854041654e-06,
"loss": 0.6252,
"step": 605
},
{
"epoch": 1.5,
"grad_norm": 3.7538748748698962,
"learning_rate": 3.546518867704499e-06,
"loss": 0.6549,
"step": 610
},
{
"epoch": 1.51,
"grad_norm": 3.5943200372390676,
"learning_rate": 3.383653817577216e-06,
"loss": 0.6365,
"step": 615
},
{
"epoch": 1.53,
"grad_norm": 3.7988035352394673,
"learning_rate": 3.223852109533112e-06,
"loss": 0.6157,
"step": 620
},
{
"epoch": 1.54,
"grad_norm": 3.7396973266303886,
"learning_rate": 3.0671877311302247e-06,
"loss": 0.6252,
"step": 625
},
{
"epoch": 1.54,
"eval_loss": 1.2332385778427124,
"eval_runtime": 36.3217,
"eval_samples_per_second": 36.067,
"eval_steps_per_second": 1.129,
"step": 625
},
{
"epoch": 1.55,
"grad_norm": 3.6119019240279258,
"learning_rate": 2.9137332173554043e-06,
"loss": 0.6215,
"step": 630
},
{
"epoch": 1.56,
"grad_norm": 3.561248255506443,
"learning_rate": 2.763559617040876e-06,
"loss": 0.6364,
"step": 635
},
{
"epoch": 1.58,
"grad_norm": 4.068465368690645,
"learning_rate": 2.616736459968936e-06,
"loss": 0.6193,
"step": 640
},
{
"epoch": 1.59,
"grad_norm": 3.808522974597512,
"learning_rate": 2.473331724679917e-06,
"loss": 0.6622,
"step": 645
},
{
"epoch": 1.6,
"grad_norm": 3.743484183877061,
"learning_rate": 2.3334118069984102e-06,
"loss": 0.6423,
"step": 650
},
{
"epoch": 1.6,
"eval_loss": 1.2212291955947876,
"eval_runtime": 36.995,
"eval_samples_per_second": 35.41,
"eval_steps_per_second": 1.108,
"step": 650
},
{
"epoch": 1.61,
"grad_norm": 3.7762899770508453,
"learning_rate": 2.197041489292244e-06,
"loss": 0.64,
"step": 655
},
{
"epoch": 1.63,
"grad_norm": 3.7440789835416575,
"learning_rate": 2.064283910478527e-06,
"loss": 0.6627,
"step": 660
},
{
"epoch": 1.64,
"grad_norm": 3.53490892439879,
"learning_rate": 1.9352005367905536e-06,
"loss": 0.6253,
"step": 665
},
{
"epoch": 1.65,
"grad_norm": 3.743044691468231,
"learning_rate": 1.8098511333192026e-06,
"loss": 0.6248,
"step": 670
},
{
"epoch": 1.66,
"grad_norm": 3.508178112989742,
"learning_rate": 1.6882937363419205e-06,
"loss": 0.6182,
"step": 675
},
{
"epoch": 1.66,
"eval_loss": 1.2115447521209717,
"eval_runtime": 36.7342,
"eval_samples_per_second": 35.662,
"eval_steps_per_second": 1.116,
"step": 675
},
{
"epoch": 1.67,
"grad_norm": 3.82584115413825,
"learning_rate": 1.5705846264521728e-06,
"loss": 0.6159,
"step": 680
},
{
"epoch": 1.69,
"grad_norm": 3.805908154584629,
"learning_rate": 1.45677830250173e-06,
"loss": 0.5992,
"step": 685
},
{
"epoch": 1.7,
"grad_norm": 3.6691587173463174,
"learning_rate": 1.3469274563679401e-06,
"loss": 0.6379,
"step": 690
},
{
"epoch": 1.71,
"grad_norm": 3.6099414977809534,
"learning_rate": 1.2410829485575703e-06,
"loss": 0.6217,
"step": 695
},
{
"epoch": 1.72,
"grad_norm": 3.6351646415567833,
"learning_rate": 1.1392937846586216e-06,
"loss": 0.6218,
"step": 700
},
{
"epoch": 1.72,
"eval_loss": 1.2099106311798096,
"eval_runtime": 37.2149,
"eval_samples_per_second": 35.201,
"eval_steps_per_second": 1.102,
"step": 700
},
{
"epoch": 1.74,
"grad_norm": 3.5759017601516625,
"learning_rate": 1.0416070926509114e-06,
"loss": 0.625,
"step": 705
},
{
"epoch": 1.75,
"grad_norm": 4.079216714693347,
"learning_rate": 9.48068101086026e-07,
"loss": 0.6503,
"step": 710
},
{
"epoch": 1.76,
"grad_norm": 3.690374175674403,
"learning_rate": 8.58720118146662e-07,
"loss": 0.6279,
"step": 715
},
{
"epoch": 1.77,
"grad_norm": 3.653451951807952,
"learning_rate": 7.736045115951252e-07,
"loss": 0.6214,
"step": 720
},
{
"epoch": 1.79,
"grad_norm": 3.9843182779544253,
"learning_rate": 6.927606896202066e-07,
"loss": 0.6293,
"step": 725
},
{
"epoch": 1.79,
"eval_loss": 1.2038028240203857,
"eval_runtime": 36.8911,
"eval_samples_per_second": 35.51,
"eval_steps_per_second": 1.111,
"step": 725
},
{
"epoch": 1.8,
"grad_norm": 3.625232494017856,
"learning_rate": 6.162260825913591e-07,
"loss": 0.6138,
"step": 730
},
{
"epoch": 1.81,
"grad_norm": 3.438461239374458,
"learning_rate": 5.440361257285742e-07,
"loss": 0.6035,
"step": 735
},
{
"epoch": 1.82,
"grad_norm": 3.468914334826838,
"learning_rate": 4.762242426960262e-07,
"loss": 0.6123,
"step": 740
},
{
"epoch": 1.83,
"grad_norm": 3.4298511965711214,
"learning_rate": 4.128218301270359e-07,
"loss": 0.5911,
"step": 745
},
{
"epoch": 1.85,
"grad_norm": 3.648217807793934,
"learning_rate": 3.538582430875659e-07,
"loss": 0.5978,
"step": 750
},
{
"epoch": 1.85,
"eval_loss": 1.20143461227417,
"eval_runtime": 36.7633,
"eval_samples_per_second": 35.633,
"eval_steps_per_second": 1.115,
"step": 750
},
{
"epoch": 1.86,
"grad_norm": 3.6847453856191597,
"learning_rate": 2.9936078148492973e-07,
"loss": 0.6013,
"step": 755
},
{
"epoch": 1.87,
"grad_norm": 3.6349536920444874,
"learning_rate": 2.493546774280531e-07,
"loss": 0.6004,
"step": 760
},
{
"epoch": 1.88,
"grad_norm": 3.5351570751944594,
"learning_rate": 2.0386308354509944e-07,
"loss": 0.5957,
"step": 765
},
{
"epoch": 1.9,
"grad_norm": 3.623279846120391,
"learning_rate": 1.6290706226390286e-07,
"loss": 0.5953,
"step": 770
},
{
"epoch": 1.91,
"grad_norm": 3.6727168784725897,
"learning_rate": 1.2650557606013635e-07,
"loss": 0.6176,
"step": 775
},
{
"epoch": 1.91,
"eval_loss": 1.19950532913208,
"eval_runtime": 36.6969,
"eval_samples_per_second": 35.698,
"eval_steps_per_second": 1.117,
"step": 775
},
{
"epoch": 1.92,
"grad_norm": 3.873849616801828,
"learning_rate": 9.467547867777261e-08,
"loss": 0.5817,
"step": 780
},
{
"epoch": 1.93,
"grad_norm": 3.554084353387301,
"learning_rate": 6.74315073258569e-08,
"loss": 0.6145,
"step": 785
},
{
"epoch": 1.95,
"grad_norm": 7.330372861717701,
"learning_rate": 4.478627585524753e-08,
"loss": 0.6256,
"step": 790
},
{
"epoch": 1.96,
"grad_norm": 3.617776301851038,
"learning_rate": 2.675026891844512e-08,
"loss": 0.6224,
"step": 795
},
{
"epoch": 1.97,
"grad_norm": 3.5902020548965763,
"learning_rate": 1.3331837115241331e-08,
"loss": 0.6475,
"step": 800
},
{
"epoch": 1.97,
"eval_loss": 1.199601411819458,
"eval_runtime": 36.7572,
"eval_samples_per_second": 35.639,
"eval_steps_per_second": 1.115,
"step": 800
},
{
"epoch": 1.98,
"grad_norm": 3.5573953568277306,
"learning_rate": 4.537193126427086e-09,
"loss": 0.5941,
"step": 805
},
{
"epoch": 2.0,
"grad_norm": 3.684445766409529,
"learning_rate": 3.7040883734462685e-10,
"loss": 0.6021,
"step": 810
},
{
"epoch": 2.0,
"step": 812,
"total_flos": 42504070103040.0,
"train_loss": 1.0263223388218528,
"train_runtime": 5267.6103,
"train_samples_per_second": 4.93,
"train_steps_per_second": 0.154
}
],
"logging_steps": 5,
"max_steps": 812,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"total_flos": 42504070103040.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}