{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1266,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023696682464454975,
"grad_norm": 1.8578642304403785,
"learning_rate": 5e-06,
"loss": 0.8806,
"step": 10
},
{
"epoch": 0.04739336492890995,
"grad_norm": 1.183006030611099,
"learning_rate": 5e-06,
"loss": 0.7797,
"step": 20
},
{
"epoch": 0.07109004739336493,
"grad_norm": 1.4765088938253328,
"learning_rate": 5e-06,
"loss": 0.7484,
"step": 30
},
{
"epoch": 0.0947867298578199,
"grad_norm": 1.163022578781007,
"learning_rate": 5e-06,
"loss": 0.7347,
"step": 40
},
{
"epoch": 0.11848341232227488,
"grad_norm": 0.7554926726458471,
"learning_rate": 5e-06,
"loss": 0.7203,
"step": 50
},
{
"epoch": 0.14218009478672985,
"grad_norm": 0.731542170193161,
"learning_rate": 5e-06,
"loss": 0.7096,
"step": 60
},
{
"epoch": 0.16587677725118483,
"grad_norm": 0.9060440478303672,
"learning_rate": 5e-06,
"loss": 0.7059,
"step": 70
},
{
"epoch": 0.1895734597156398,
"grad_norm": 0.7296366219850153,
"learning_rate": 5e-06,
"loss": 0.7001,
"step": 80
},
{
"epoch": 0.2132701421800948,
"grad_norm": 0.7426978264461702,
"learning_rate": 5e-06,
"loss": 0.6962,
"step": 90
},
{
"epoch": 0.23696682464454977,
"grad_norm": 0.6188812479024083,
"learning_rate": 5e-06,
"loss": 0.6868,
"step": 100
},
{
"epoch": 0.26066350710900477,
"grad_norm": 0.5476530648144865,
"learning_rate": 5e-06,
"loss": 0.6824,
"step": 110
},
{
"epoch": 0.2843601895734597,
"grad_norm": 0.694221105485736,
"learning_rate": 5e-06,
"loss": 0.679,
"step": 120
},
{
"epoch": 0.3080568720379147,
"grad_norm": 0.7560482674288443,
"learning_rate": 5e-06,
"loss": 0.6827,
"step": 130
},
{
"epoch": 0.33175355450236965,
"grad_norm": 0.7850327296092002,
"learning_rate": 5e-06,
"loss": 0.6733,
"step": 140
},
{
"epoch": 0.35545023696682465,
"grad_norm": 0.5570389442716736,
"learning_rate": 5e-06,
"loss": 0.6813,
"step": 150
},
{
"epoch": 0.3791469194312796,
"grad_norm": 0.9044873869853465,
"learning_rate": 5e-06,
"loss": 0.6764,
"step": 160
},
{
"epoch": 0.4028436018957346,
"grad_norm": 0.9187588354313664,
"learning_rate": 5e-06,
"loss": 0.6729,
"step": 170
},
{
"epoch": 0.4265402843601896,
"grad_norm": 0.5060518745683197,
"learning_rate": 5e-06,
"loss": 0.6691,
"step": 180
},
{
"epoch": 0.45023696682464454,
"grad_norm": 0.49447301037625124,
"learning_rate": 5e-06,
"loss": 0.6694,
"step": 190
},
{
"epoch": 0.47393364928909953,
"grad_norm": 0.6175555086839499,
"learning_rate": 5e-06,
"loss": 0.6681,
"step": 200
},
{
"epoch": 0.4976303317535545,
"grad_norm": 0.6085272766245269,
"learning_rate": 5e-06,
"loss": 0.6604,
"step": 210
},
{
"epoch": 0.5213270142180095,
"grad_norm": 0.5926380925408075,
"learning_rate": 5e-06,
"loss": 0.6656,
"step": 220
},
{
"epoch": 0.5450236966824644,
"grad_norm": 0.6160796498694022,
"learning_rate": 5e-06,
"loss": 0.663,
"step": 230
},
{
"epoch": 0.5687203791469194,
"grad_norm": 0.6914531474013237,
"learning_rate": 5e-06,
"loss": 0.6656,
"step": 240
},
{
"epoch": 0.5924170616113744,
"grad_norm": 0.6596857730992619,
"learning_rate": 5e-06,
"loss": 0.6572,
"step": 250
},
{
"epoch": 0.6161137440758294,
"grad_norm": 0.5279165722236415,
"learning_rate": 5e-06,
"loss": 0.6684,
"step": 260
},
{
"epoch": 0.6398104265402843,
"grad_norm": 0.5689701712166578,
"learning_rate": 5e-06,
"loss": 0.6548,
"step": 270
},
{
"epoch": 0.6635071090047393,
"grad_norm": 0.5166176607170971,
"learning_rate": 5e-06,
"loss": 0.6698,
"step": 280
},
{
"epoch": 0.6872037914691943,
"grad_norm": 0.6445425535849747,
"learning_rate": 5e-06,
"loss": 0.6579,
"step": 290
},
{
"epoch": 0.7109004739336493,
"grad_norm": 0.6506854686970152,
"learning_rate": 5e-06,
"loss": 0.6551,
"step": 300
},
{
"epoch": 0.7345971563981043,
"grad_norm": 0.5085915851296422,
"learning_rate": 5e-06,
"loss": 0.6616,
"step": 310
},
{
"epoch": 0.7582938388625592,
"grad_norm": 0.6487986243452837,
"learning_rate": 5e-06,
"loss": 0.6566,
"step": 320
},
{
"epoch": 0.7819905213270142,
"grad_norm": 0.5441246219276076,
"learning_rate": 5e-06,
"loss": 0.6521,
"step": 330
},
{
"epoch": 0.8056872037914692,
"grad_norm": 0.48980734623494515,
"learning_rate": 5e-06,
"loss": 0.6552,
"step": 340
},
{
"epoch": 0.8293838862559242,
"grad_norm": 0.3938069962058288,
"learning_rate": 5e-06,
"loss": 0.6503,
"step": 350
},
{
"epoch": 0.8530805687203792,
"grad_norm": 0.5653669786380396,
"learning_rate": 5e-06,
"loss": 0.653,
"step": 360
},
{
"epoch": 0.8767772511848341,
"grad_norm": 0.7322605559502221,
"learning_rate": 5e-06,
"loss": 0.6549,
"step": 370
},
{
"epoch": 0.9004739336492891,
"grad_norm": 0.6662476991971801,
"learning_rate": 5e-06,
"loss": 0.658,
"step": 380
},
{
"epoch": 0.9241706161137441,
"grad_norm": 0.568403065783914,
"learning_rate": 5e-06,
"loss": 0.6577,
"step": 390
},
{
"epoch": 0.9478672985781991,
"grad_norm": 0.46948514320296014,
"learning_rate": 5e-06,
"loss": 0.6459,
"step": 400
},
{
"epoch": 0.9715639810426541,
"grad_norm": 0.7292309309979981,
"learning_rate": 5e-06,
"loss": 0.6556,
"step": 410
},
{
"epoch": 0.995260663507109,
"grad_norm": 0.6757095412992554,
"learning_rate": 5e-06,
"loss": 0.6485,
"step": 420
},
{
"epoch": 1.0,
"eval_loss": 0.6513110399246216,
"eval_runtime": 226.1564,
"eval_samples_per_second": 50.257,
"eval_steps_per_second": 0.394,
"step": 422
},
{
"epoch": 1.018957345971564,
"grad_norm": 0.5805140570845687,
"learning_rate": 5e-06,
"loss": 0.6185,
"step": 430
},
{
"epoch": 1.042654028436019,
"grad_norm": 0.5434272081051958,
"learning_rate": 5e-06,
"loss": 0.6145,
"step": 440
},
{
"epoch": 1.066350710900474,
"grad_norm": 0.5326556907180255,
"learning_rate": 5e-06,
"loss": 0.6094,
"step": 450
},
{
"epoch": 1.0900473933649288,
"grad_norm": 0.5829211855668741,
"learning_rate": 5e-06,
"loss": 0.6097,
"step": 460
},
{
"epoch": 1.113744075829384,
"grad_norm": 0.6319252195360786,
"learning_rate": 5e-06,
"loss": 0.6194,
"step": 470
},
{
"epoch": 1.1374407582938388,
"grad_norm": 0.504231300635023,
"learning_rate": 5e-06,
"loss": 0.6055,
"step": 480
},
{
"epoch": 1.161137440758294,
"grad_norm": 0.47691471391766244,
"learning_rate": 5e-06,
"loss": 0.6103,
"step": 490
},
{
"epoch": 1.1848341232227488,
"grad_norm": 0.6579542570947285,
"learning_rate": 5e-06,
"loss": 0.5974,
"step": 500
},
{
"epoch": 1.2085308056872037,
"grad_norm": 0.5296651294386931,
"learning_rate": 5e-06,
"loss": 0.6087,
"step": 510
},
{
"epoch": 1.2322274881516588,
"grad_norm": 0.5136903442393255,
"learning_rate": 5e-06,
"loss": 0.6098,
"step": 520
},
{
"epoch": 1.2559241706161137,
"grad_norm": 0.4323386979195923,
"learning_rate": 5e-06,
"loss": 0.6061,
"step": 530
},
{
"epoch": 1.2796208530805688,
"grad_norm": 0.4569721261280212,
"learning_rate": 5e-06,
"loss": 0.6066,
"step": 540
},
{
"epoch": 1.3033175355450237,
"grad_norm": 0.6467514503519792,
"learning_rate": 5e-06,
"loss": 0.611,
"step": 550
},
{
"epoch": 1.3270142180094786,
"grad_norm": 0.5707325971904217,
"learning_rate": 5e-06,
"loss": 0.6149,
"step": 560
},
{
"epoch": 1.3507109004739337,
"grad_norm": 0.6351802477821956,
"learning_rate": 5e-06,
"loss": 0.6117,
"step": 570
},
{
"epoch": 1.3744075829383886,
"grad_norm": 0.4343155173009086,
"learning_rate": 5e-06,
"loss": 0.6061,
"step": 580
},
{
"epoch": 1.3981042654028437,
"grad_norm": 0.4859882284821603,
"learning_rate": 5e-06,
"loss": 0.6161,
"step": 590
},
{
"epoch": 1.4218009478672986,
"grad_norm": 0.5236535463545058,
"learning_rate": 5e-06,
"loss": 0.6096,
"step": 600
},
{
"epoch": 1.4454976303317535,
"grad_norm": 0.4557408062718408,
"learning_rate": 5e-06,
"loss": 0.6137,
"step": 610
},
{
"epoch": 1.4691943127962086,
"grad_norm": 0.4710881048666159,
"learning_rate": 5e-06,
"loss": 0.6104,
"step": 620
},
{
"epoch": 1.4928909952606635,
"grad_norm": 0.4693244351357125,
"learning_rate": 5e-06,
"loss": 0.6113,
"step": 630
},
{
"epoch": 1.5165876777251186,
"grad_norm": 0.5106722321731667,
"learning_rate": 5e-06,
"loss": 0.6099,
"step": 640
},
{
"epoch": 1.5402843601895735,
"grad_norm": 0.5008718823276928,
"learning_rate": 5e-06,
"loss": 0.6095,
"step": 650
},
{
"epoch": 1.5639810426540284,
"grad_norm": 0.5709206603606636,
"learning_rate": 5e-06,
"loss": 0.6072,
"step": 660
},
{
"epoch": 1.5876777251184833,
"grad_norm": 0.5000142373704568,
"learning_rate": 5e-06,
"loss": 0.6054,
"step": 670
},
{
"epoch": 1.6113744075829384,
"grad_norm": 0.503815554745896,
"learning_rate": 5e-06,
"loss": 0.609,
"step": 680
},
{
"epoch": 1.6350710900473935,
"grad_norm": 0.4901500964784556,
"learning_rate": 5e-06,
"loss": 0.6144,
"step": 690
},
{
"epoch": 1.6587677725118484,
"grad_norm": 0.7148462741898324,
"learning_rate": 5e-06,
"loss": 0.6171,
"step": 700
},
{
"epoch": 1.6824644549763033,
"grad_norm": 0.46371859890219064,
"learning_rate": 5e-06,
"loss": 0.6117,
"step": 710
},
{
"epoch": 1.7061611374407581,
"grad_norm": 0.4868566708300119,
"learning_rate": 5e-06,
"loss": 0.6084,
"step": 720
},
{
"epoch": 1.7298578199052133,
"grad_norm": 0.46557974227861393,
"learning_rate": 5e-06,
"loss": 0.612,
"step": 730
},
{
"epoch": 1.7535545023696684,
"grad_norm": 0.4614750853949275,
"learning_rate": 5e-06,
"loss": 0.6085,
"step": 740
},
{
"epoch": 1.7772511848341233,
"grad_norm": 0.46000509620123947,
"learning_rate": 5e-06,
"loss": 0.598,
"step": 750
},
{
"epoch": 1.8009478672985781,
"grad_norm": 0.5069124386887875,
"learning_rate": 5e-06,
"loss": 0.6165,
"step": 760
},
{
"epoch": 1.824644549763033,
"grad_norm": 0.5759098925487337,
"learning_rate": 5e-06,
"loss": 0.6055,
"step": 770
},
{
"epoch": 1.8483412322274881,
"grad_norm": 0.5688069546656076,
"learning_rate": 5e-06,
"loss": 0.603,
"step": 780
},
{
"epoch": 1.8720379146919433,
"grad_norm": 0.4094394097978499,
"learning_rate": 5e-06,
"loss": 0.6048,
"step": 790
},
{
"epoch": 1.8957345971563981,
"grad_norm": 0.5419176372666504,
"learning_rate": 5e-06,
"loss": 0.6113,
"step": 800
},
{
"epoch": 1.919431279620853,
"grad_norm": 0.5208348604849014,
"learning_rate": 5e-06,
"loss": 0.6031,
"step": 810
},
{
"epoch": 1.943127962085308,
"grad_norm": 0.528072870680137,
"learning_rate": 5e-06,
"loss": 0.6079,
"step": 820
},
{
"epoch": 1.966824644549763,
"grad_norm": 0.5102023817039236,
"learning_rate": 5e-06,
"loss": 0.6057,
"step": 830
},
{
"epoch": 1.9905213270142181,
"grad_norm": 0.4411278261182562,
"learning_rate": 5e-06,
"loss": 0.6114,
"step": 840
},
{
"epoch": 2.0,
"eval_loss": 0.6409755349159241,
"eval_runtime": 227.48,
"eval_samples_per_second": 49.965,
"eval_steps_per_second": 0.391,
"step": 844
},
{
"epoch": 2.014218009478673,
"grad_norm": 0.7123835129441645,
"learning_rate": 5e-06,
"loss": 0.5761,
"step": 850
},
{
"epoch": 2.037914691943128,
"grad_norm": 0.5536575003997367,
"learning_rate": 5e-06,
"loss": 0.5637,
"step": 860
},
{
"epoch": 2.061611374407583,
"grad_norm": 0.5997737415317981,
"learning_rate": 5e-06,
"loss": 0.5558,
"step": 870
},
{
"epoch": 2.085308056872038,
"grad_norm": 0.7669561696901881,
"learning_rate": 5e-06,
"loss": 0.5628,
"step": 880
},
{
"epoch": 2.109004739336493,
"grad_norm": 0.809324396115254,
"learning_rate": 5e-06,
"loss": 0.5659,
"step": 890
},
{
"epoch": 2.132701421800948,
"grad_norm": 0.632132677022118,
"learning_rate": 5e-06,
"loss": 0.5627,
"step": 900
},
{
"epoch": 2.156398104265403,
"grad_norm": 0.5363755744993185,
"learning_rate": 5e-06,
"loss": 0.5613,
"step": 910
},
{
"epoch": 2.1800947867298577,
"grad_norm": 0.7992701994586238,
"learning_rate": 5e-06,
"loss": 0.5647,
"step": 920
},
{
"epoch": 2.2037914691943126,
"grad_norm": 0.5137612186247765,
"learning_rate": 5e-06,
"loss": 0.5717,
"step": 930
},
{
"epoch": 2.227488151658768,
"grad_norm": 0.48307350386563647,
"learning_rate": 5e-06,
"loss": 0.5707,
"step": 940
},
{
"epoch": 2.251184834123223,
"grad_norm": 0.4585281852449342,
"learning_rate": 5e-06,
"loss": 0.5704,
"step": 950
},
{
"epoch": 2.2748815165876777,
"grad_norm": 0.5158085231030699,
"learning_rate": 5e-06,
"loss": 0.5683,
"step": 960
},
{
"epoch": 2.2985781990521326,
"grad_norm": 0.5090415152331892,
"learning_rate": 5e-06,
"loss": 0.5675,
"step": 970
},
{
"epoch": 2.322274881516588,
"grad_norm": 0.5033009699976476,
"learning_rate": 5e-06,
"loss": 0.5677,
"step": 980
},
{
"epoch": 2.345971563981043,
"grad_norm": 0.6352174600369093,
"learning_rate": 5e-06,
"loss": 0.5713,
"step": 990
},
{
"epoch": 2.3696682464454977,
"grad_norm": 0.6200134705498003,
"learning_rate": 5e-06,
"loss": 0.5697,
"step": 1000
},
{
"epoch": 2.3933649289099526,
"grad_norm": 0.5541614450406952,
"learning_rate": 5e-06,
"loss": 0.5656,
"step": 1010
},
{
"epoch": 2.4170616113744074,
"grad_norm": 0.5662092850065095,
"learning_rate": 5e-06,
"loss": 0.5667,
"step": 1020
},
{
"epoch": 2.4407582938388623,
"grad_norm": 0.527115473356076,
"learning_rate": 5e-06,
"loss": 0.5695,
"step": 1030
},
{
"epoch": 2.4644549763033177,
"grad_norm": 0.5199769536715289,
"learning_rate": 5e-06,
"loss": 0.5698,
"step": 1040
},
{
"epoch": 2.4881516587677726,
"grad_norm": 0.5592591939664879,
"learning_rate": 5e-06,
"loss": 0.5649,
"step": 1050
},
{
"epoch": 2.5118483412322274,
"grad_norm": 0.5646160398662861,
"learning_rate": 5e-06,
"loss": 0.5723,
"step": 1060
},
{
"epoch": 2.5355450236966823,
"grad_norm": 0.45824336816755823,
"learning_rate": 5e-06,
"loss": 0.5693,
"step": 1070
},
{
"epoch": 2.5592417061611377,
"grad_norm": 0.5244304501627578,
"learning_rate": 5e-06,
"loss": 0.5611,
"step": 1080
},
{
"epoch": 2.5829383886255926,
"grad_norm": 0.5019930257935107,
"learning_rate": 5e-06,
"loss": 0.5703,
"step": 1090
},
{
"epoch": 2.6066350710900474,
"grad_norm": 0.4876750231159846,
"learning_rate": 5e-06,
"loss": 0.5642,
"step": 1100
},
{
"epoch": 2.6303317535545023,
"grad_norm": 0.436840454337667,
"learning_rate": 5e-06,
"loss": 0.5727,
"step": 1110
},
{
"epoch": 2.654028436018957,
"grad_norm": 0.5230375266459463,
"learning_rate": 5e-06,
"loss": 0.5723,
"step": 1120
},
{
"epoch": 2.677725118483412,
"grad_norm": 0.4743811030949543,
"learning_rate": 5e-06,
"loss": 0.5738,
"step": 1130
},
{
"epoch": 2.7014218009478674,
"grad_norm": 0.480507040656355,
"learning_rate": 5e-06,
"loss": 0.5719,
"step": 1140
},
{
"epoch": 2.7251184834123223,
"grad_norm": 0.4931664579971654,
"learning_rate": 5e-06,
"loss": 0.5668,
"step": 1150
},
{
"epoch": 2.748815165876777,
"grad_norm": 0.5124020975818223,
"learning_rate": 5e-06,
"loss": 0.5674,
"step": 1160
},
{
"epoch": 2.772511848341232,
"grad_norm": 0.4542681202604461,
"learning_rate": 5e-06,
"loss": 0.579,
"step": 1170
},
{
"epoch": 2.7962085308056874,
"grad_norm": 0.49551548889838143,
"learning_rate": 5e-06,
"loss": 0.571,
"step": 1180
},
{
"epoch": 2.8199052132701423,
"grad_norm": 0.47230557330706946,
"learning_rate": 5e-06,
"loss": 0.5684,
"step": 1190
},
{
"epoch": 2.843601895734597,
"grad_norm": 0.4780110660351261,
"learning_rate": 5e-06,
"loss": 0.5709,
"step": 1200
},
{
"epoch": 2.867298578199052,
"grad_norm": 0.49898222281980387,
"learning_rate": 5e-06,
"loss": 0.5663,
"step": 1210
},
{
"epoch": 2.890995260663507,
"grad_norm": 0.5319484749651314,
"learning_rate": 5e-06,
"loss": 0.5617,
"step": 1220
},
{
"epoch": 2.914691943127962,
"grad_norm": 0.47502518298760565,
"learning_rate": 5e-06,
"loss": 0.5673,
"step": 1230
},
{
"epoch": 2.938388625592417,
"grad_norm": 0.5666269567982688,
"learning_rate": 5e-06,
"loss": 0.5735,
"step": 1240
},
{
"epoch": 2.962085308056872,
"grad_norm": 0.5406615669892904,
"learning_rate": 5e-06,
"loss": 0.5753,
"step": 1250
},
{
"epoch": 2.985781990521327,
"grad_norm": 0.4880390264779233,
"learning_rate": 5e-06,
"loss": 0.5796,
"step": 1260
},
{
"epoch": 3.0,
"eval_loss": 0.6424745321273804,
"eval_runtime": 228.2499,
"eval_samples_per_second": 49.796,
"eval_steps_per_second": 0.39,
"step": 1266
},
{
"epoch": 3.0,
"step": 1266,
"total_flos": 2120178393415680.0,
"train_loss": 0.6194126374348645,
"train_runtime": 38258.2762,
"train_samples_per_second": 16.934,
"train_steps_per_second": 0.033
}
],
"logging_steps": 10,
"max_steps": 1266,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2120178393415680.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}