{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.991111111111111,
"eval_steps": 500,
"global_step": 112,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017777777777777778,
"grad_norm": 2.4493284225463867,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.8771,
"step": 1
},
{
"epoch": 0.035555555555555556,
"grad_norm": 2.5336005687713623,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.8888,
"step": 2
},
{
"epoch": 0.05333333333333334,
"grad_norm": 2.3694822788238525,
"learning_rate": 5e-06,
"loss": 0.8663,
"step": 3
},
{
"epoch": 0.07111111111111111,
"grad_norm": 2.06482195854187,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8508,
"step": 4
},
{
"epoch": 0.08888888888888889,
"grad_norm": 1.5644984245300293,
"learning_rate": 8.333333333333334e-06,
"loss": 0.835,
"step": 5
},
{
"epoch": 0.10666666666666667,
"grad_norm": 1.4116849899291992,
"learning_rate": 1e-05,
"loss": 0.8097,
"step": 6
},
{
"epoch": 0.12444444444444444,
"grad_norm": 1.1847808361053467,
"learning_rate": 1.1666666666666668e-05,
"loss": 0.7754,
"step": 7
},
{
"epoch": 0.14222222222222222,
"grad_norm": 1.0902292728424072,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.7364,
"step": 8
},
{
"epoch": 0.16,
"grad_norm": 1.21918785572052,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.7091,
"step": 9
},
{
"epoch": 0.17777777777777778,
"grad_norm": 0.9935525059700012,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.6956,
"step": 10
},
{
"epoch": 0.19555555555555557,
"grad_norm": 1.4380978345870972,
"learning_rate": 1.8333333333333333e-05,
"loss": 0.6811,
"step": 11
},
{
"epoch": 0.21333333333333335,
"grad_norm": 1.139083743095398,
"learning_rate": 2e-05,
"loss": 0.681,
"step": 12
},
{
"epoch": 0.2311111111111111,
"grad_norm": 1.3757270574569702,
"learning_rate": 1.9995065603657317e-05,
"loss": 0.6716,
"step": 13
},
{
"epoch": 0.24888888888888888,
"grad_norm": 1.0465571880340576,
"learning_rate": 1.9980267284282718e-05,
"loss": 0.635,
"step": 14
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.9036633968353271,
"learning_rate": 1.99556196460308e-05,
"loss": 0.6447,
"step": 15
},
{
"epoch": 0.28444444444444444,
"grad_norm": 0.8748593330383301,
"learning_rate": 1.9921147013144782e-05,
"loss": 0.6476,
"step": 16
},
{
"epoch": 0.3022222222222222,
"grad_norm": 0.7611545920372009,
"learning_rate": 1.9876883405951378e-05,
"loss": 0.6097,
"step": 17
},
{
"epoch": 0.32,
"grad_norm": 0.8435001373291016,
"learning_rate": 1.982287250728689e-05,
"loss": 0.6127,
"step": 18
},
{
"epoch": 0.3377777777777778,
"grad_norm": 1.1226811408996582,
"learning_rate": 1.9759167619387474e-05,
"loss": 0.6112,
"step": 19
},
{
"epoch": 0.35555555555555557,
"grad_norm": 0.6948593258857727,
"learning_rate": 1.9685831611286312e-05,
"loss": 0.6022,
"step": 20
},
{
"epoch": 0.37333333333333335,
"grad_norm": 0.7424058318138123,
"learning_rate": 1.9602936856769432e-05,
"loss": 0.595,
"step": 21
},
{
"epoch": 0.39111111111111113,
"grad_norm": 0.6478083729743958,
"learning_rate": 1.9510565162951538e-05,
"loss": 0.6057,
"step": 22
},
{
"epoch": 0.4088888888888889,
"grad_norm": 0.6245326399803162,
"learning_rate": 1.9408807689542257e-05,
"loss": 0.6067,
"step": 23
},
{
"epoch": 0.4266666666666667,
"grad_norm": 0.6616186499595642,
"learning_rate": 1.9297764858882516e-05,
"loss": 0.599,
"step": 24
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.5162010192871094,
"learning_rate": 1.9177546256839814e-05,
"loss": 0.5856,
"step": 25
},
{
"epoch": 0.4622222222222222,
"grad_norm": 0.46659624576568604,
"learning_rate": 1.9048270524660197e-05,
"loss": 0.5798,
"step": 26
},
{
"epoch": 0.48,
"grad_norm": 0.48762547969818115,
"learning_rate": 1.891006524188368e-05,
"loss": 0.5785,
"step": 27
},
{
"epoch": 0.49777777777777776,
"grad_norm": 0.425236314535141,
"learning_rate": 1.8763066800438638e-05,
"loss": 0.5885,
"step": 28
},
{
"epoch": 0.5155555555555555,
"grad_norm": 0.4328088164329529,
"learning_rate": 1.860742027003944e-05,
"loss": 0.5897,
"step": 29
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.495597779750824,
"learning_rate": 1.8443279255020153e-05,
"loss": 0.5739,
"step": 30
},
{
"epoch": 0.5511111111111111,
"grad_norm": 0.3705892264842987,
"learning_rate": 1.827080574274562e-05,
"loss": 0.5742,
"step": 31
},
{
"epoch": 0.5688888888888889,
"grad_norm": 0.35032135248184204,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.5548,
"step": 32
},
{
"epoch": 0.5866666666666667,
"grad_norm": 0.3868001103401184,
"learning_rate": 1.7901550123756906e-05,
"loss": 0.5879,
"step": 33
},
{
"epoch": 0.6044444444444445,
"grad_norm": 0.34849685430526733,
"learning_rate": 1.7705132427757895e-05,
"loss": 0.5588,
"step": 34
},
{
"epoch": 0.6222222222222222,
"grad_norm": 0.3659916818141937,
"learning_rate": 1.7501110696304598e-05,
"loss": 0.5561,
"step": 35
},
{
"epoch": 0.64,
"grad_norm": 0.3479112982749939,
"learning_rate": 1.7289686274214116e-05,
"loss": 0.566,
"step": 36
},
{
"epoch": 0.6577777777777778,
"grad_norm": 0.38614964485168457,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.5702,
"step": 37
},
{
"epoch": 0.6755555555555556,
"grad_norm": 0.32522615790367126,
"learning_rate": 1.684547105928689e-05,
"loss": 0.5646,
"step": 38
},
{
"epoch": 0.6933333333333334,
"grad_norm": 0.34715840220451355,
"learning_rate": 1.661311865323652e-05,
"loss": 0.5629,
"step": 39
},
{
"epoch": 0.7111111111111111,
"grad_norm": 0.38434770703315735,
"learning_rate": 1.63742398974869e-05,
"loss": 0.5461,
"step": 40
},
{
"epoch": 0.7288888888888889,
"grad_norm": 0.32962343096733093,
"learning_rate": 1.6129070536529767e-05,
"loss": 0.5668,
"step": 41
},
{
"epoch": 0.7466666666666667,
"grad_norm": 0.37483224272727966,
"learning_rate": 1.5877852522924733e-05,
"loss": 0.5656,
"step": 42
},
{
"epoch": 0.7644444444444445,
"grad_norm": 0.30097684264183044,
"learning_rate": 1.5620833778521306e-05,
"loss": 0.5529,
"step": 43
},
{
"epoch": 0.7822222222222223,
"grad_norm": 0.33267274498939514,
"learning_rate": 1.5358267949789968e-05,
"loss": 0.5563,
"step": 44
},
{
"epoch": 0.8,
"grad_norm": 0.33388659358024597,
"learning_rate": 1.5090414157503715e-05,
"loss": 0.5665,
"step": 45
},
{
"epoch": 0.8177777777777778,
"grad_norm": 0.3299979269504547,
"learning_rate": 1.4817536741017153e-05,
"loss": 0.5527,
"step": 46
},
{
"epoch": 0.8355555555555556,
"grad_norm": 0.30535200238227844,
"learning_rate": 1.4539904997395468e-05,
"loss": 0.5662,
"step": 47
},
{
"epoch": 0.8533333333333334,
"grad_norm": 0.32509496808052063,
"learning_rate": 1.4257792915650728e-05,
"loss": 0.5447,
"step": 48
},
{
"epoch": 0.8711111111111111,
"grad_norm": 0.3176334798336029,
"learning_rate": 1.3971478906347806e-05,
"loss": 0.5464,
"step": 49
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.30327945947647095,
"learning_rate": 1.3681245526846782e-05,
"loss": 0.5449,
"step": 50
},
{
"epoch": 0.9066666666666666,
"grad_norm": 0.2697887718677521,
"learning_rate": 1.3387379202452917e-05,
"loss": 0.54,
"step": 51
},
{
"epoch": 0.9244444444444444,
"grad_norm": 0.2823696434497833,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.5543,
"step": 52
},
{
"epoch": 0.9422222222222222,
"grad_norm": 0.30424606800079346,
"learning_rate": 1.2789911060392295e-05,
"loss": 0.5514,
"step": 53
},
{
"epoch": 0.96,
"grad_norm": 0.26427096128463745,
"learning_rate": 1.2486898871648552e-05,
"loss": 0.5588,
"step": 54
},
{
"epoch": 0.9777777777777777,
"grad_norm": 0.27375537157058716,
"learning_rate": 1.2181432413965428e-05,
"loss": 0.5429,
"step": 55
},
{
"epoch": 0.9955555555555555,
"grad_norm": 0.2701137363910675,
"learning_rate": 1.187381314585725e-05,
"loss": 0.5585,
"step": 56
},
{
"epoch": 1.0133333333333334,
"grad_norm": 0.5673031806945801,
"learning_rate": 1.156434465040231e-05,
"loss": 0.9233,
"step": 57
},
{
"epoch": 1.031111111111111,
"grad_norm": 0.29599809646606445,
"learning_rate": 1.1253332335643043e-05,
"loss": 0.544,
"step": 58
},
{
"epoch": 1.048888888888889,
"grad_norm": 0.3540167808532715,
"learning_rate": 1.0941083133185146e-05,
"loss": 0.5149,
"step": 59
},
{
"epoch": 1.0666666666666667,
"grad_norm": 0.3205398917198181,
"learning_rate": 1.0627905195293135e-05,
"loss": 0.4813,
"step": 60
},
{
"epoch": 1.0844444444444445,
"grad_norm": 0.35224997997283936,
"learning_rate": 1.0314107590781284e-05,
"loss": 0.5588,
"step": 61
},
{
"epoch": 1.1022222222222222,
"grad_norm": 0.2714930474758148,
"learning_rate": 1e-05,
"loss": 0.4924,
"step": 62
},
{
"epoch": 1.12,
"grad_norm": 0.2932974398136139,
"learning_rate": 9.685892409218718e-06,
"loss": 0.4994,
"step": 63
},
{
"epoch": 1.1377777777777778,
"grad_norm": 0.29003581404685974,
"learning_rate": 9.372094804706867e-06,
"loss": 0.4862,
"step": 64
},
{
"epoch": 1.1555555555555554,
"grad_norm": 0.31004598736763,
"learning_rate": 9.058916866814857e-06,
"loss": 0.5294,
"step": 65
},
{
"epoch": 1.1733333333333333,
"grad_norm": 0.26587504148483276,
"learning_rate": 8.746667664356957e-06,
"loss": 0.5222,
"step": 66
},
{
"epoch": 1.1911111111111112,
"grad_norm": 0.25245964527130127,
"learning_rate": 8.43565534959769e-06,
"loss": 0.4697,
"step": 67
},
{
"epoch": 1.208888888888889,
"grad_norm": 0.27027666568756104,
"learning_rate": 8.126186854142752e-06,
"loss": 0.5343,
"step": 68
},
{
"epoch": 1.2266666666666666,
"grad_norm": 0.25415799021720886,
"learning_rate": 7.818567586034578e-06,
"loss": 0.4861,
"step": 69
},
{
"epoch": 1.2444444444444445,
"grad_norm": 0.25560981035232544,
"learning_rate": 7.513101128351454e-06,
"loss": 0.5141,
"step": 70
},
{
"epoch": 1.2622222222222224,
"grad_norm": 0.2490381896495819,
"learning_rate": 7.210088939607709e-06,
"loss": 0.4685,
"step": 71
},
{
"epoch": 1.28,
"grad_norm": 0.2686901092529297,
"learning_rate": 6.909830056250527e-06,
"loss": 0.5433,
"step": 72
},
{
"epoch": 1.2977777777777777,
"grad_norm": 0.19526812434196472,
"learning_rate": 6.612620797547087e-06,
"loss": 0.4569,
"step": 73
},
{
"epoch": 1.3155555555555556,
"grad_norm": 0.2415803074836731,
"learning_rate": 6.318754473153221e-06,
"loss": 0.5253,
"step": 74
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.19613640010356903,
"learning_rate": 6.028521093652195e-06,
"loss": 0.4791,
"step": 75
},
{
"epoch": 1.3511111111111112,
"grad_norm": 0.23172248899936676,
"learning_rate": 5.742207084349274e-06,
"loss": 0.5268,
"step": 76
},
{
"epoch": 1.3688888888888888,
"grad_norm": 0.22319753468036652,
"learning_rate": 5.460095002604533e-06,
"loss": 0.5033,
"step": 77
},
{
"epoch": 1.3866666666666667,
"grad_norm": 0.253582626581192,
"learning_rate": 5.1824632589828465e-06,
"loss": 0.5263,
"step": 78
},
{
"epoch": 1.4044444444444444,
"grad_norm": 0.22795237600803375,
"learning_rate": 4.909585842496287e-06,
"loss": 0.4646,
"step": 79
},
{
"epoch": 1.4222222222222223,
"grad_norm": 0.21554000675678253,
"learning_rate": 4.641732050210032e-06,
"loss": 0.4976,
"step": 80
},
{
"epoch": 1.44,
"grad_norm": 0.22318841516971588,
"learning_rate": 4.379166221478697e-06,
"loss": 0.5116,
"step": 81
},
{
"epoch": 1.4577777777777778,
"grad_norm": 0.22658716142177582,
"learning_rate": 4.12214747707527e-06,
"loss": 0.5323,
"step": 82
},
{
"epoch": 1.4755555555555555,
"grad_norm": 0.20563125610351562,
"learning_rate": 3.8709294634702374e-06,
"loss": 0.4499,
"step": 83
},
{
"epoch": 1.4933333333333334,
"grad_norm": 0.20641934871673584,
"learning_rate": 3.625760102513103e-06,
"loss": 0.4826,
"step": 84
},
{
"epoch": 1.511111111111111,
"grad_norm": 0.2252846360206604,
"learning_rate": 3.3868813467634833e-06,
"loss": 0.5218,
"step": 85
},
{
"epoch": 1.528888888888889,
"grad_norm": 0.24224503338336945,
"learning_rate": 3.1545289407131128e-06,
"loss": 0.4953,
"step": 86
},
{
"epoch": 1.5466666666666666,
"grad_norm": 0.20531503856182098,
"learning_rate": 2.9289321881345257e-06,
"loss": 0.5141,
"step": 87
},
{
"epoch": 1.5644444444444443,
"grad_norm": 0.2080065906047821,
"learning_rate": 2.7103137257858867e-06,
"loss": 0.5073,
"step": 88
},
{
"epoch": 1.5822222222222222,
"grad_norm": 0.19242417812347412,
"learning_rate": 2.4988893036954045e-06,
"loss": 0.4792,
"step": 89
},
{
"epoch": 1.6,
"grad_norm": 0.2007436901330948,
"learning_rate": 2.2948675722421086e-06,
"loss": 0.5097,
"step": 90
},
{
"epoch": 1.6177777777777778,
"grad_norm": 0.19588816165924072,
"learning_rate": 2.098449876243096e-06,
"loss": 0.4919,
"step": 91
},
{
"epoch": 1.6355555555555554,
"grad_norm": 0.19348369538784027,
"learning_rate": 1.9098300562505266e-06,
"loss": 0.5275,
"step": 92
},
{
"epoch": 1.6533333333333333,
"grad_norm": 0.19428110122680664,
"learning_rate": 1.7291942572543806e-06,
"loss": 0.4784,
"step": 93
},
{
"epoch": 1.6711111111111112,
"grad_norm": 0.1935623586177826,
"learning_rate": 1.5567207449798517e-06,
"loss": 0.5102,
"step": 94
},
{
"epoch": 1.6888888888888889,
"grad_norm": 0.20146863162517548,
"learning_rate": 1.3925797299605649e-06,
"loss": 0.4932,
"step": 95
},
{
"epoch": 1.7066666666666666,
"grad_norm": 0.2071317434310913,
"learning_rate": 1.2369331995613664e-06,
"loss": 0.5106,
"step": 96
},
{
"epoch": 1.7244444444444444,
"grad_norm": 0.1940830498933792,
"learning_rate": 1.0899347581163222e-06,
"loss": 0.51,
"step": 97
},
{
"epoch": 1.7422222222222223,
"grad_norm": 0.18540237843990326,
"learning_rate": 9.517294753398066e-07,
"loss": 0.4936,
"step": 98
},
{
"epoch": 1.76,
"grad_norm": 0.18730789422988892,
"learning_rate": 8.224537431601886e-07,
"loss": 0.4605,
"step": 99
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.18820004165172577,
"learning_rate": 7.022351411174866e-07,
"loss": 0.5104,
"step": 100
},
{
"epoch": 1.7955555555555556,
"grad_norm": 0.18577077984809875,
"learning_rate": 5.911923104577455e-07,
"loss": 0.4996,
"step": 101
},
{
"epoch": 1.8133333333333335,
"grad_norm": 0.19338218867778778,
"learning_rate": 4.894348370484648e-07,
"loss": 0.4835,
"step": 102
},
{
"epoch": 1.8311111111111111,
"grad_norm": 0.19661261141300201,
"learning_rate": 3.9706314323056936e-07,
"loss": 0.4947,
"step": 103
},
{
"epoch": 1.8488888888888888,
"grad_norm": 0.19002115726470947,
"learning_rate": 3.1416838871368925e-07,
"loss": 0.4762,
"step": 104
},
{
"epoch": 1.8666666666666667,
"grad_norm": 0.18881858885288239,
"learning_rate": 2.4083238061252565e-07,
"loss": 0.5344,
"step": 105
},
{
"epoch": 1.8844444444444446,
"grad_norm": 0.18790070712566376,
"learning_rate": 1.7712749271311392e-07,
"loss": 0.4645,
"step": 106
},
{
"epoch": 1.9022222222222223,
"grad_norm": 0.19742755591869354,
"learning_rate": 1.231165940486234e-07,
"loss": 0.4809,
"step": 107
},
{
"epoch": 1.92,
"grad_norm": 0.19038796424865723,
"learning_rate": 7.885298685522235e-08,
"loss": 0.4858,
"step": 108
},
{
"epoch": 1.9377777777777778,
"grad_norm": 0.1900634616613388,
"learning_rate": 4.438035396920004e-08,
"loss": 0.5078,
"step": 109
},
{
"epoch": 1.9555555555555557,
"grad_norm": 0.19468751549720764,
"learning_rate": 1.973271571728441e-08,
"loss": 0.5179,
"step": 110
},
{
"epoch": 1.9733333333333334,
"grad_norm": 0.1910133957862854,
"learning_rate": 4.9343963426840006e-09,
"loss": 0.4961,
"step": 111
},
{
"epoch": 1.991111111111111,
"grad_norm": 0.19184480607509613,
"learning_rate": 0.0,
"loss": 0.511,
"step": 112
},
{
"epoch": 1.991111111111111,
"step": 112,
"total_flos": 621868991119360.0,
"train_loss": 0.5655743213636535,
"train_runtime": 48561.3008,
"train_samples_per_second": 0.222,
"train_steps_per_second": 0.002
}
],
"logging_steps": 1,
"max_steps": 112,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 621868991119360.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}