{
"best_metric": 1.0710123777389526,
"best_model_checkpoint": "data/Llama-31-8B_task-2_60-samples_config-3_full/checkpoint-276",
"epoch": 54.95652173913044,
"eval_steps": 500,
"global_step": 316,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17391304347826086,
"grad_norm": 0.5168920755386353,
"learning_rate": 1.3333333333333336e-07,
"loss": 1.5956,
"step": 1
},
{
"epoch": 0.34782608695652173,
"grad_norm": 0.5719501376152039,
"learning_rate": 2.666666666666667e-07,
"loss": 1.6046,
"step": 2
},
{
"epoch": 0.6956521739130435,
"grad_norm": 0.4981832504272461,
"learning_rate": 5.333333333333335e-07,
"loss": 1.5659,
"step": 4
},
{
"epoch": 0.8695652173913043,
"eval_loss": 1.5845621824264526,
"eval_runtime": 12.9809,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 5
},
{
"epoch": 1.0434782608695652,
"grad_norm": 0.4701167941093445,
"learning_rate": 8.000000000000001e-07,
"loss": 1.5639,
"step": 6
},
{
"epoch": 1.391304347826087,
"grad_norm": 0.5071099996566772,
"learning_rate": 1.066666666666667e-06,
"loss": 1.5807,
"step": 8
},
{
"epoch": 1.7391304347826086,
"grad_norm": 0.5001740455627441,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.5947,
"step": 10
},
{
"epoch": 1.9130434782608696,
"eval_loss": 1.581119179725647,
"eval_runtime": 12.9812,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 11
},
{
"epoch": 2.0869565217391304,
"grad_norm": 0.5267815589904785,
"learning_rate": 1.6000000000000001e-06,
"loss": 1.589,
"step": 12
},
{
"epoch": 2.4347826086956523,
"grad_norm": 0.49407559633255005,
"learning_rate": 1.8666666666666669e-06,
"loss": 1.5287,
"step": 14
},
{
"epoch": 2.782608695652174,
"grad_norm": 0.45030105113983154,
"learning_rate": 2.133333333333334e-06,
"loss": 1.6305,
"step": 16
},
{
"epoch": 2.9565217391304346,
"eval_loss": 1.5758260488510132,
"eval_runtime": 12.9766,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 17
},
{
"epoch": 3.130434782608696,
"grad_norm": 0.4595053493976593,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.5404,
"step": 18
},
{
"epoch": 3.4782608695652173,
"grad_norm": 0.519245982170105,
"learning_rate": 2.666666666666667e-06,
"loss": 1.5737,
"step": 20
},
{
"epoch": 3.8260869565217392,
"grad_norm": 0.4773900508880615,
"learning_rate": 2.9333333333333338e-06,
"loss": 1.5682,
"step": 22
},
{
"epoch": 4.0,
"eval_loss": 1.5672565698623657,
"eval_runtime": 12.9785,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 23
},
{
"epoch": 4.173913043478261,
"grad_norm": 0.4907453954219818,
"learning_rate": 3.2000000000000003e-06,
"loss": 1.5736,
"step": 24
},
{
"epoch": 4.521739130434782,
"grad_norm": 0.4995110332965851,
"learning_rate": 3.4666666666666672e-06,
"loss": 1.548,
"step": 26
},
{
"epoch": 4.869565217391305,
"grad_norm": 0.48306918144226074,
"learning_rate": 3.7333333333333337e-06,
"loss": 1.5687,
"step": 28
},
{
"epoch": 4.869565217391305,
"eval_loss": 1.5570815801620483,
"eval_runtime": 12.977,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 28
},
{
"epoch": 5.217391304347826,
"grad_norm": 0.5053550601005554,
"learning_rate": 4.000000000000001e-06,
"loss": 1.5427,
"step": 30
},
{
"epoch": 5.565217391304348,
"grad_norm": 0.5055222511291504,
"learning_rate": 4.266666666666668e-06,
"loss": 1.5277,
"step": 32
},
{
"epoch": 5.913043478260869,
"grad_norm": 0.45084983110427856,
"learning_rate": 4.533333333333334e-06,
"loss": 1.5556,
"step": 34
},
{
"epoch": 5.913043478260869,
"eval_loss": 1.5406227111816406,
"eval_runtime": 12.9754,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 34
},
{
"epoch": 6.260869565217392,
"grad_norm": 0.5172422528266907,
"learning_rate": 4.800000000000001e-06,
"loss": 1.5111,
"step": 36
},
{
"epoch": 6.608695652173913,
"grad_norm": 0.571245551109314,
"learning_rate": 5.0666666666666676e-06,
"loss": 1.5721,
"step": 38
},
{
"epoch": 6.956521739130435,
"grad_norm": 0.46223798394203186,
"learning_rate": 5.333333333333334e-06,
"loss": 1.4699,
"step": 40
},
{
"epoch": 6.956521739130435,
"eval_loss": 1.5189919471740723,
"eval_runtime": 12.9806,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 40
},
{
"epoch": 7.304347826086957,
"grad_norm": 0.44059333205223083,
"learning_rate": 5.600000000000001e-06,
"loss": 1.5389,
"step": 42
},
{
"epoch": 7.6521739130434785,
"grad_norm": 0.39538195729255676,
"learning_rate": 5.8666666666666675e-06,
"loss": 1.4685,
"step": 44
},
{
"epoch": 8.0,
"grad_norm": 0.44168609380722046,
"learning_rate": 6.133333333333334e-06,
"loss": 1.5027,
"step": 46
},
{
"epoch": 8.0,
"eval_loss": 1.4957526922225952,
"eval_runtime": 12.982,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 46
},
{
"epoch": 8.347826086956522,
"grad_norm": 0.4372757375240326,
"learning_rate": 6.4000000000000006e-06,
"loss": 1.4465,
"step": 48
},
{
"epoch": 8.695652173913043,
"grad_norm": 0.4501511752605438,
"learning_rate": 6.666666666666667e-06,
"loss": 1.5203,
"step": 50
},
{
"epoch": 8.869565217391305,
"eval_loss": 1.4718750715255737,
"eval_runtime": 12.9768,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 51
},
{
"epoch": 9.043478260869565,
"grad_norm": 0.38575872778892517,
"learning_rate": 6.9333333333333344e-06,
"loss": 1.4364,
"step": 52
},
{
"epoch": 9.391304347826088,
"grad_norm": 0.3618987798690796,
"learning_rate": 7.2000000000000005e-06,
"loss": 1.4197,
"step": 54
},
{
"epoch": 9.73913043478261,
"grad_norm": 0.3317548930644989,
"learning_rate": 7.4666666666666675e-06,
"loss": 1.4872,
"step": 56
},
{
"epoch": 9.91304347826087,
"eval_loss": 1.4444891214370728,
"eval_runtime": 12.9775,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 57
},
{
"epoch": 10.08695652173913,
"grad_norm": 0.3010832369327545,
"learning_rate": 7.733333333333334e-06,
"loss": 1.4324,
"step": 58
},
{
"epoch": 10.434782608695652,
"grad_norm": 0.32329097390174866,
"learning_rate": 8.000000000000001e-06,
"loss": 1.4333,
"step": 60
},
{
"epoch": 10.782608695652174,
"grad_norm": 0.3354980945587158,
"learning_rate": 8.266666666666667e-06,
"loss": 1.4184,
"step": 62
},
{
"epoch": 10.956521739130435,
"eval_loss": 1.4150630235671997,
"eval_runtime": 12.9754,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 63
},
{
"epoch": 11.130434782608695,
"grad_norm": 0.33713477849960327,
"learning_rate": 8.533333333333335e-06,
"loss": 1.3763,
"step": 64
},
{
"epoch": 11.478260869565217,
"grad_norm": 0.23276787996292114,
"learning_rate": 8.8e-06,
"loss": 1.3969,
"step": 66
},
{
"epoch": 11.826086956521738,
"grad_norm": 0.2602153718471527,
"learning_rate": 9.066666666666667e-06,
"loss": 1.3817,
"step": 68
},
{
"epoch": 12.0,
"eval_loss": 1.386828064918518,
"eval_runtime": 12.979,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 69
},
{
"epoch": 12.173913043478262,
"grad_norm": 0.2566069960594177,
"learning_rate": 9.333333333333334e-06,
"loss": 1.3523,
"step": 70
},
{
"epoch": 12.521739130434783,
"grad_norm": 0.2781592607498169,
"learning_rate": 9.600000000000001e-06,
"loss": 1.3737,
"step": 72
},
{
"epoch": 12.869565217391305,
"grad_norm": 0.2503822147846222,
"learning_rate": 9.866666666666668e-06,
"loss": 1.3397,
"step": 74
},
{
"epoch": 12.869565217391305,
"eval_loss": 1.3647750616073608,
"eval_runtime": 12.9767,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 74
},
{
"epoch": 13.217391304347826,
"grad_norm": 0.269147664308548,
"learning_rate": 9.999945845889795e-06,
"loss": 1.3282,
"step": 76
},
{
"epoch": 13.565217391304348,
"grad_norm": 0.25336846709251404,
"learning_rate": 9.999512620046523e-06,
"loss": 1.3328,
"step": 78
},
{
"epoch": 13.91304347826087,
"grad_norm": 0.24610716104507446,
"learning_rate": 9.99864620589731e-06,
"loss": 1.3234,
"step": 80
},
{
"epoch": 13.91304347826087,
"eval_loss": 1.3389836549758911,
"eval_runtime": 12.9781,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 80
},
{
"epoch": 14.26086956521739,
"grad_norm": 0.23158159852027893,
"learning_rate": 9.99734667851357e-06,
"loss": 1.2771,
"step": 82
},
{
"epoch": 14.608695652173914,
"grad_norm": 0.2516274154186249,
"learning_rate": 9.995614150494293e-06,
"loss": 1.3073,
"step": 84
},
{
"epoch": 14.956521739130435,
"grad_norm": 0.28719547390937805,
"learning_rate": 9.993448771956285e-06,
"loss": 1.2893,
"step": 86
},
{
"epoch": 14.956521739130435,
"eval_loss": 1.3121906518936157,
"eval_runtime": 12.9801,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 86
},
{
"epoch": 15.304347826086957,
"grad_norm": 0.2287660539150238,
"learning_rate": 9.99085073052117e-06,
"loss": 1.2679,
"step": 88
},
{
"epoch": 15.652173913043478,
"grad_norm": 0.29683366417884827,
"learning_rate": 9.987820251299121e-06,
"loss": 1.2504,
"step": 90
},
{
"epoch": 16.0,
"grad_norm": 0.20408132672309875,
"learning_rate": 9.984357596869369e-06,
"loss": 1.2999,
"step": 92
},
{
"epoch": 16.0,
"eval_loss": 1.2852083444595337,
"eval_runtime": 12.9826,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 92
},
{
"epoch": 16.347826086956523,
"grad_norm": 0.2704583406448364,
"learning_rate": 9.980463067257437e-06,
"loss": 1.2504,
"step": 94
},
{
"epoch": 16.695652173913043,
"grad_norm": 0.2368343323469162,
"learning_rate": 9.976136999909156e-06,
"loss": 1.2212,
"step": 96
},
{
"epoch": 16.869565217391305,
"eval_loss": 1.2628425359725952,
"eval_runtime": 12.9802,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 97
},
{
"epoch": 17.043478260869566,
"grad_norm": 0.22224029898643494,
"learning_rate": 9.971379769661422e-06,
"loss": 1.2242,
"step": 98
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.2928115725517273,
"learning_rate": 9.966191788709716e-06,
"loss": 1.2211,
"step": 100
},
{
"epoch": 17.73913043478261,
"grad_norm": 0.22390316426753998,
"learning_rate": 9.960573506572391e-06,
"loss": 1.234,
"step": 102
},
{
"epoch": 17.91304347826087,
"eval_loss": 1.2357784509658813,
"eval_runtime": 12.9749,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 103
},
{
"epoch": 18.08695652173913,
"grad_norm": 0.25287434458732605,
"learning_rate": 9.95452541005172e-06,
"loss": 1.1594,
"step": 104
},
{
"epoch": 18.434782608695652,
"grad_norm": 0.22554974257946014,
"learning_rate": 9.948048023191728e-06,
"loss": 1.1941,
"step": 106
},
{
"epoch": 18.782608695652176,
"grad_norm": 0.28337913751602173,
"learning_rate": 9.941141907232766e-06,
"loss": 1.1704,
"step": 108
},
{
"epoch": 18.956521739130434,
"eval_loss": 1.2077757120132446,
"eval_runtime": 12.9864,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 109
},
{
"epoch": 19.130434782608695,
"grad_norm": 0.2129991501569748,
"learning_rate": 9.933807660562898e-06,
"loss": 1.1575,
"step": 110
},
{
"epoch": 19.47826086956522,
"grad_norm": 0.3318869471549988,
"learning_rate": 9.926045918666045e-06,
"loss": 1.1555,
"step": 112
},
{
"epoch": 19.82608695652174,
"grad_norm": 0.3031323552131653,
"learning_rate": 9.91785735406693e-06,
"loss": 1.1499,
"step": 114
},
{
"epoch": 20.0,
"eval_loss": 1.1795598268508911,
"eval_runtime": 12.9743,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 115
},
{
"epoch": 20.17391304347826,
"grad_norm": 0.2700609862804413,
"learning_rate": 9.909242676272797e-06,
"loss": 1.1176,
"step": 116
},
{
"epoch": 20.52173913043478,
"grad_norm": 0.24468179047107697,
"learning_rate": 9.90020263171194e-06,
"loss": 1.1005,
"step": 118
},
{
"epoch": 20.869565217391305,
"grad_norm": 0.2991030216217041,
"learning_rate": 9.890738003669029e-06,
"loss": 1.1265,
"step": 120
},
{
"epoch": 20.869565217391305,
"eval_loss": 1.1569637060165405,
"eval_runtime": 12.9777,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 120
},
{
"epoch": 21.217391304347824,
"grad_norm": 0.2351488173007965,
"learning_rate": 9.880849612217238e-06,
"loss": 1.1073,
"step": 122
},
{
"epoch": 21.565217391304348,
"grad_norm": 0.26138749718666077,
"learning_rate": 9.870538314147194e-06,
"loss": 1.0675,
"step": 124
},
{
"epoch": 21.91304347826087,
"grad_norm": 0.2605469226837158,
"learning_rate": 9.859805002892733e-06,
"loss": 1.0716,
"step": 126
},
{
"epoch": 21.91304347826087,
"eval_loss": 1.1357496976852417,
"eval_runtime": 12.9785,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 126
},
{
"epoch": 22.26086956521739,
"grad_norm": 0.2572947144508362,
"learning_rate": 9.84865060845349e-06,
"loss": 1.0791,
"step": 128
},
{
"epoch": 22.608695652173914,
"grad_norm": 0.2186698317527771,
"learning_rate": 9.83707609731432e-06,
"loss": 1.0729,
"step": 130
},
{
"epoch": 22.956521739130434,
"grad_norm": 0.3026718199253082,
"learning_rate": 9.825082472361558e-06,
"loss": 1.0332,
"step": 132
},
{
"epoch": 22.956521739130434,
"eval_loss": 1.122292160987854,
"eval_runtime": 12.9802,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 132
},
{
"epoch": 23.304347826086957,
"grad_norm": 0.21330009400844574,
"learning_rate": 9.812670772796113e-06,
"loss": 1.0612,
"step": 134
},
{
"epoch": 23.652173913043477,
"grad_norm": 0.17287513613700867,
"learning_rate": 9.799842074043438e-06,
"loss": 1.05,
"step": 136
},
{
"epoch": 24.0,
"grad_norm": 0.17213226854801178,
"learning_rate": 9.786597487660336e-06,
"loss": 1.0631,
"step": 138
},
{
"epoch": 24.0,
"eval_loss": 1.1154590845108032,
"eval_runtime": 12.9794,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 138
},
{
"epoch": 24.347826086956523,
"grad_norm": 0.17510361969470978,
"learning_rate": 9.77293816123866e-06,
"loss": 1.0322,
"step": 140
},
{
"epoch": 24.695652173913043,
"grad_norm": 0.16553597152233124,
"learning_rate": 9.75886527830587e-06,
"loss": 1.0659,
"step": 142
},
{
"epoch": 24.869565217391305,
"eval_loss": 1.1110743284225464,
"eval_runtime": 12.9742,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 143
},
{
"epoch": 25.043478260869566,
"grad_norm": 0.17666411399841309,
"learning_rate": 9.744380058222483e-06,
"loss": 1.0226,
"step": 144
},
{
"epoch": 25.391304347826086,
"grad_norm": 0.1684177666902542,
"learning_rate": 9.729483756076436e-06,
"loss": 1.0355,
"step": 146
},
{
"epoch": 25.73913043478261,
"grad_norm": 0.18400514125823975,
"learning_rate": 9.714177662574316e-06,
"loss": 1.0637,
"step": 148
},
{
"epoch": 25.91304347826087,
"eval_loss": 1.1067678928375244,
"eval_runtime": 12.9821,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 149
},
{
"epoch": 26.08695652173913,
"grad_norm": 0.16044579446315765,
"learning_rate": 9.698463103929542e-06,
"loss": 1.0691,
"step": 150
},
{
"epoch": 26.434782608695652,
"grad_norm": 0.17162466049194336,
"learning_rate": 9.682341441747446e-06,
"loss": 1.0148,
"step": 152
},
{
"epoch": 26.782608695652176,
"grad_norm": 0.16096869111061096,
"learning_rate": 9.665814072907293e-06,
"loss": 0.9979,
"step": 154
},
{
"epoch": 26.956521739130434,
"eval_loss": 1.1030863523483276,
"eval_runtime": 12.9795,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 155
},
{
"epoch": 27.130434782608695,
"grad_norm": 0.15515439212322235,
"learning_rate": 9.648882429441258e-06,
"loss": 1.0792,
"step": 156
},
{
"epoch": 27.47826086956522,
"grad_norm": 0.21910764276981354,
"learning_rate": 9.63154797841033e-06,
"loss": 0.9741,
"step": 158
},
{
"epoch": 27.82608695652174,
"grad_norm": 0.15719705820083618,
"learning_rate": 9.613812221777212e-06,
"loss": 1.0495,
"step": 160
},
{
"epoch": 28.0,
"eval_loss": 1.0992634296417236,
"eval_runtime": 12.9823,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 161
},
{
"epoch": 28.17391304347826,
"grad_norm": 0.17963844537734985,
"learning_rate": 9.595676696276173e-06,
"loss": 0.956,
"step": 162
},
{
"epoch": 28.52173913043478,
"grad_norm": 0.1683446168899536,
"learning_rate": 9.577142973279896e-06,
"loss": 1.0629,
"step": 164
},
{
"epoch": 28.869565217391305,
"grad_norm": 0.18944136798381805,
"learning_rate": 9.55821265866333e-06,
"loss": 1.0126,
"step": 166
},
{
"epoch": 28.869565217391305,
"eval_loss": 1.096557378768921,
"eval_runtime": 12.9756,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 166
},
{
"epoch": 29.217391304347824,
"grad_norm": 0.17034175992012024,
"learning_rate": 9.538887392664544e-06,
"loss": 1.0652,
"step": 168
},
{
"epoch": 29.565217391304348,
"grad_norm": 0.17872415482997894,
"learning_rate": 9.519168849742603e-06,
"loss": 0.9865,
"step": 170
},
{
"epoch": 29.91304347826087,
"grad_norm": 0.17247214913368225,
"learning_rate": 9.499058738432492e-06,
"loss": 0.9884,
"step": 172
},
{
"epoch": 29.91304347826087,
"eval_loss": 1.0938386917114258,
"eval_runtime": 12.9748,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 172
},
{
"epoch": 30.26086956521739,
"grad_norm": 0.18095757067203522,
"learning_rate": 9.478558801197065e-06,
"loss": 1.026,
"step": 174
},
{
"epoch": 30.608695652173914,
"grad_norm": 0.17440937459468842,
"learning_rate": 9.457670814276083e-06,
"loss": 1.007,
"step": 176
},
{
"epoch": 30.956521739130434,
"grad_norm": 0.17576059699058533,
"learning_rate": 9.436396587532297e-06,
"loss": 1.0366,
"step": 178
},
{
"epoch": 30.956521739130434,
"eval_loss": 1.0909334421157837,
"eval_runtime": 12.9767,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 178
},
{
"epoch": 31.304347826086957,
"grad_norm": 0.18143777549266815,
"learning_rate": 9.414737964294636e-06,
"loss": 0.9512,
"step": 180
},
{
"epoch": 31.652173913043477,
"grad_norm": 0.1847611963748932,
"learning_rate": 9.392696821198488e-06,
"loss": 1.0277,
"step": 182
},
{
"epoch": 32.0,
"grad_norm": 0.18112817406654358,
"learning_rate": 9.370275068023097e-06,
"loss": 1.0434,
"step": 184
},
{
"epoch": 32.0,
"eval_loss": 1.0885893106460571,
"eval_runtime": 12.9743,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 184
},
{
"epoch": 32.34782608695652,
"grad_norm": 0.2060151845216751,
"learning_rate": 9.347474647526095e-06,
"loss": 1.0112,
"step": 186
},
{
"epoch": 32.69565217391305,
"grad_norm": 0.18784299492835999,
"learning_rate": 9.324297535275156e-06,
"loss": 1.0222,
"step": 188
},
{
"epoch": 32.869565217391305,
"eval_loss": 1.086158037185669,
"eval_runtime": 12.9781,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 189
},
{
"epoch": 33.04347826086956,
"grad_norm": 0.20884543657302856,
"learning_rate": 9.30074573947683e-06,
"loss": 0.9734,
"step": 190
},
{
"epoch": 33.391304347826086,
"grad_norm": 0.19983573257923126,
"learning_rate": 9.276821300802535e-06,
"loss": 0.9753,
"step": 192
},
{
"epoch": 33.73913043478261,
"grad_norm": 0.1889132559299469,
"learning_rate": 9.25252629221175e-06,
"loss": 0.9978,
"step": 194
},
{
"epoch": 33.91304347826087,
"eval_loss": 1.0842341184616089,
"eval_runtime": 12.9762,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 195
},
{
"epoch": 34.08695652173913,
"grad_norm": 0.1903546005487442,
"learning_rate": 9.227862818772392e-06,
"loss": 0.993,
"step": 196
},
{
"epoch": 34.43478260869565,
"grad_norm": 0.1973000466823578,
"learning_rate": 9.202833017478421e-06,
"loss": 0.9973,
"step": 198
},
{
"epoch": 34.78260869565217,
"grad_norm": 0.23764756321907043,
"learning_rate": 9.177439057064684e-06,
"loss": 0.9593,
"step": 200
},
{
"epoch": 34.95652173913044,
"eval_loss": 1.0823968648910522,
"eval_runtime": 12.9773,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 201
},
{
"epoch": 35.130434782608695,
"grad_norm": 0.1871621310710907,
"learning_rate": 9.151683137818989e-06,
"loss": 1.066,
"step": 202
},
{
"epoch": 35.47826086956522,
"grad_norm": 0.20576035976409912,
"learning_rate": 9.125567491391476e-06,
"loss": 0.9734,
"step": 204
},
{
"epoch": 35.82608695652174,
"grad_norm": 0.1983082890510559,
"learning_rate": 9.099094380601244e-06,
"loss": 1.0383,
"step": 206
},
{
"epoch": 36.0,
"eval_loss": 1.0803947448730469,
"eval_runtime": 12.9755,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 207
},
{
"epoch": 36.17391304347826,
"grad_norm": 0.18446066975593567,
"learning_rate": 9.072266099240286e-06,
"loss": 0.9198,
"step": 208
},
{
"epoch": 36.52173913043478,
"grad_norm": 0.20637229084968567,
"learning_rate": 9.045084971874738e-06,
"loss": 1.0071,
"step": 210
},
{
"epoch": 36.869565217391305,
"grad_norm": 0.21963337063789368,
"learning_rate": 9.017553353643479e-06,
"loss": 0.9958,
"step": 212
},
{
"epoch": 36.869565217391305,
"eval_loss": 1.0792189836502075,
"eval_runtime": 12.9787,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 212
},
{
"epoch": 37.21739130434783,
"grad_norm": 0.2046482264995575,
"learning_rate": 8.989673630054044e-06,
"loss": 0.9336,
"step": 214
},
{
"epoch": 37.56521739130435,
"grad_norm": 0.21577562391757965,
"learning_rate": 8.961448216775955e-06,
"loss": 1.01,
"step": 216
},
{
"epoch": 37.91304347826087,
"grad_norm": 0.21256496012210846,
"learning_rate": 8.932879559431392e-06,
"loss": 0.9774,
"step": 218
},
{
"epoch": 37.91304347826087,
"eval_loss": 1.0778032541275024,
"eval_runtime": 12.9805,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 218
},
{
"epoch": 38.26086956521739,
"grad_norm": 0.2028392106294632,
"learning_rate": 8.903970133383297e-06,
"loss": 0.9966,
"step": 220
},
{
"epoch": 38.608695652173914,
"grad_norm": 0.22231891751289368,
"learning_rate": 8.874722443520898e-06,
"loss": 0.936,
"step": 222
},
{
"epoch": 38.95652173913044,
"grad_norm": 0.23805910348892212,
"learning_rate": 8.845139024042664e-06,
"loss": 0.9853,
"step": 224
},
{
"epoch": 38.95652173913044,
"eval_loss": 1.0763030052185059,
"eval_runtime": 12.9799,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 224
},
{
"epoch": 39.30434782608695,
"grad_norm": 0.21919763088226318,
"learning_rate": 8.815222438236726e-06,
"loss": 1.0162,
"step": 226
},
{
"epoch": 39.65217391304348,
"grad_norm": 0.24376323819160461,
"learning_rate": 8.784975278258783e-06,
"loss": 0.9777,
"step": 228
},
{
"epoch": 40.0,
"grad_norm": 0.2683652639389038,
"learning_rate": 8.754400164907496e-06,
"loss": 0.9241,
"step": 230
},
{
"epoch": 40.0,
"eval_loss": 1.07473886013031,
"eval_runtime": 12.9776,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 230
},
{
"epoch": 40.34782608695652,
"grad_norm": 0.23253561556339264,
"learning_rate": 8.723499747397415e-06,
"loss": 0.9221,
"step": 232
},
{
"epoch": 40.69565217391305,
"grad_norm": 0.2230878323316574,
"learning_rate": 8.692276703129421e-06,
"loss": 1.0387,
"step": 234
},
{
"epoch": 40.869565217391305,
"eval_loss": 1.0742899179458618,
"eval_runtime": 12.9772,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 235
},
{
"epoch": 41.04347826086956,
"grad_norm": 0.23267078399658203,
"learning_rate": 8.660733737458751e-06,
"loss": 0.9551,
"step": 236
},
{
"epoch": 41.391304347826086,
"grad_norm": 0.25790396332740784,
"learning_rate": 8.628873583460593e-06,
"loss": 0.9623,
"step": 238
},
{
"epoch": 41.73913043478261,
"grad_norm": 0.23886793851852417,
"learning_rate": 8.596699001693257e-06,
"loss": 0.9616,
"step": 240
},
{
"epoch": 41.91304347826087,
"eval_loss": 1.073310375213623,
"eval_runtime": 12.9821,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 241
},
{
"epoch": 42.08695652173913,
"grad_norm": 0.24948331713676453,
"learning_rate": 8.564212779959003e-06,
"loss": 0.9294,
"step": 242
},
{
"epoch": 42.43478260869565,
"grad_norm": 0.24044832587242126,
"learning_rate": 8.531417733062476e-06,
"loss": 0.9579,
"step": 244
},
{
"epoch": 42.78260869565217,
"grad_norm": 0.2429089993238449,
"learning_rate": 8.498316702566828e-06,
"loss": 0.9909,
"step": 246
},
{
"epoch": 42.95652173913044,
"eval_loss": 1.0724002122879028,
"eval_runtime": 12.9815,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 247
},
{
"epoch": 43.130434782608695,
"grad_norm": 0.25010445713996887,
"learning_rate": 8.464912556547486e-06,
"loss": 0.9609,
"step": 248
},
{
"epoch": 43.47826086956522,
"grad_norm": 0.25847363471984863,
"learning_rate": 8.43120818934367e-06,
"loss": 0.9271,
"step": 250
},
{
"epoch": 43.82608695652174,
"grad_norm": 0.25236183404922485,
"learning_rate": 8.397206521307584e-06,
"loss": 0.9055,
"step": 252
},
{
"epoch": 44.0,
"eval_loss": 1.0719984769821167,
"eval_runtime": 12.9768,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 253
},
{
"epoch": 44.17391304347826,
"grad_norm": 0.2587444484233856,
"learning_rate": 8.362910498551402e-06,
"loss": 0.9453,
"step": 254
},
{
"epoch": 44.52173913043478,
"grad_norm": 0.2820061147212982,
"learning_rate": 8.328323092691985e-06,
"loss": 0.952,
"step": 256
},
{
"epoch": 44.869565217391305,
"grad_norm": 0.2769736349582672,
"learning_rate": 8.293447300593402e-06,
"loss": 1.0025,
"step": 258
},
{
"epoch": 44.869565217391305,
"eval_loss": 1.0721515417099,
"eval_runtime": 12.979,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 258
},
{
"epoch": 45.21739130434783,
"grad_norm": 0.2502345144748688,
"learning_rate": 8.258286144107277e-06,
"loss": 0.9481,
"step": 260
},
{
"epoch": 45.56521739130435,
"grad_norm": 0.2711241841316223,
"learning_rate": 8.222842669810936e-06,
"loss": 0.9477,
"step": 262
},
{
"epoch": 45.91304347826087,
"grad_norm": 0.25015076994895935,
"learning_rate": 8.18711994874345e-06,
"loss": 0.9325,
"step": 264
},
{
"epoch": 45.91304347826087,
"eval_loss": 1.0710887908935547,
"eval_runtime": 12.9806,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 264
},
{
"epoch": 46.26086956521739,
"grad_norm": 0.3114207088947296,
"learning_rate": 8.151121076139534e-06,
"loss": 0.8994,
"step": 266
},
{
"epoch": 46.608695652173914,
"grad_norm": 0.2907244861125946,
"learning_rate": 8.11484917116136e-06,
"loss": 1.0092,
"step": 268
},
{
"epoch": 46.95652173913044,
"grad_norm": 0.3372167646884918,
"learning_rate": 8.078307376628292e-06,
"loss": 0.8921,
"step": 270
},
{
"epoch": 46.95652173913044,
"eval_loss": 1.0723286867141724,
"eval_runtime": 12.9797,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 270
},
{
"epoch": 47.30434782608695,
"grad_norm": 0.32444846630096436,
"learning_rate": 8.041498858744572e-06,
"loss": 0.954,
"step": 272
},
{
"epoch": 47.65217391304348,
"grad_norm": 0.2917410731315613,
"learning_rate": 8.004426806824985e-06,
"loss": 0.9444,
"step": 274
},
{
"epoch": 48.0,
"grad_norm": 0.3161114454269409,
"learning_rate": 7.967094433018508e-06,
"loss": 0.9079,
"step": 276
},
{
"epoch": 48.0,
"eval_loss": 1.0710123777389526,
"eval_runtime": 12.9764,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 276
},
{
"epoch": 48.34782608695652,
"grad_norm": 0.27982279658317566,
"learning_rate": 7.929504972030003e-06,
"loss": 0.8982,
"step": 278
},
{
"epoch": 48.69565217391305,
"grad_norm": 0.29784122109413147,
"learning_rate": 7.891661680839932e-06,
"loss": 0.9615,
"step": 280
},
{
"epoch": 48.869565217391305,
"eval_loss": 1.07285475730896,
"eval_runtime": 12.9818,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 281
},
{
"epoch": 49.04347826086956,
"grad_norm": 0.3300091326236725,
"learning_rate": 7.85356783842216e-06,
"loss": 0.8969,
"step": 282
},
{
"epoch": 49.391304347826086,
"grad_norm": 0.3034118413925171,
"learning_rate": 7.815226745459831e-06,
"loss": 0.9267,
"step": 284
},
{
"epoch": 49.73913043478261,
"grad_norm": 0.28807440400123596,
"learning_rate": 7.776641724059398e-06,
"loss": 0.9517,
"step": 286
},
{
"epoch": 49.91304347826087,
"eval_loss": 1.0717887878417969,
"eval_runtime": 12.9755,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 287
},
{
"epoch": 50.08695652173913,
"grad_norm": 0.34060779213905334,
"learning_rate": 7.737816117462752e-06,
"loss": 0.9092,
"step": 288
},
{
"epoch": 50.43478260869565,
"grad_norm": 0.28578662872314453,
"learning_rate": 7.698753289757565e-06,
"loss": 0.9824,
"step": 290
},
{
"epoch": 50.78260869565217,
"grad_norm": 0.316270112991333,
"learning_rate": 7.65945662558579e-06,
"loss": 0.8619,
"step": 292
},
{
"epoch": 50.95652173913044,
"eval_loss": 1.0729678869247437,
"eval_runtime": 12.9784,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 293
},
{
"epoch": 51.130434782608695,
"grad_norm": 0.2928197681903839,
"learning_rate": 7.619929529850397e-06,
"loss": 0.946,
"step": 294
},
{
"epoch": 51.47826086956522,
"grad_norm": 0.30175477266311646,
"learning_rate": 7.580175427420358e-06,
"loss": 0.9319,
"step": 296
},
{
"epoch": 51.82608695652174,
"grad_norm": 0.3955518305301666,
"learning_rate": 7.54019776283389e-06,
"loss": 0.8894,
"step": 298
},
{
"epoch": 52.0,
"eval_loss": 1.0739282369613647,
"eval_runtime": 12.9857,
"eval_samples_per_second": 0.924,
"eval_steps_per_second": 0.924,
"step": 299
},
{
"epoch": 52.17391304347826,
"grad_norm": 0.33545202016830444,
"learning_rate": 7.500000000000001e-06,
"loss": 0.8967,
"step": 300
},
{
"epoch": 52.52173913043478,
"grad_norm": 0.3249565362930298,
"learning_rate": 7.459585621898353e-06,
"loss": 0.9514,
"step": 302
},
{
"epoch": 52.869565217391305,
"grad_norm": 0.3969457745552063,
"learning_rate": 7.418958130277483e-06,
"loss": 0.8389,
"step": 304
},
{
"epoch": 52.869565217391305,
"eval_loss": 1.0741795301437378,
"eval_runtime": 12.9752,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 304
},
{
"epoch": 53.21739130434783,
"grad_norm": 0.3565354347229004,
"learning_rate": 7.378121045351378e-06,
"loss": 0.882,
"step": 306
},
{
"epoch": 53.56521739130435,
"grad_norm": 0.41273507475852966,
"learning_rate": 7.337077905494472e-06,
"loss": 0.8992,
"step": 308
},
{
"epoch": 53.91304347826087,
"grad_norm": 0.3917119801044464,
"learning_rate": 7.295832266935059e-06,
"loss": 0.9032,
"step": 310
},
{
"epoch": 53.91304347826087,
"eval_loss": 1.0749919414520264,
"eval_runtime": 12.9751,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 310
},
{
"epoch": 54.26086956521739,
"grad_norm": 0.34160563349723816,
"learning_rate": 7.254387703447154e-06,
"loss": 0.9499,
"step": 312
},
{
"epoch": 54.608695652173914,
"grad_norm": 0.4153737723827362,
"learning_rate": 7.212747806040845e-06,
"loss": 0.9246,
"step": 314
},
{
"epoch": 54.95652173913044,
"grad_norm": 0.3735916316509247,
"learning_rate": 7.170916182651141e-06,
"loss": 0.9015,
"step": 316
},
{
"epoch": 54.95652173913044,
"eval_loss": 1.0760287046432495,
"eval_runtime": 12.9763,
"eval_samples_per_second": 0.925,
"eval_steps_per_second": 0.925,
"step": 316
},
{
"epoch": 54.95652173913044,
"step": 316,
"total_flos": 2.4451647665917133e+17,
"train_loss": 1.137867835975146,
"train_runtime": 8799.2233,
"train_samples_per_second": 0.784,
"train_steps_per_second": 0.085
}
],
"logging_steps": 2,
"max_steps": 750,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.4451647665917133e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}