{
"best_metric": 0.9106292724609375,
"best_model_checkpoint": "./results/chinese-llama-2-lora-2nov/checkpoint-6000",
"epoch": 9.868421052631579,
"eval_steps": 200,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2.9999999999999997e-05,
"loss": 2.2931,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 5.9999999999999995e-05,
"loss": 2.2482,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 8.999999999999999e-05,
"loss": 2.0171,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 0.00011999999999999999,
"loss": 1.6749,
"step": 40
},
{
"epoch": 0.08,
"learning_rate": 0.00015,
"loss": 1.5164,
"step": 50
},
{
"epoch": 0.1,
"learning_rate": 0.00017999999999999998,
"loss": 1.4101,
"step": 60
},
{
"epoch": 0.12,
"learning_rate": 0.00020999999999999998,
"loss": 1.3538,
"step": 70
},
{
"epoch": 0.13,
"learning_rate": 0.00023999999999999998,
"loss": 1.2861,
"step": 80
},
{
"epoch": 0.15,
"learning_rate": 0.00027,
"loss": 1.2444,
"step": 90
},
{
"epoch": 0.16,
"learning_rate": 0.0003,
"loss": 1.2369,
"step": 100
},
{
"epoch": 0.18,
"learning_rate": 0.0002994983277591973,
"loss": 1.2258,
"step": 110
},
{
"epoch": 0.2,
"learning_rate": 0.0002989966555183946,
"loss": 1.1953,
"step": 120
},
{
"epoch": 0.21,
"learning_rate": 0.00029849498327759195,
"loss": 1.2009,
"step": 130
},
{
"epoch": 0.23,
"learning_rate": 0.0002979933110367893,
"loss": 1.1498,
"step": 140
},
{
"epoch": 0.25,
"learning_rate": 0.0002974916387959866,
"loss": 1.1646,
"step": 150
},
{
"epoch": 0.26,
"learning_rate": 0.0002969899665551839,
"loss": 1.1375,
"step": 160
},
{
"epoch": 0.28,
"learning_rate": 0.00029648829431438127,
"loss": 1.1429,
"step": 170
},
{
"epoch": 0.3,
"learning_rate": 0.00029598662207357856,
"loss": 1.1322,
"step": 180
},
{
"epoch": 0.31,
"learning_rate": 0.0002954849498327759,
"loss": 1.1255,
"step": 190
},
{
"epoch": 0.33,
"learning_rate": 0.00029498327759197324,
"loss": 1.1026,
"step": 200
},
{
"epoch": 0.33,
"eval_loss": 1.1218857765197754,
"eval_runtime": 33.0988,
"eval_samples_per_second": 60.425,
"eval_steps_per_second": 1.269,
"step": 200
},
{
"epoch": 0.35,
"learning_rate": 0.00029448160535117053,
"loss": 1.1085,
"step": 210
},
{
"epoch": 0.36,
"learning_rate": 0.0002939799331103679,
"loss": 1.0916,
"step": 220
},
{
"epoch": 0.38,
"learning_rate": 0.0002934782608695652,
"loss": 1.1211,
"step": 230
},
{
"epoch": 0.39,
"learning_rate": 0.0002929765886287625,
"loss": 1.0773,
"step": 240
},
{
"epoch": 0.41,
"learning_rate": 0.00029247491638795985,
"loss": 1.085,
"step": 250
},
{
"epoch": 0.43,
"learning_rate": 0.0002919732441471572,
"loss": 1.0696,
"step": 260
},
{
"epoch": 0.44,
"learning_rate": 0.0002914715719063545,
"loss": 1.0814,
"step": 270
},
{
"epoch": 0.46,
"learning_rate": 0.0002909698996655518,
"loss": 1.0931,
"step": 280
},
{
"epoch": 0.48,
"learning_rate": 0.00029046822742474917,
"loss": 1.064,
"step": 290
},
{
"epoch": 0.49,
"learning_rate": 0.00028996655518394646,
"loss": 1.076,
"step": 300
},
{
"epoch": 0.51,
"learning_rate": 0.0002894648829431438,
"loss": 1.0667,
"step": 310
},
{
"epoch": 0.53,
"learning_rate": 0.00028896321070234115,
"loss": 1.0682,
"step": 320
},
{
"epoch": 0.54,
"learning_rate": 0.00028846153846153843,
"loss": 1.0589,
"step": 330
},
{
"epoch": 0.56,
"learning_rate": 0.0002879598662207358,
"loss": 1.0524,
"step": 340
},
{
"epoch": 0.58,
"learning_rate": 0.0002874581939799331,
"loss": 1.061,
"step": 350
},
{
"epoch": 0.59,
"learning_rate": 0.0002869565217391304,
"loss": 1.0575,
"step": 360
},
{
"epoch": 0.61,
"learning_rate": 0.00028645484949832775,
"loss": 1.0381,
"step": 370
},
{
"epoch": 0.62,
"learning_rate": 0.00028595317725752504,
"loss": 1.0463,
"step": 380
},
{
"epoch": 0.64,
"learning_rate": 0.0002854515050167224,
"loss": 1.0616,
"step": 390
},
{
"epoch": 0.66,
"learning_rate": 0.00028494983277591973,
"loss": 1.0462,
"step": 400
},
{
"epoch": 0.66,
"eval_loss": 1.0583561658859253,
"eval_runtime": 34.9211,
"eval_samples_per_second": 57.272,
"eval_steps_per_second": 1.203,
"step": 400
},
{
"epoch": 0.67,
"learning_rate": 0.000284448160535117,
"loss": 1.0481,
"step": 410
},
{
"epoch": 0.69,
"learning_rate": 0.00028394648829431436,
"loss": 1.0362,
"step": 420
},
{
"epoch": 0.71,
"learning_rate": 0.0002834448160535117,
"loss": 1.0517,
"step": 430
},
{
"epoch": 0.72,
"learning_rate": 0.000282943143812709,
"loss": 1.0465,
"step": 440
},
{
"epoch": 0.74,
"learning_rate": 0.00028244147157190634,
"loss": 1.0313,
"step": 450
},
{
"epoch": 0.76,
"learning_rate": 0.0002819397993311037,
"loss": 1.0134,
"step": 460
},
{
"epoch": 0.77,
"learning_rate": 0.00028143812709030097,
"loss": 1.0373,
"step": 470
},
{
"epoch": 0.79,
"learning_rate": 0.0002809364548494983,
"loss": 1.0357,
"step": 480
},
{
"epoch": 0.81,
"learning_rate": 0.00028043478260869565,
"loss": 1.0113,
"step": 490
},
{
"epoch": 0.82,
"learning_rate": 0.00027993311036789294,
"loss": 1.0201,
"step": 500
},
{
"epoch": 0.84,
"learning_rate": 0.0002794314381270903,
"loss": 1.0359,
"step": 510
},
{
"epoch": 0.86,
"learning_rate": 0.00027892976588628763,
"loss": 1.0064,
"step": 520
},
{
"epoch": 0.87,
"learning_rate": 0.0002784280936454849,
"loss": 1.0166,
"step": 530
},
{
"epoch": 0.89,
"learning_rate": 0.00027792642140468226,
"loss": 1.0025,
"step": 540
},
{
"epoch": 0.9,
"learning_rate": 0.0002774247491638796,
"loss": 1.028,
"step": 550
},
{
"epoch": 0.92,
"learning_rate": 0.0002769230769230769,
"loss": 1.0216,
"step": 560
},
{
"epoch": 0.94,
"learning_rate": 0.00027642140468227424,
"loss": 1.0037,
"step": 570
},
{
"epoch": 0.95,
"learning_rate": 0.0002759197324414716,
"loss": 1.0028,
"step": 580
},
{
"epoch": 0.97,
"learning_rate": 0.00027541806020066887,
"loss": 1.0193,
"step": 590
},
{
"epoch": 0.99,
"learning_rate": 0.00027491638795986616,
"loss": 1.0128,
"step": 600
},
{
"epoch": 0.99,
"eval_loss": 1.0269545316696167,
"eval_runtime": 33.0707,
"eval_samples_per_second": 60.476,
"eval_steps_per_second": 1.27,
"step": 600
},
{
"epoch": 1.0,
"learning_rate": 0.0002744147157190635,
"loss": 0.9854,
"step": 610
},
{
"epoch": 1.02,
"learning_rate": 0.00027391304347826085,
"loss": 1.0161,
"step": 620
},
{
"epoch": 1.04,
"learning_rate": 0.00027341137123745813,
"loss": 0.9755,
"step": 630
},
{
"epoch": 1.05,
"learning_rate": 0.0002729096989966555,
"loss": 0.997,
"step": 640
},
{
"epoch": 1.07,
"learning_rate": 0.0002724080267558528,
"loss": 0.9834,
"step": 650
},
{
"epoch": 1.09,
"learning_rate": 0.0002719063545150501,
"loss": 1.0024,
"step": 660
},
{
"epoch": 1.1,
"learning_rate": 0.00027140468227424745,
"loss": 0.985,
"step": 670
},
{
"epoch": 1.12,
"learning_rate": 0.0002709030100334448,
"loss": 1.0037,
"step": 680
},
{
"epoch": 1.13,
"learning_rate": 0.0002704013377926421,
"loss": 1.0004,
"step": 690
},
{
"epoch": 1.15,
"learning_rate": 0.00026989966555183943,
"loss": 1.006,
"step": 700
},
{
"epoch": 1.17,
"learning_rate": 0.00026939799331103677,
"loss": 0.9887,
"step": 710
},
{
"epoch": 1.18,
"learning_rate": 0.00026889632107023406,
"loss": 0.9817,
"step": 720
},
{
"epoch": 1.2,
"learning_rate": 0.0002683946488294314,
"loss": 0.9718,
"step": 730
},
{
"epoch": 1.22,
"learning_rate": 0.00026789297658862875,
"loss": 0.9958,
"step": 740
},
{
"epoch": 1.23,
"learning_rate": 0.00026739130434782604,
"loss": 0.9654,
"step": 750
},
{
"epoch": 1.25,
"learning_rate": 0.0002668896321070234,
"loss": 0.9906,
"step": 760
},
{
"epoch": 1.27,
"learning_rate": 0.0002663879598662207,
"loss": 0.9922,
"step": 770
},
{
"epoch": 1.28,
"learning_rate": 0.000265886287625418,
"loss": 0.9803,
"step": 780
},
{
"epoch": 1.3,
"learning_rate": 0.00026538461538461536,
"loss": 0.9709,
"step": 790
},
{
"epoch": 1.32,
"learning_rate": 0.0002648829431438127,
"loss": 0.9562,
"step": 800
},
{
"epoch": 1.32,
"eval_loss": 1.007567048072815,
"eval_runtime": 33.1229,
"eval_samples_per_second": 60.381,
"eval_steps_per_second": 1.268,
"step": 800
},
{
"epoch": 1.33,
"learning_rate": 0.00026438127090301,
"loss": 0.98,
"step": 810
},
{
"epoch": 1.35,
"learning_rate": 0.00026387959866220733,
"loss": 1.0006,
"step": 820
},
{
"epoch": 1.37,
"learning_rate": 0.0002633779264214047,
"loss": 0.9748,
"step": 830
},
{
"epoch": 1.38,
"learning_rate": 0.00026287625418060196,
"loss": 0.9878,
"step": 840
},
{
"epoch": 1.4,
"learning_rate": 0.0002623745819397993,
"loss": 0.9827,
"step": 850
},
{
"epoch": 1.41,
"learning_rate": 0.00026187290969899665,
"loss": 0.9752,
"step": 860
},
{
"epoch": 1.43,
"learning_rate": 0.00026137123745819394,
"loss": 0.9675,
"step": 870
},
{
"epoch": 1.45,
"learning_rate": 0.0002608695652173913,
"loss": 0.9563,
"step": 880
},
{
"epoch": 1.46,
"learning_rate": 0.0002603678929765886,
"loss": 0.99,
"step": 890
},
{
"epoch": 1.48,
"learning_rate": 0.0002598662207357859,
"loss": 1.0059,
"step": 900
},
{
"epoch": 1.5,
"learning_rate": 0.00025936454849498326,
"loss": 0.9916,
"step": 910
},
{
"epoch": 1.51,
"learning_rate": 0.0002588628762541806,
"loss": 0.9866,
"step": 920
},
{
"epoch": 1.53,
"learning_rate": 0.0002583612040133779,
"loss": 0.9522,
"step": 930
},
{
"epoch": 1.55,
"learning_rate": 0.00025785953177257523,
"loss": 0.9421,
"step": 940
},
{
"epoch": 1.56,
"learning_rate": 0.0002573578595317725,
"loss": 0.9678,
"step": 950
},
{
"epoch": 1.58,
"learning_rate": 0.00025685618729096986,
"loss": 0.9556,
"step": 960
},
{
"epoch": 1.6,
"learning_rate": 0.0002563545150501672,
"loss": 0.9794,
"step": 970
},
{
"epoch": 1.61,
"learning_rate": 0.0002558528428093645,
"loss": 0.9603,
"step": 980
},
{
"epoch": 1.63,
"learning_rate": 0.00025535117056856184,
"loss": 0.9457,
"step": 990
},
{
"epoch": 1.64,
"learning_rate": 0.0002548494983277592,
"loss": 0.9656,
"step": 1000
},
{
"epoch": 1.64,
"eval_loss": 0.9902360439300537,
"eval_runtime": 33.1444,
"eval_samples_per_second": 60.342,
"eval_steps_per_second": 1.267,
"step": 1000
},
{
"epoch": 1.66,
"learning_rate": 0.00025434782608695647,
"loss": 0.955,
"step": 1010
},
{
"epoch": 1.68,
"learning_rate": 0.0002538461538461538,
"loss": 0.9664,
"step": 1020
},
{
"epoch": 1.69,
"learning_rate": 0.00025334448160535116,
"loss": 0.9712,
"step": 1030
},
{
"epoch": 1.71,
"learning_rate": 0.00025284280936454845,
"loss": 0.947,
"step": 1040
},
{
"epoch": 1.73,
"learning_rate": 0.0002523411371237458,
"loss": 0.973,
"step": 1050
},
{
"epoch": 1.74,
"learning_rate": 0.00025183946488294313,
"loss": 0.9664,
"step": 1060
},
{
"epoch": 1.76,
"learning_rate": 0.0002513377926421404,
"loss": 0.9581,
"step": 1070
},
{
"epoch": 1.78,
"learning_rate": 0.00025083612040133777,
"loss": 0.9544,
"step": 1080
},
{
"epoch": 1.79,
"learning_rate": 0.0002503344481605351,
"loss": 0.9774,
"step": 1090
},
{
"epoch": 1.81,
"learning_rate": 0.0002498327759197324,
"loss": 0.9493,
"step": 1100
},
{
"epoch": 1.83,
"learning_rate": 0.00024933110367892974,
"loss": 0.9584,
"step": 1110
},
{
"epoch": 1.84,
"learning_rate": 0.0002488294314381271,
"loss": 0.947,
"step": 1120
},
{
"epoch": 1.86,
"learning_rate": 0.0002483277591973244,
"loss": 0.9502,
"step": 1130
},
{
"epoch": 1.88,
"learning_rate": 0.0002478260869565217,
"loss": 0.944,
"step": 1140
},
{
"epoch": 1.89,
"learning_rate": 0.00024732441471571906,
"loss": 0.9556,
"step": 1150
},
{
"epoch": 1.91,
"learning_rate": 0.00024682274247491635,
"loss": 0.9627,
"step": 1160
},
{
"epoch": 1.92,
"learning_rate": 0.0002463210702341137,
"loss": 0.9376,
"step": 1170
},
{
"epoch": 1.94,
"learning_rate": 0.00024581939799331104,
"loss": 0.9399,
"step": 1180
},
{
"epoch": 1.96,
"learning_rate": 0.0002453177257525083,
"loss": 0.9449,
"step": 1190
},
{
"epoch": 1.97,
"learning_rate": 0.00024481605351170567,
"loss": 0.9514,
"step": 1200
},
{
"epoch": 1.97,
"eval_loss": 0.9769035577774048,
"eval_runtime": 33.1416,
"eval_samples_per_second": 60.347,
"eval_steps_per_second": 1.267,
"step": 1200
},
{
"epoch": 1.99,
"learning_rate": 0.000244314381270903,
"loss": 0.9439,
"step": 1210
},
{
"epoch": 2.01,
"learning_rate": 0.00024381270903010033,
"loss": 0.9512,
"step": 1220
},
{
"epoch": 2.02,
"learning_rate": 0.00024331103678929764,
"loss": 0.948,
"step": 1230
},
{
"epoch": 2.04,
"learning_rate": 0.00024280936454849496,
"loss": 0.9421,
"step": 1240
},
{
"epoch": 2.06,
"learning_rate": 0.0002423076923076923,
"loss": 0.9323,
"step": 1250
},
{
"epoch": 2.07,
"learning_rate": 0.00024180602006688962,
"loss": 0.9278,
"step": 1260
},
{
"epoch": 2.09,
"learning_rate": 0.00024130434782608694,
"loss": 0.9255,
"step": 1270
},
{
"epoch": 2.11,
"learning_rate": 0.00024080267558528428,
"loss": 0.9214,
"step": 1280
},
{
"epoch": 2.12,
"learning_rate": 0.0002403010033444816,
"loss": 0.9456,
"step": 1290
},
{
"epoch": 2.14,
"learning_rate": 0.0002397993311036789,
"loss": 0.9435,
"step": 1300
},
{
"epoch": 2.15,
"learning_rate": 0.00023929765886287623,
"loss": 0.9261,
"step": 1310
},
{
"epoch": 2.17,
"learning_rate": 0.00023879598662207357,
"loss": 0.9225,
"step": 1320
},
{
"epoch": 2.19,
"learning_rate": 0.00023829431438127089,
"loss": 0.9184,
"step": 1330
},
{
"epoch": 2.2,
"learning_rate": 0.0002377926421404682,
"loss": 0.9301,
"step": 1340
},
{
"epoch": 2.22,
"learning_rate": 0.00023729096989966555,
"loss": 0.9599,
"step": 1350
},
{
"epoch": 2.24,
"learning_rate": 0.00023678929765886286,
"loss": 0.9316,
"step": 1360
},
{
"epoch": 2.25,
"learning_rate": 0.00023628762541806018,
"loss": 0.9214,
"step": 1370
},
{
"epoch": 2.27,
"learning_rate": 0.00023578595317725752,
"loss": 0.9206,
"step": 1380
},
{
"epoch": 2.29,
"learning_rate": 0.00023528428093645484,
"loss": 0.9413,
"step": 1390
},
{
"epoch": 2.3,
"learning_rate": 0.00023478260869565215,
"loss": 0.9136,
"step": 1400
},
{
"epoch": 2.3,
"eval_loss": 0.9681736826896667,
"eval_runtime": 33.0949,
"eval_samples_per_second": 60.432,
"eval_steps_per_second": 1.269,
"step": 1400
},
{
"epoch": 2.32,
"learning_rate": 0.0002342809364548495,
"loss": 0.9128,
"step": 1410
},
{
"epoch": 2.34,
"learning_rate": 0.0002337792642140468,
"loss": 0.9229,
"step": 1420
},
{
"epoch": 2.35,
"learning_rate": 0.00023327759197324413,
"loss": 0.9127,
"step": 1430
},
{
"epoch": 2.37,
"learning_rate": 0.00023277591973244147,
"loss": 0.9305,
"step": 1440
},
{
"epoch": 2.38,
"learning_rate": 0.0002322742474916388,
"loss": 0.9238,
"step": 1450
},
{
"epoch": 2.4,
"learning_rate": 0.0002317725752508361,
"loss": 0.9179,
"step": 1460
},
{
"epoch": 2.42,
"learning_rate": 0.00023127090301003345,
"loss": 0.9392,
"step": 1470
},
{
"epoch": 2.43,
"learning_rate": 0.00023076923076923076,
"loss": 0.9301,
"step": 1480
},
{
"epoch": 2.45,
"learning_rate": 0.00023026755852842808,
"loss": 0.9298,
"step": 1490
},
{
"epoch": 2.47,
"learning_rate": 0.00022976588628762542,
"loss": 0.9426,
"step": 1500
},
{
"epoch": 2.48,
"learning_rate": 0.00022926421404682274,
"loss": 0.9228,
"step": 1510
},
{
"epoch": 2.5,
"learning_rate": 0.00022876254180602006,
"loss": 0.9242,
"step": 1520
},
{
"epoch": 2.52,
"learning_rate": 0.0002282608695652174,
"loss": 0.9317,
"step": 1530
},
{
"epoch": 2.53,
"learning_rate": 0.00022775919732441471,
"loss": 0.9093,
"step": 1540
},
{
"epoch": 2.55,
"learning_rate": 0.00022725752508361203,
"loss": 0.91,
"step": 1550
},
{
"epoch": 2.57,
"learning_rate": 0.00022675585284280935,
"loss": 0.9177,
"step": 1560
},
{
"epoch": 2.58,
"learning_rate": 0.0002262541806020067,
"loss": 0.9222,
"step": 1570
},
{
"epoch": 2.6,
"learning_rate": 0.000225752508361204,
"loss": 0.9165,
"step": 1580
},
{
"epoch": 2.62,
"learning_rate": 0.00022525083612040132,
"loss": 0.9159,
"step": 1590
},
{
"epoch": 2.63,
"learning_rate": 0.00022474916387959864,
"loss": 0.887,
"step": 1600
},
{
"epoch": 2.63,
"eval_loss": 0.9578931927680969,
"eval_runtime": 33.1845,
"eval_samples_per_second": 60.269,
"eval_steps_per_second": 1.266,
"step": 1600
},
{
"epoch": 2.65,
"learning_rate": 0.00022424749163879595,
"loss": 0.8999,
"step": 1610
},
{
"epoch": 2.66,
"learning_rate": 0.00022374581939799327,
"loss": 0.9044,
"step": 1620
},
{
"epoch": 2.68,
"learning_rate": 0.00022324414715719061,
"loss": 0.8885,
"step": 1630
},
{
"epoch": 2.7,
"learning_rate": 0.00022274247491638793,
"loss": 0.9134,
"step": 1640
},
{
"epoch": 2.71,
"learning_rate": 0.00022224080267558525,
"loss": 0.9055,
"step": 1650
},
{
"epoch": 2.73,
"learning_rate": 0.00022173913043478256,
"loss": 0.919,
"step": 1660
},
{
"epoch": 2.75,
"learning_rate": 0.0002212374581939799,
"loss": 0.9358,
"step": 1670
},
{
"epoch": 2.76,
"learning_rate": 0.00022073578595317722,
"loss": 0.918,
"step": 1680
},
{
"epoch": 2.78,
"learning_rate": 0.00022023411371237454,
"loss": 0.9072,
"step": 1690
},
{
"epoch": 2.8,
"learning_rate": 0.00021973244147157188,
"loss": 0.9131,
"step": 1700
},
{
"epoch": 2.81,
"learning_rate": 0.0002192307692307692,
"loss": 0.9092,
"step": 1710
},
{
"epoch": 2.83,
"learning_rate": 0.0002187290969899665,
"loss": 0.8981,
"step": 1720
},
{
"epoch": 2.85,
"learning_rate": 0.00021822742474916386,
"loss": 0.9273,
"step": 1730
},
{
"epoch": 2.86,
"learning_rate": 0.00021772575250836117,
"loss": 0.9226,
"step": 1740
},
{
"epoch": 2.88,
"learning_rate": 0.0002172240802675585,
"loss": 0.9169,
"step": 1750
},
{
"epoch": 2.89,
"learning_rate": 0.00021672240802675583,
"loss": 0.9278,
"step": 1760
},
{
"epoch": 2.91,
"learning_rate": 0.00021622073578595315,
"loss": 0.907,
"step": 1770
},
{
"epoch": 2.93,
"learning_rate": 0.00021571906354515046,
"loss": 0.8985,
"step": 1780
},
{
"epoch": 2.94,
"learning_rate": 0.0002152173913043478,
"loss": 0.9093,
"step": 1790
},
{
"epoch": 2.96,
"learning_rate": 0.00021471571906354512,
"loss": 0.8969,
"step": 1800
},
{
"epoch": 2.96,
"eval_loss": 0.9505146145820618,
"eval_runtime": 33.1216,
"eval_samples_per_second": 60.383,
"eval_steps_per_second": 1.268,
"step": 1800
},
{
"epoch": 2.98,
"learning_rate": 0.00021421404682274244,
"loss": 0.8899,
"step": 1810
},
{
"epoch": 2.99,
"learning_rate": 0.00021371237458193978,
"loss": 0.8995,
"step": 1820
},
{
"epoch": 3.01,
"learning_rate": 0.0002132107023411371,
"loss": 0.9034,
"step": 1830
},
{
"epoch": 3.03,
"learning_rate": 0.00021270903010033441,
"loss": 0.8901,
"step": 1840
},
{
"epoch": 3.04,
"learning_rate": 0.00021220735785953176,
"loss": 0.8977,
"step": 1850
},
{
"epoch": 3.06,
"learning_rate": 0.00021170568561872907,
"loss": 0.891,
"step": 1860
},
{
"epoch": 3.08,
"learning_rate": 0.0002112040133779264,
"loss": 0.8848,
"step": 1870
},
{
"epoch": 3.09,
"learning_rate": 0.00021070234113712373,
"loss": 0.8829,
"step": 1880
},
{
"epoch": 3.11,
"learning_rate": 0.00021020066889632105,
"loss": 0.8728,
"step": 1890
},
{
"epoch": 3.12,
"learning_rate": 0.00020969899665551837,
"loss": 0.8927,
"step": 1900
},
{
"epoch": 3.14,
"learning_rate": 0.00020919732441471568,
"loss": 0.8834,
"step": 1910
},
{
"epoch": 3.16,
"learning_rate": 0.00020869565217391303,
"loss": 0.9003,
"step": 1920
},
{
"epoch": 3.17,
"learning_rate": 0.00020819397993311034,
"loss": 0.8958,
"step": 1930
},
{
"epoch": 3.19,
"learning_rate": 0.00020769230769230766,
"loss": 0.9081,
"step": 1940
},
{
"epoch": 3.21,
"learning_rate": 0.000207190635451505,
"loss": 0.9027,
"step": 1950
},
{
"epoch": 3.22,
"learning_rate": 0.00020668896321070232,
"loss": 0.8997,
"step": 1960
},
{
"epoch": 3.24,
"learning_rate": 0.00020618729096989963,
"loss": 0.8906,
"step": 1970
},
{
"epoch": 3.26,
"learning_rate": 0.00020568561872909698,
"loss": 0.8886,
"step": 1980
},
{
"epoch": 3.27,
"learning_rate": 0.0002051839464882943,
"loss": 0.8886,
"step": 1990
},
{
"epoch": 3.29,
"learning_rate": 0.0002046822742474916,
"loss": 0.8885,
"step": 2000
},
{
"epoch": 3.29,
"eval_loss": 0.9456945061683655,
"eval_runtime": 33.1317,
"eval_samples_per_second": 60.365,
"eval_steps_per_second": 1.268,
"step": 2000
},
{
"epoch": 3.31,
"learning_rate": 0.00020418060200668895,
"loss": 0.8992,
"step": 2010
},
{
"epoch": 3.32,
"learning_rate": 0.00020367892976588627,
"loss": 0.892,
"step": 2020
},
{
"epoch": 3.34,
"learning_rate": 0.00020317725752508358,
"loss": 0.8848,
"step": 2030
},
{
"epoch": 3.36,
"learning_rate": 0.00020267558528428093,
"loss": 0.8889,
"step": 2040
},
{
"epoch": 3.37,
"learning_rate": 0.00020217391304347824,
"loss": 0.8872,
"step": 2050
},
{
"epoch": 3.39,
"learning_rate": 0.00020167224080267556,
"loss": 0.8933,
"step": 2060
},
{
"epoch": 3.4,
"learning_rate": 0.0002011705685618729,
"loss": 0.8816,
"step": 2070
},
{
"epoch": 3.42,
"learning_rate": 0.00020066889632107022,
"loss": 0.8836,
"step": 2080
},
{
"epoch": 3.44,
"learning_rate": 0.00020016722408026753,
"loss": 0.8918,
"step": 2090
},
{
"epoch": 3.45,
"learning_rate": 0.00019966555183946488,
"loss": 0.8944,
"step": 2100
},
{
"epoch": 3.47,
"learning_rate": 0.0001991638795986622,
"loss": 0.9055,
"step": 2110
},
{
"epoch": 3.49,
"learning_rate": 0.0001986622073578595,
"loss": 0.8809,
"step": 2120
},
{
"epoch": 3.5,
"learning_rate": 0.00019816053511705685,
"loss": 0.8982,
"step": 2130
},
{
"epoch": 3.52,
"learning_rate": 0.00019765886287625417,
"loss": 0.89,
"step": 2140
},
{
"epoch": 3.54,
"learning_rate": 0.00019715719063545149,
"loss": 0.8798,
"step": 2150
},
{
"epoch": 3.55,
"learning_rate": 0.0001966555183946488,
"loss": 0.9001,
"step": 2160
},
{
"epoch": 3.57,
"learning_rate": 0.00019615384615384615,
"loss": 0.8746,
"step": 2170
},
{
"epoch": 3.59,
"learning_rate": 0.00019565217391304346,
"loss": 0.8945,
"step": 2180
},
{
"epoch": 3.6,
"learning_rate": 0.00019515050167224078,
"loss": 0.8745,
"step": 2190
},
{
"epoch": 3.62,
"learning_rate": 0.00019464882943143812,
"loss": 0.8695,
"step": 2200
},
{
"epoch": 3.62,
"eval_loss": 0.9407016038894653,
"eval_runtime": 33.1522,
"eval_samples_per_second": 60.328,
"eval_steps_per_second": 1.267,
"step": 2200
},
{
"epoch": 3.63,
"learning_rate": 0.00019414715719063544,
"loss": 0.8917,
"step": 2210
},
{
"epoch": 3.65,
"learning_rate": 0.00019364548494983275,
"loss": 0.8822,
"step": 2220
},
{
"epoch": 3.67,
"learning_rate": 0.0001931438127090301,
"loss": 0.8694,
"step": 2230
},
{
"epoch": 3.68,
"learning_rate": 0.0001926421404682274,
"loss": 0.8904,
"step": 2240
},
{
"epoch": 3.7,
"learning_rate": 0.00019214046822742473,
"loss": 0.8759,
"step": 2250
},
{
"epoch": 3.72,
"learning_rate": 0.00019163879598662207,
"loss": 0.8784,
"step": 2260
},
{
"epoch": 3.73,
"learning_rate": 0.0001911371237458194,
"loss": 0.8829,
"step": 2270
},
{
"epoch": 3.75,
"learning_rate": 0.0001906354515050167,
"loss": 0.8788,
"step": 2280
},
{
"epoch": 3.77,
"learning_rate": 0.00019013377926421405,
"loss": 0.8966,
"step": 2290
},
{
"epoch": 3.78,
"learning_rate": 0.00018963210702341136,
"loss": 0.8803,
"step": 2300
},
{
"epoch": 3.8,
"learning_rate": 0.00018913043478260868,
"loss": 0.8783,
"step": 2310
},
{
"epoch": 3.82,
"learning_rate": 0.00018862876254180602,
"loss": 0.8874,
"step": 2320
},
{
"epoch": 3.83,
"learning_rate": 0.00018812709030100334,
"loss": 0.8711,
"step": 2330
},
{
"epoch": 3.85,
"learning_rate": 0.00018762541806020065,
"loss": 0.8676,
"step": 2340
},
{
"epoch": 3.87,
"learning_rate": 0.000187123745819398,
"loss": 0.8638,
"step": 2350
},
{
"epoch": 3.88,
"learning_rate": 0.00018662207357859531,
"loss": 0.8945,
"step": 2360
},
{
"epoch": 3.9,
"learning_rate": 0.00018612040133779263,
"loss": 0.8772,
"step": 2370
},
{
"epoch": 3.91,
"learning_rate": 0.00018561872909698997,
"loss": 0.8895,
"step": 2380
},
{
"epoch": 3.93,
"learning_rate": 0.0001851170568561873,
"loss": 0.87,
"step": 2390
},
{
"epoch": 3.95,
"learning_rate": 0.0001846153846153846,
"loss": 0.8608,
"step": 2400
},
{
"epoch": 3.95,
"eval_loss": 0.936105489730835,
"eval_runtime": 33.1676,
"eval_samples_per_second": 60.3,
"eval_steps_per_second": 1.266,
"step": 2400
},
{
"epoch": 3.96,
"learning_rate": 0.00018411371237458192,
"loss": 0.8848,
"step": 2410
},
{
"epoch": 3.98,
"learning_rate": 0.00018361204013377926,
"loss": 0.8823,
"step": 2420
},
{
"epoch": 4.0,
"learning_rate": 0.00018311036789297658,
"loss": 0.8692,
"step": 2430
},
{
"epoch": 4.01,
"learning_rate": 0.0001826086956521739,
"loss": 0.896,
"step": 2440
},
{
"epoch": 4.03,
"learning_rate": 0.00018210702341137124,
"loss": 0.8704,
"step": 2450
},
{
"epoch": 4.05,
"learning_rate": 0.00018160535117056856,
"loss": 0.8869,
"step": 2460
},
{
"epoch": 4.06,
"learning_rate": 0.00018110367892976587,
"loss": 0.8492,
"step": 2470
},
{
"epoch": 4.08,
"learning_rate": 0.00018060200668896322,
"loss": 0.8558,
"step": 2480
},
{
"epoch": 4.1,
"learning_rate": 0.00018010033444816053,
"loss": 0.8607,
"step": 2490
},
{
"epoch": 4.11,
"learning_rate": 0.00017959866220735785,
"loss": 0.8651,
"step": 2500
},
{
"epoch": 4.13,
"learning_rate": 0.0001790969899665552,
"loss": 0.8648,
"step": 2510
},
{
"epoch": 4.14,
"learning_rate": 0.0001785953177257525,
"loss": 0.8614,
"step": 2520
},
{
"epoch": 4.16,
"learning_rate": 0.00017809364548494982,
"loss": 0.8759,
"step": 2530
},
{
"epoch": 4.18,
"learning_rate": 0.00017759197324414717,
"loss": 0.8653,
"step": 2540
},
{
"epoch": 4.19,
"learning_rate": 0.00017709030100334448,
"loss": 0.8478,
"step": 2550
},
{
"epoch": 4.21,
"learning_rate": 0.0001765886287625418,
"loss": 0.8636,
"step": 2560
},
{
"epoch": 4.23,
"learning_rate": 0.00017608695652173914,
"loss": 0.859,
"step": 2570
},
{
"epoch": 4.24,
"learning_rate": 0.00017558528428093646,
"loss": 0.8685,
"step": 2580
},
{
"epoch": 4.26,
"learning_rate": 0.00017508361204013377,
"loss": 0.8595,
"step": 2590
},
{
"epoch": 4.28,
"learning_rate": 0.00017458193979933106,
"loss": 0.8698,
"step": 2600
},
{
"epoch": 4.28,
"eval_loss": 0.9336724877357483,
"eval_runtime": 33.1889,
"eval_samples_per_second": 60.261,
"eval_steps_per_second": 1.265,
"step": 2600
},
{
"epoch": 4.29,
"learning_rate": 0.0001740802675585284,
"loss": 0.8696,
"step": 2610
},
{
"epoch": 4.31,
"learning_rate": 0.00017357859531772572,
"loss": 0.8667,
"step": 2620
},
{
"epoch": 4.33,
"learning_rate": 0.00017307692307692304,
"loss": 0.8648,
"step": 2630
},
{
"epoch": 4.34,
"learning_rate": 0.00017257525083612038,
"loss": 0.8524,
"step": 2640
},
{
"epoch": 4.36,
"learning_rate": 0.0001720735785953177,
"loss": 0.8506,
"step": 2650
},
{
"epoch": 4.38,
"learning_rate": 0.00017157190635451501,
"loss": 0.8638,
"step": 2660
},
{
"epoch": 4.39,
"learning_rate": 0.00017107023411371236,
"loss": 0.8769,
"step": 2670
},
{
"epoch": 4.41,
"learning_rate": 0.00017056856187290967,
"loss": 0.8581,
"step": 2680
},
{
"epoch": 4.42,
"learning_rate": 0.000170066889632107,
"loss": 0.8564,
"step": 2690
},
{
"epoch": 4.44,
"learning_rate": 0.00016956521739130433,
"loss": 0.8369,
"step": 2700
},
{
"epoch": 4.46,
"learning_rate": 0.00016906354515050165,
"loss": 0.879,
"step": 2710
},
{
"epoch": 4.47,
"learning_rate": 0.00016856187290969897,
"loss": 0.8426,
"step": 2720
},
{
"epoch": 4.49,
"learning_rate": 0.00016806020066889628,
"loss": 0.8603,
"step": 2730
},
{
"epoch": 4.51,
"learning_rate": 0.00016755852842809362,
"loss": 0.8613,
"step": 2740
},
{
"epoch": 4.52,
"learning_rate": 0.00016705685618729094,
"loss": 0.8695,
"step": 2750
},
{
"epoch": 4.54,
"learning_rate": 0.00016655518394648826,
"loss": 0.8652,
"step": 2760
},
{
"epoch": 4.56,
"learning_rate": 0.0001660535117056856,
"loss": 0.8537,
"step": 2770
},
{
"epoch": 4.57,
"learning_rate": 0.00016555183946488292,
"loss": 0.8529,
"step": 2780
},
{
"epoch": 4.59,
"learning_rate": 0.00016505016722408023,
"loss": 0.8426,
"step": 2790
},
{
"epoch": 4.61,
"learning_rate": 0.00016454849498327758,
"loss": 0.8691,
"step": 2800
},
{
"epoch": 4.61,
"eval_loss": 0.9289954900741577,
"eval_runtime": 33.1507,
"eval_samples_per_second": 60.331,
"eval_steps_per_second": 1.267,
"step": 2800
},
{
"epoch": 4.62,
"learning_rate": 0.0001640468227424749,
"loss": 0.8589,
"step": 2810
},
{
"epoch": 4.64,
"learning_rate": 0.0001635451505016722,
"loss": 0.8583,
"step": 2820
},
{
"epoch": 4.65,
"learning_rate": 0.00016304347826086955,
"loss": 0.8408,
"step": 2830
},
{
"epoch": 4.67,
"learning_rate": 0.00016254180602006687,
"loss": 0.8634,
"step": 2840
},
{
"epoch": 4.69,
"learning_rate": 0.00016204013377926418,
"loss": 0.8513,
"step": 2850
},
{
"epoch": 4.7,
"learning_rate": 0.00016153846153846153,
"loss": 0.8568,
"step": 2860
},
{
"epoch": 4.72,
"learning_rate": 0.00016103678929765884,
"loss": 0.8703,
"step": 2870
},
{
"epoch": 4.74,
"learning_rate": 0.00016053511705685616,
"loss": 0.8545,
"step": 2880
},
{
"epoch": 4.75,
"learning_rate": 0.0001600334448160535,
"loss": 0.8456,
"step": 2890
},
{
"epoch": 4.77,
"learning_rate": 0.00015953177257525082,
"loss": 0.8548,
"step": 2900
},
{
"epoch": 4.79,
"learning_rate": 0.00015903010033444813,
"loss": 0.8515,
"step": 2910
},
{
"epoch": 4.8,
"learning_rate": 0.00015852842809364548,
"loss": 0.8562,
"step": 2920
},
{
"epoch": 4.82,
"learning_rate": 0.0001580267558528428,
"loss": 0.8623,
"step": 2930
},
{
"epoch": 4.84,
"learning_rate": 0.0001575250836120401,
"loss": 0.855,
"step": 2940
},
{
"epoch": 4.85,
"learning_rate": 0.00015702341137123745,
"loss": 0.876,
"step": 2950
},
{
"epoch": 4.87,
"learning_rate": 0.00015652173913043477,
"loss": 0.8582,
"step": 2960
},
{
"epoch": 4.88,
"learning_rate": 0.00015602006688963209,
"loss": 0.8488,
"step": 2970
},
{
"epoch": 4.9,
"learning_rate": 0.0001555183946488294,
"loss": 0.8466,
"step": 2980
},
{
"epoch": 4.92,
"learning_rate": 0.00015501672240802674,
"loss": 0.8407,
"step": 2990
},
{
"epoch": 4.93,
"learning_rate": 0.00015451505016722406,
"loss": 0.8732,
"step": 3000
},
{
"epoch": 4.93,
"eval_loss": 0.9256382584571838,
"eval_runtime": 33.1403,
"eval_samples_per_second": 60.349,
"eval_steps_per_second": 1.267,
"step": 3000
},
{
"epoch": 4.95,
"learning_rate": 0.00015401337792642138,
"loss": 0.8422,
"step": 3010
},
{
"epoch": 4.97,
"learning_rate": 0.00015351170568561872,
"loss": 0.8528,
"step": 3020
},
{
"epoch": 4.98,
"learning_rate": 0.00015301003344481604,
"loss": 0.854,
"step": 3030
},
{
"epoch": 5.0,
"learning_rate": 0.00015250836120401335,
"loss": 0.8598,
"step": 3040
},
{
"epoch": 5.02,
"learning_rate": 0.0001520066889632107,
"loss": 0.843,
"step": 3050
},
{
"epoch": 5.03,
"learning_rate": 0.000151505016722408,
"loss": 0.8394,
"step": 3060
},
{
"epoch": 5.05,
"learning_rate": 0.00015100334448160533,
"loss": 0.8593,
"step": 3070
},
{
"epoch": 5.07,
"learning_rate": 0.00015050167224080267,
"loss": 0.8566,
"step": 3080
},
{
"epoch": 5.08,
"learning_rate": 0.00015,
"loss": 0.8469,
"step": 3090
},
{
"epoch": 5.1,
"learning_rate": 0.0001494983277591973,
"loss": 0.8421,
"step": 3100
},
{
"epoch": 5.12,
"learning_rate": 0.00014899665551839465,
"loss": 0.8425,
"step": 3110
},
{
"epoch": 5.13,
"learning_rate": 0.00014849498327759196,
"loss": 0.8341,
"step": 3120
},
{
"epoch": 5.15,
"learning_rate": 0.00014804347826086954,
"loss": 0.8527,
"step": 3130
},
{
"epoch": 5.16,
"learning_rate": 0.00014754180602006688,
"loss": 0.8517,
"step": 3140
},
{
"epoch": 5.18,
"learning_rate": 0.0001470401337792642,
"loss": 0.8389,
"step": 3150
},
{
"epoch": 5.2,
"learning_rate": 0.00014653846153846151,
"loss": 0.8311,
"step": 3160
},
{
"epoch": 5.21,
"learning_rate": 0.00014603678929765886,
"loss": 0.85,
"step": 3170
},
{
"epoch": 5.23,
"learning_rate": 0.00014553511705685617,
"loss": 0.8469,
"step": 3180
},
{
"epoch": 5.25,
"learning_rate": 0.0001450334448160535,
"loss": 0.8569,
"step": 3190
},
{
"epoch": 5.26,
"learning_rate": 0.00014453177257525083,
"loss": 0.853,
"step": 3200
},
{
"epoch": 5.26,
"eval_loss": 0.9253825545310974,
"eval_runtime": 33.1231,
"eval_samples_per_second": 60.381,
"eval_steps_per_second": 1.268,
"step": 3200
},
{
"epoch": 5.28,
"learning_rate": 0.00014403010033444815,
"loss": 0.8445,
"step": 3210
},
{
"epoch": 5.3,
"learning_rate": 0.00014352842809364547,
"loss": 0.8462,
"step": 3220
},
{
"epoch": 5.31,
"learning_rate": 0.0001430267558528428,
"loss": 0.8638,
"step": 3230
},
{
"epoch": 5.33,
"learning_rate": 0.00014252508361204012,
"loss": 0.8405,
"step": 3240
},
{
"epoch": 5.35,
"learning_rate": 0.00014202341137123744,
"loss": 0.8329,
"step": 3250
},
{
"epoch": 5.36,
"learning_rate": 0.00014152173913043476,
"loss": 0.8502,
"step": 3260
},
{
"epoch": 5.38,
"learning_rate": 0.0001410200668896321,
"loss": 0.8398,
"step": 3270
},
{
"epoch": 5.39,
"learning_rate": 0.00014051839464882942,
"loss": 0.8667,
"step": 3280
},
{
"epoch": 5.41,
"learning_rate": 0.00014001672240802673,
"loss": 0.8393,
"step": 3290
},
{
"epoch": 5.43,
"learning_rate": 0.00013951505016722408,
"loss": 0.8493,
"step": 3300
},
{
"epoch": 5.44,
"learning_rate": 0.0001390133779264214,
"loss": 0.8369,
"step": 3310
},
{
"epoch": 5.46,
"learning_rate": 0.0001385117056856187,
"loss": 0.8305,
"step": 3320
},
{
"epoch": 5.48,
"learning_rate": 0.00013801003344481605,
"loss": 0.8425,
"step": 3330
},
{
"epoch": 5.49,
"learning_rate": 0.00013750836120401337,
"loss": 0.8392,
"step": 3340
},
{
"epoch": 5.51,
"learning_rate": 0.00013700668896321068,
"loss": 0.8354,
"step": 3350
},
{
"epoch": 5.53,
"learning_rate": 0.00013650501672240803,
"loss": 0.8296,
"step": 3360
},
{
"epoch": 5.54,
"learning_rate": 0.00013600334448160534,
"loss": 0.8442,
"step": 3370
},
{
"epoch": 5.56,
"learning_rate": 0.00013550167224080266,
"loss": 0.84,
"step": 3380
},
{
"epoch": 5.58,
"learning_rate": 0.000135,
"loss": 0.8538,
"step": 3390
},
{
"epoch": 5.59,
"learning_rate": 0.00013449832775919732,
"loss": 0.8356,
"step": 3400
},
{
"epoch": 5.59,
"eval_loss": 0.9222854971885681,
"eval_runtime": 33.1332,
"eval_samples_per_second": 60.362,
"eval_steps_per_second": 1.268,
"step": 3400
},
{
"epoch": 5.61,
"learning_rate": 0.00013399665551839463,
"loss": 0.8417,
"step": 3410
},
{
"epoch": 5.62,
"learning_rate": 0.00013349498327759198,
"loss": 0.8287,
"step": 3420
},
{
"epoch": 5.64,
"learning_rate": 0.0001329933110367893,
"loss": 0.8429,
"step": 3430
},
{
"epoch": 5.66,
"learning_rate": 0.0001324916387959866,
"loss": 0.8453,
"step": 3440
},
{
"epoch": 5.67,
"learning_rate": 0.00013198996655518395,
"loss": 0.8436,
"step": 3450
},
{
"epoch": 5.69,
"learning_rate": 0.00013148829431438127,
"loss": 0.8357,
"step": 3460
},
{
"epoch": 5.71,
"learning_rate": 0.00013098662207357858,
"loss": 0.8334,
"step": 3470
},
{
"epoch": 5.72,
"learning_rate": 0.00013048494983277593,
"loss": 0.8401,
"step": 3480
},
{
"epoch": 5.74,
"learning_rate": 0.00012998327759197324,
"loss": 0.8513,
"step": 3490
},
{
"epoch": 5.76,
"learning_rate": 0.00012948160535117056,
"loss": 0.8369,
"step": 3500
},
{
"epoch": 5.77,
"learning_rate": 0.00012897993311036788,
"loss": 0.8437,
"step": 3510
},
{
"epoch": 5.79,
"learning_rate": 0.00012847826086956522,
"loss": 0.8422,
"step": 3520
},
{
"epoch": 5.81,
"learning_rate": 0.00012797658862876254,
"loss": 0.8437,
"step": 3530
},
{
"epoch": 5.82,
"learning_rate": 0.00012747491638795985,
"loss": 0.8277,
"step": 3540
},
{
"epoch": 5.84,
"learning_rate": 0.00012697324414715717,
"loss": 0.8317,
"step": 3550
},
{
"epoch": 5.86,
"learning_rate": 0.00012647157190635448,
"loss": 0.8322,
"step": 3560
},
{
"epoch": 5.87,
"learning_rate": 0.00012596989966555183,
"loss": 0.8221,
"step": 3570
},
{
"epoch": 5.89,
"learning_rate": 0.00012546822742474914,
"loss": 0.8295,
"step": 3580
},
{
"epoch": 5.9,
"learning_rate": 0.00012496655518394646,
"loss": 0.8411,
"step": 3590
},
{
"epoch": 5.92,
"learning_rate": 0.0001244648829431438,
"loss": 0.8279,
"step": 3600
},
{
"epoch": 5.92,
"eval_loss": 0.9197421669960022,
"eval_runtime": 33.1031,
"eval_samples_per_second": 60.417,
"eval_steps_per_second": 1.269,
"step": 3600
},
{
"epoch": 5.94,
"learning_rate": 0.00012396321070234112,
"loss": 0.8296,
"step": 3610
},
{
"epoch": 5.95,
"learning_rate": 0.00012346153846153844,
"loss": 0.8278,
"step": 3620
},
{
"epoch": 5.97,
"learning_rate": 0.00012295986622073578,
"loss": 0.8408,
"step": 3630
},
{
"epoch": 5.99,
"learning_rate": 0.0001224581939799331,
"loss": 0.8232,
"step": 3640
},
{
"epoch": 6.0,
"learning_rate": 0.00012195652173913042,
"loss": 0.826,
"step": 3650
},
{
"epoch": 6.02,
"learning_rate": 0.00012145484949832774,
"loss": 0.811,
"step": 3660
},
{
"epoch": 6.04,
"learning_rate": 0.00012095317725752507,
"loss": 0.85,
"step": 3670
},
{
"epoch": 6.05,
"learning_rate": 0.0001204515050167224,
"loss": 0.8395,
"step": 3680
},
{
"epoch": 6.07,
"learning_rate": 0.00011994983277591972,
"loss": 0.8179,
"step": 3690
},
{
"epoch": 6.09,
"learning_rate": 0.00011944816053511705,
"loss": 0.8279,
"step": 3700
},
{
"epoch": 6.1,
"learning_rate": 0.00011894648829431436,
"loss": 0.8227,
"step": 3710
},
{
"epoch": 6.12,
"learning_rate": 0.00011844481605351169,
"loss": 0.8248,
"step": 3720
},
{
"epoch": 6.13,
"learning_rate": 0.00011794314381270902,
"loss": 0.8332,
"step": 3730
},
{
"epoch": 6.15,
"learning_rate": 0.00011744147157190634,
"loss": 0.8204,
"step": 3740
},
{
"epoch": 6.17,
"learning_rate": 0.00011693979933110367,
"loss": 0.8336,
"step": 3750
},
{
"epoch": 6.18,
"learning_rate": 0.000116438127090301,
"loss": 0.813,
"step": 3760
},
{
"epoch": 6.2,
"learning_rate": 0.00011593645484949831,
"loss": 0.8211,
"step": 3770
},
{
"epoch": 6.22,
"learning_rate": 0.00011543478260869564,
"loss": 0.8193,
"step": 3780
},
{
"epoch": 6.23,
"learning_rate": 0.00011493311036789297,
"loss": 0.8425,
"step": 3790
},
{
"epoch": 6.25,
"learning_rate": 0.00011443143812709029,
"loss": 0.8209,
"step": 3800
},
{
"epoch": 6.25,
"eval_loss": 0.9195341467857361,
"eval_runtime": 33.159,
"eval_samples_per_second": 60.315,
"eval_steps_per_second": 1.267,
"step": 3800
},
{
"epoch": 6.27,
"learning_rate": 0.00011392976588628762,
"loss": 0.8276,
"step": 3810
},
{
"epoch": 6.28,
"learning_rate": 0.00011342809364548495,
"loss": 0.845,
"step": 3820
},
{
"epoch": 6.3,
"learning_rate": 0.00011292642140468226,
"loss": 0.83,
"step": 3830
},
{
"epoch": 6.32,
"learning_rate": 0.00011242474916387959,
"loss": 0.8226,
"step": 3840
},
{
"epoch": 6.33,
"learning_rate": 0.00011197324414715718,
"loss": 0.8329,
"step": 3850
},
{
"epoch": 6.35,
"learning_rate": 0.0001114715719063545,
"loss": 0.8384,
"step": 3860
},
{
"epoch": 6.37,
"learning_rate": 0.00011096989966555183,
"loss": 0.8186,
"step": 3870
},
{
"epoch": 6.38,
"learning_rate": 0.00011046822742474914,
"loss": 0.8027,
"step": 3880
},
{
"epoch": 6.4,
"learning_rate": 0.00010996655518394647,
"loss": 0.8214,
"step": 3890
},
{
"epoch": 6.41,
"learning_rate": 0.0001094648829431438,
"loss": 0.8066,
"step": 3900
},
{
"epoch": 6.43,
"learning_rate": 0.00010896321070234112,
"loss": 0.8303,
"step": 3910
},
{
"epoch": 6.45,
"learning_rate": 0.00010846153846153845,
"loss": 0.8342,
"step": 3920
},
{
"epoch": 6.46,
"learning_rate": 0.00010795986622073578,
"loss": 0.8304,
"step": 3930
},
{
"epoch": 6.48,
"learning_rate": 0.0001074581939799331,
"loss": 0.8279,
"step": 3940
},
{
"epoch": 6.5,
"learning_rate": 0.00010695652173913043,
"loss": 0.8104,
"step": 3950
},
{
"epoch": 6.51,
"learning_rate": 0.00010645484949832776,
"loss": 0.8372,
"step": 3960
},
{
"epoch": 6.53,
"learning_rate": 0.00010595317725752507,
"loss": 0.8239,
"step": 3970
},
{
"epoch": 6.55,
"learning_rate": 0.0001054515050167224,
"loss": 0.8404,
"step": 3980
},
{
"epoch": 6.56,
"learning_rate": 0.00010494983277591972,
"loss": 0.8206,
"step": 3990
},
{
"epoch": 6.58,
"learning_rate": 0.00010444816053511705,
"loss": 0.8174,
"step": 4000
},
{
"epoch": 6.58,
"eval_loss": 0.9175052642822266,
"eval_runtime": 33.1942,
"eval_samples_per_second": 60.251,
"eval_steps_per_second": 1.265,
"step": 4000
},
{
"epoch": 6.6,
"learning_rate": 0.00010394648829431438,
"loss": 0.8335,
"step": 4010
},
{
"epoch": 6.61,
"learning_rate": 0.00010344481605351169,
"loss": 0.84,
"step": 4020
},
{
"epoch": 6.63,
"learning_rate": 0.00010294314381270902,
"loss": 0.815,
"step": 4030
},
{
"epoch": 6.64,
"learning_rate": 0.00010244147157190635,
"loss": 0.8112,
"step": 4040
},
{
"epoch": 6.66,
"learning_rate": 0.00010193979933110367,
"loss": 0.8302,
"step": 4050
},
{
"epoch": 6.68,
"learning_rate": 0.000101438127090301,
"loss": 0.8284,
"step": 4060
},
{
"epoch": 6.69,
"learning_rate": 0.00010093645484949833,
"loss": 0.823,
"step": 4070
},
{
"epoch": 6.71,
"learning_rate": 0.00010043478260869564,
"loss": 0.8192,
"step": 4080
},
{
"epoch": 6.73,
"learning_rate": 9.993311036789297e-05,
"loss": 0.8285,
"step": 4090
},
{
"epoch": 6.74,
"learning_rate": 9.94314381270903e-05,
"loss": 0.8222,
"step": 4100
},
{
"epoch": 6.76,
"learning_rate": 9.892976588628762e-05,
"loss": 0.8273,
"step": 4110
},
{
"epoch": 6.78,
"learning_rate": 9.842809364548495e-05,
"loss": 0.8145,
"step": 4120
},
{
"epoch": 6.79,
"learning_rate": 9.792642140468226e-05,
"loss": 0.8279,
"step": 4130
},
{
"epoch": 6.81,
"learning_rate": 9.74247491638796e-05,
"loss": 0.8193,
"step": 4140
},
{
"epoch": 6.83,
"learning_rate": 9.692307692307692e-05,
"loss": 0.8243,
"step": 4150
},
{
"epoch": 6.84,
"learning_rate": 9.642140468227424e-05,
"loss": 0.825,
"step": 4160
},
{
"epoch": 6.86,
"learning_rate": 9.591973244147157e-05,
"loss": 0.8185,
"step": 4170
},
{
"epoch": 6.88,
"learning_rate": 9.54180602006689e-05,
"loss": 0.8358,
"step": 4180
},
{
"epoch": 6.89,
"learning_rate": 9.491638795986622e-05,
"loss": 0.8164,
"step": 4190
},
{
"epoch": 6.91,
"learning_rate": 9.441471571906355e-05,
"loss": 0.833,
"step": 4200
},
{
"epoch": 6.91,
"eval_loss": 0.9155363440513611,
"eval_runtime": 33.1004,
"eval_samples_per_second": 60.422,
"eval_steps_per_second": 1.269,
"step": 4200
},
{
"epoch": 6.92,
"learning_rate": 9.391304347826087e-05,
"loss": 0.8186,
"step": 4210
},
{
"epoch": 6.94,
"learning_rate": 9.341137123745819e-05,
"loss": 0.8211,
"step": 4220
},
{
"epoch": 6.96,
"learning_rate": 9.290969899665552e-05,
"loss": 0.8216,
"step": 4230
},
{
"epoch": 6.97,
"learning_rate": 9.240802675585282e-05,
"loss": 0.7982,
"step": 4240
},
{
"epoch": 6.99,
"learning_rate": 9.190635451505015e-05,
"loss": 0.8025,
"step": 4250
},
{
"epoch": 7.01,
"learning_rate": 9.140468227424748e-05,
"loss": 0.814,
"step": 4260
},
{
"epoch": 7.02,
"learning_rate": 9.09030100334448e-05,
"loss": 0.8048,
"step": 4270
},
{
"epoch": 7.04,
"learning_rate": 9.040133779264213e-05,
"loss": 0.8279,
"step": 4280
},
{
"epoch": 7.06,
"learning_rate": 8.989966555183944e-05,
"loss": 0.8208,
"step": 4290
},
{
"epoch": 7.07,
"learning_rate": 8.939799331103677e-05,
"loss": 0.8009,
"step": 4300
},
{
"epoch": 7.09,
"learning_rate": 8.88963210702341e-05,
"loss": 0.8151,
"step": 4310
},
{
"epoch": 7.11,
"learning_rate": 8.839464882943142e-05,
"loss": 0.8066,
"step": 4320
},
{
"epoch": 7.12,
"learning_rate": 8.789297658862875e-05,
"loss": 0.8239,
"step": 4330
},
{
"epoch": 7.14,
"learning_rate": 8.739130434782608e-05,
"loss": 0.8055,
"step": 4340
},
{
"epoch": 7.15,
"learning_rate": 8.68896321070234e-05,
"loss": 0.816,
"step": 4350
},
{
"epoch": 7.17,
"learning_rate": 8.638795986622073e-05,
"loss": 0.815,
"step": 4360
},
{
"epoch": 7.19,
"learning_rate": 8.588628762541805e-05,
"loss": 0.7989,
"step": 4370
},
{
"epoch": 7.2,
"learning_rate": 8.538461538461537e-05,
"loss": 0.7924,
"step": 4380
},
{
"epoch": 7.22,
"learning_rate": 8.48829431438127e-05,
"loss": 0.8151,
"step": 4390
},
{
"epoch": 7.24,
"learning_rate": 8.438127090301002e-05,
"loss": 0.82,
"step": 4400
},
{
"epoch": 7.24,
"eval_loss": 0.9152849316596985,
"eval_runtime": 33.2483,
"eval_samples_per_second": 60.153,
"eval_steps_per_second": 1.263,
"step": 4400
},
{
"epoch": 7.25,
"learning_rate": 8.387959866220735e-05,
"loss": 0.8091,
"step": 4410
},
{
"epoch": 7.27,
"learning_rate": 8.337792642140468e-05,
"loss": 0.8195,
"step": 4420
},
{
"epoch": 7.29,
"learning_rate": 8.287625418060199e-05,
"loss": 0.7982,
"step": 4430
},
{
"epoch": 7.3,
"learning_rate": 8.237458193979932e-05,
"loss": 0.8106,
"step": 4440
},
{
"epoch": 7.32,
"learning_rate": 8.187290969899665e-05,
"loss": 0.811,
"step": 4450
},
{
"epoch": 7.34,
"learning_rate": 8.137123745819397e-05,
"loss": 0.8213,
"step": 4460
},
{
"epoch": 7.35,
"learning_rate": 8.08695652173913e-05,
"loss": 0.8143,
"step": 4470
},
{
"epoch": 7.37,
"learning_rate": 8.036789297658863e-05,
"loss": 0.8151,
"step": 4480
},
{
"epoch": 7.38,
"learning_rate": 7.986622073578594e-05,
"loss": 0.8208,
"step": 4490
},
{
"epoch": 7.4,
"learning_rate": 7.936454849498327e-05,
"loss": 0.8198,
"step": 4500
},
{
"epoch": 7.42,
"learning_rate": 7.88628762541806e-05,
"loss": 0.8219,
"step": 4510
},
{
"epoch": 7.43,
"learning_rate": 7.836120401337792e-05,
"loss": 0.7986,
"step": 4520
},
{
"epoch": 7.45,
"learning_rate": 7.785953177257525e-05,
"loss": 0.8054,
"step": 4530
},
{
"epoch": 7.47,
"learning_rate": 7.735785953177256e-05,
"loss": 0.8138,
"step": 4540
},
{
"epoch": 7.48,
"learning_rate": 7.68561872909699e-05,
"loss": 0.8009,
"step": 4550
},
{
"epoch": 7.5,
"learning_rate": 7.635451505016722e-05,
"loss": 0.8142,
"step": 4560
},
{
"epoch": 7.52,
"learning_rate": 7.585284280936454e-05,
"loss": 0.8348,
"step": 4570
},
{
"epoch": 7.53,
"learning_rate": 7.535117056856187e-05,
"loss": 0.8042,
"step": 4580
},
{
"epoch": 7.55,
"learning_rate": 7.48494983277592e-05,
"loss": 0.819,
"step": 4590
},
{
"epoch": 7.57,
"learning_rate": 7.434782608695652e-05,
"loss": 0.8027,
"step": 4600
},
{
"epoch": 7.57,
"eval_loss": 0.914962887763977,
"eval_runtime": 33.1103,
"eval_samples_per_second": 60.404,
"eval_steps_per_second": 1.268,
"step": 4600
},
{
"epoch": 7.58,
"learning_rate": 7.384615384615384e-05,
"loss": 0.7976,
"step": 4610
},
{
"epoch": 7.6,
"learning_rate": 7.334448160535116e-05,
"loss": 0.8168,
"step": 4620
},
{
"epoch": 7.62,
"learning_rate": 7.284280936454849e-05,
"loss": 0.7952,
"step": 4630
},
{
"epoch": 7.63,
"learning_rate": 7.234113712374581e-05,
"loss": 0.8238,
"step": 4640
},
{
"epoch": 7.65,
"learning_rate": 7.183946488294314e-05,
"loss": 0.8196,
"step": 4650
},
{
"epoch": 7.66,
"learning_rate": 7.133779264214047e-05,
"loss": 0.8032,
"step": 4660
},
{
"epoch": 7.68,
"learning_rate": 7.083612040133778e-05,
"loss": 0.8067,
"step": 4670
},
{
"epoch": 7.7,
"learning_rate": 7.033444816053511e-05,
"loss": 0.8167,
"step": 4680
},
{
"epoch": 7.71,
"learning_rate": 6.983277591973243e-05,
"loss": 0.7856,
"step": 4690
},
{
"epoch": 7.73,
"learning_rate": 6.933110367892976e-05,
"loss": 0.8234,
"step": 4700
},
{
"epoch": 7.75,
"learning_rate": 6.882943143812709e-05,
"loss": 0.805,
"step": 4710
},
{
"epoch": 7.76,
"learning_rate": 6.83277591973244e-05,
"loss": 0.8065,
"step": 4720
},
{
"epoch": 7.78,
"learning_rate": 6.782608695652173e-05,
"loss": 0.8084,
"step": 4730
},
{
"epoch": 7.8,
"learning_rate": 6.732441471571906e-05,
"loss": 0.8105,
"step": 4740
},
{
"epoch": 7.81,
"learning_rate": 6.682274247491638e-05,
"loss": 0.8065,
"step": 4750
},
{
"epoch": 7.83,
"learning_rate": 6.632107023411371e-05,
"loss": 0.8007,
"step": 4760
},
{
"epoch": 7.85,
"learning_rate": 6.581939799331104e-05,
"loss": 0.8238,
"step": 4770
},
{
"epoch": 7.86,
"learning_rate": 6.531772575250835e-05,
"loss": 0.8083,
"step": 4780
},
{
"epoch": 7.88,
"learning_rate": 6.481605351170568e-05,
"loss": 0.81,
"step": 4790
},
{
"epoch": 7.89,
"learning_rate": 6.4314381270903e-05,
"loss": 0.7963,
"step": 4800
},
{
"epoch": 7.89,
"eval_loss": 0.9136397838592529,
"eval_runtime": 33.1953,
"eval_samples_per_second": 60.249,
"eval_steps_per_second": 1.265,
"step": 4800
},
{
"epoch": 7.91,
"learning_rate": 6.381270903010033e-05,
"loss": 0.7906,
"step": 4810
},
{
"epoch": 7.93,
"learning_rate": 6.331103678929766e-05,
"loss": 0.8132,
"step": 4820
},
{
"epoch": 7.94,
"learning_rate": 6.280936454849498e-05,
"loss": 0.8175,
"step": 4830
},
{
"epoch": 7.96,
"learning_rate": 6.23076923076923e-05,
"loss": 0.801,
"step": 4840
},
{
"epoch": 7.98,
"learning_rate": 6.180602006688964e-05,
"loss": 0.8117,
"step": 4850
},
{
"epoch": 7.99,
"learning_rate": 6.130434782608695e-05,
"loss": 0.8019,
"step": 4860
},
{
"epoch": 8.01,
"learning_rate": 6.0802675585284274e-05,
"loss": 0.802,
"step": 4870
},
{
"epoch": 8.03,
"learning_rate": 6.03010033444816e-05,
"loss": 0.821,
"step": 4880
},
{
"epoch": 8.04,
"learning_rate": 5.979933110367892e-05,
"loss": 0.7962,
"step": 4890
},
{
"epoch": 8.06,
"learning_rate": 5.929765886287625e-05,
"loss": 0.7992,
"step": 4900
},
{
"epoch": 8.08,
"learning_rate": 5.879598662207357e-05,
"loss": 0.7998,
"step": 4910
},
{
"epoch": 8.09,
"learning_rate": 5.8294314381270895e-05,
"loss": 0.7934,
"step": 4920
},
{
"epoch": 8.11,
"learning_rate": 5.779264214046822e-05,
"loss": 0.801,
"step": 4930
},
{
"epoch": 8.12,
"learning_rate": 5.729096989966555e-05,
"loss": 0.8039,
"step": 4940
},
{
"epoch": 8.14,
"learning_rate": 5.678929765886287e-05,
"loss": 0.802,
"step": 4950
},
{
"epoch": 8.16,
"learning_rate": 5.6287625418060194e-05,
"loss": 0.781,
"step": 4960
},
{
"epoch": 8.17,
"learning_rate": 5.5785953177257517e-05,
"loss": 0.7938,
"step": 4970
},
{
"epoch": 8.19,
"learning_rate": 5.5284280936454846e-05,
"loss": 0.8031,
"step": 4980
},
{
"epoch": 8.21,
"learning_rate": 5.478260869565217e-05,
"loss": 0.7999,
"step": 4990
},
{
"epoch": 8.22,
"learning_rate": 5.428093645484949e-05,
"loss": 0.7985,
"step": 5000
},
{
"epoch": 8.22,
"eval_loss": 0.9126840829849243,
"eval_runtime": 33.2306,
"eval_samples_per_second": 60.185,
"eval_steps_per_second": 1.264,
"step": 5000
},
{
"epoch": 8.24,
"learning_rate": 5.377926421404682e-05,
"loss": 0.8056,
"step": 5010
},
{
"epoch": 8.26,
"learning_rate": 5.3277591973244145e-05,
"loss": 0.8099,
"step": 5020
},
{
"epoch": 8.27,
"learning_rate": 5.277591973244147e-05,
"loss": 0.7982,
"step": 5030
},
{
"epoch": 8.29,
"learning_rate": 5.227424749163879e-05,
"loss": 0.8021,
"step": 5040
},
{
"epoch": 8.31,
"learning_rate": 5.177257525083612e-05,
"loss": 0.8044,
"step": 5050
},
{
"epoch": 8.32,
"learning_rate": 5.127090301003344e-05,
"loss": 0.8099,
"step": 5060
},
{
"epoch": 8.34,
"learning_rate": 5.0769230769230766e-05,
"loss": 0.818,
"step": 5070
},
{
"epoch": 8.36,
"learning_rate": 5.0267558528428095e-05,
"loss": 0.796,
"step": 5080
},
{
"epoch": 8.37,
"learning_rate": 4.976588628762542e-05,
"loss": 0.8102,
"step": 5090
},
{
"epoch": 8.39,
"learning_rate": 4.926421404682274e-05,
"loss": 0.8092,
"step": 5100
},
{
"epoch": 8.4,
"learning_rate": 4.8762541806020064e-05,
"loss": 0.8064,
"step": 5110
},
{
"epoch": 8.42,
"learning_rate": 4.826086956521738e-05,
"loss": 0.8171,
"step": 5120
},
{
"epoch": 8.44,
"learning_rate": 4.775919732441471e-05,
"loss": 0.8077,
"step": 5130
},
{
"epoch": 8.45,
"learning_rate": 4.725752508361203e-05,
"loss": 0.8084,
"step": 5140
},
{
"epoch": 8.47,
"learning_rate": 4.6755852842809356e-05,
"loss": 0.8081,
"step": 5150
},
{
"epoch": 8.49,
"learning_rate": 4.6254180602006685e-05,
"loss": 0.7926,
"step": 5160
},
{
"epoch": 8.5,
"learning_rate": 4.575250836120401e-05,
"loss": 0.7938,
"step": 5170
},
{
"epoch": 8.52,
"learning_rate": 4.525083612040133e-05,
"loss": 0.8196,
"step": 5180
},
{
"epoch": 8.54,
"learning_rate": 4.4749163879598654e-05,
"loss": 0.8059,
"step": 5190
},
{
"epoch": 8.55,
"learning_rate": 4.4247491638795984e-05,
"loss": 0.7997,
"step": 5200
},
{
"epoch": 8.55,
"eval_loss": 0.9122925996780396,
"eval_runtime": 33.1494,
"eval_samples_per_second": 60.333,
"eval_steps_per_second": 1.267,
"step": 5200
},
{
"epoch": 8.57,
"learning_rate": 4.374581939799331e-05,
"loss": 0.79,
"step": 5210
},
{
"epoch": 8.59,
"learning_rate": 4.324414715719063e-05,
"loss": 0.8103,
"step": 5220
},
{
"epoch": 8.6,
"learning_rate": 4.274247491638796e-05,
"loss": 0.7972,
"step": 5230
},
{
"epoch": 8.62,
"learning_rate": 4.224080267558528e-05,
"loss": 0.8117,
"step": 5240
},
{
"epoch": 8.63,
"learning_rate": 4.1739130434782605e-05,
"loss": 0.8058,
"step": 5250
},
{
"epoch": 8.65,
"learning_rate": 4.123745819397993e-05,
"loss": 0.8043,
"step": 5260
},
{
"epoch": 8.67,
"learning_rate": 4.073578595317726e-05,
"loss": 0.7986,
"step": 5270
},
{
"epoch": 8.68,
"learning_rate": 4.023411371237458e-05,
"loss": 0.7967,
"step": 5280
},
{
"epoch": 8.7,
"learning_rate": 3.9732441471571903e-05,
"loss": 0.8072,
"step": 5290
},
{
"epoch": 8.72,
"learning_rate": 3.9230769230769226e-05,
"loss": 0.8061,
"step": 5300
},
{
"epoch": 8.73,
"learning_rate": 3.8729096989966556e-05,
"loss": 0.8101,
"step": 5310
},
{
"epoch": 8.75,
"learning_rate": 3.822742474916388e-05,
"loss": 0.7987,
"step": 5320
},
{
"epoch": 8.77,
"learning_rate": 3.77257525083612e-05,
"loss": 0.7971,
"step": 5330
},
{
"epoch": 8.78,
"learning_rate": 3.7224080267558525e-05,
"loss": 0.7826,
"step": 5340
},
{
"epoch": 8.8,
"learning_rate": 3.672240802675585e-05,
"loss": 0.7956,
"step": 5350
},
{
"epoch": 8.82,
"learning_rate": 3.622073578595318e-05,
"loss": 0.8029,
"step": 5360
},
{
"epoch": 8.83,
"learning_rate": 3.57190635451505e-05,
"loss": 0.8041,
"step": 5370
},
{
"epoch": 8.85,
"learning_rate": 3.521739130434782e-05,
"loss": 0.7844,
"step": 5380
},
{
"epoch": 8.87,
"learning_rate": 3.4715719063545146e-05,
"loss": 0.7914,
"step": 5390
},
{
"epoch": 8.88,
"learning_rate": 3.4214046822742476e-05,
"loss": 0.8197,
"step": 5400
},
{
"epoch": 8.88,
"eval_loss": 0.9114879369735718,
"eval_runtime": 33.1591,
"eval_samples_per_second": 60.315,
"eval_steps_per_second": 1.267,
"step": 5400
},
{
"epoch": 8.9,
"learning_rate": 3.37123745819398e-05,
"loss": 0.8012,
"step": 5410
},
{
"epoch": 8.91,
"learning_rate": 3.321070234113712e-05,
"loss": 0.8069,
"step": 5420
},
{
"epoch": 8.93,
"learning_rate": 3.2709030100334444e-05,
"loss": 0.7984,
"step": 5430
},
{
"epoch": 8.95,
"learning_rate": 3.220735785953177e-05,
"loss": 0.7877,
"step": 5440
},
{
"epoch": 8.96,
"learning_rate": 3.170568561872909e-05,
"loss": 0.7964,
"step": 5450
},
{
"epoch": 8.98,
"learning_rate": 3.120401337792642e-05,
"loss": 0.8064,
"step": 5460
},
{
"epoch": 9.0,
"learning_rate": 3.070234113712374e-05,
"loss": 0.8088,
"step": 5470
},
{
"epoch": 9.01,
"learning_rate": 3.020066889632107e-05,
"loss": 0.7983,
"step": 5480
},
{
"epoch": 9.03,
"learning_rate": 2.9698996655518392e-05,
"loss": 0.7924,
"step": 5490
},
{
"epoch": 9.05,
"learning_rate": 2.9197324414715718e-05,
"loss": 0.7847,
"step": 5500
},
{
"epoch": 9.06,
"learning_rate": 2.869565217391304e-05,
"loss": 0.8061,
"step": 5510
},
{
"epoch": 9.08,
"learning_rate": 2.8193979933110367e-05,
"loss": 0.8038,
"step": 5520
},
{
"epoch": 9.1,
"learning_rate": 2.769230769230769e-05,
"loss": 0.7913,
"step": 5530
},
{
"epoch": 9.11,
"learning_rate": 2.7190635451505016e-05,
"loss": 0.7976,
"step": 5540
},
{
"epoch": 9.13,
"learning_rate": 2.6688963210702336e-05,
"loss": 0.7996,
"step": 5550
},
{
"epoch": 9.14,
"learning_rate": 2.6187290969899662e-05,
"loss": 0.7963,
"step": 5560
},
{
"epoch": 9.16,
"learning_rate": 2.5685618729096985e-05,
"loss": 0.7987,
"step": 5570
},
{
"epoch": 9.18,
"learning_rate": 2.518394648829431e-05,
"loss": 0.7858,
"step": 5580
},
{
"epoch": 9.19,
"learning_rate": 2.4682274247491638e-05,
"loss": 0.8028,
"step": 5590
},
{
"epoch": 9.21,
"learning_rate": 2.418060200668896e-05,
"loss": 0.7899,
"step": 5600
},
{
"epoch": 9.21,
"eval_loss": 0.9114179611206055,
"eval_runtime": 33.1597,
"eval_samples_per_second": 60.314,
"eval_steps_per_second": 1.267,
"step": 5600
},
{
"epoch": 9.23,
"learning_rate": 2.3678929765886287e-05,
"loss": 0.7896,
"step": 5610
},
{
"epoch": 9.24,
"learning_rate": 2.317725752508361e-05,
"loss": 0.7928,
"step": 5620
},
{
"epoch": 9.26,
"learning_rate": 2.2675585284280936e-05,
"loss": 0.8108,
"step": 5630
},
{
"epoch": 9.28,
"learning_rate": 2.217391304347826e-05,
"loss": 0.8011,
"step": 5640
},
{
"epoch": 9.29,
"learning_rate": 2.1672240802675585e-05,
"loss": 0.7965,
"step": 5650
},
{
"epoch": 9.31,
"learning_rate": 2.1170568561872908e-05,
"loss": 0.7967,
"step": 5660
},
{
"epoch": 9.33,
"learning_rate": 2.0668896321070234e-05,
"loss": 0.8006,
"step": 5670
},
{
"epoch": 9.34,
"learning_rate": 2.0167224080267554e-05,
"loss": 0.7938,
"step": 5680
},
{
"epoch": 9.36,
"learning_rate": 1.966555183946488e-05,
"loss": 0.8137,
"step": 5690
},
{
"epoch": 9.38,
"learning_rate": 1.9163879598662203e-05,
"loss": 0.7994,
"step": 5700
},
{
"epoch": 9.39,
"learning_rate": 1.866220735785953e-05,
"loss": 0.7994,
"step": 5710
},
{
"epoch": 9.41,
"learning_rate": 1.8160535117056856e-05,
"loss": 0.7926,
"step": 5720
},
{
"epoch": 9.42,
"learning_rate": 1.765886287625418e-05,
"loss": 0.7918,
"step": 5730
},
{
"epoch": 9.44,
"learning_rate": 1.7157190635451505e-05,
"loss": 0.7811,
"step": 5740
},
{
"epoch": 9.46,
"learning_rate": 1.6655518394648828e-05,
"loss": 0.8001,
"step": 5750
},
{
"epoch": 9.47,
"learning_rate": 1.6153846153846154e-05,
"loss": 0.8106,
"step": 5760
},
{
"epoch": 9.49,
"learning_rate": 1.5652173913043477e-05,
"loss": 0.7882,
"step": 5770
},
{
"epoch": 9.51,
"learning_rate": 1.5150501672240801e-05,
"loss": 0.7873,
"step": 5780
},
{
"epoch": 9.52,
"learning_rate": 1.4648829431438126e-05,
"loss": 0.7976,
"step": 5790
},
{
"epoch": 9.54,
"learning_rate": 1.414715719063545e-05,
"loss": 0.7951,
"step": 5800
},
{
"epoch": 9.54,
"eval_loss": 0.9110632538795471,
"eval_runtime": 33.1804,
"eval_samples_per_second": 60.276,
"eval_steps_per_second": 1.266,
"step": 5800
},
{
"epoch": 9.56,
"learning_rate": 1.3645484949832775e-05,
"loss": 0.7897,
"step": 5810
},
{
"epoch": 9.57,
"learning_rate": 1.31438127090301e-05,
"loss": 0.8037,
"step": 5820
},
{
"epoch": 9.59,
"learning_rate": 1.2642140468227423e-05,
"loss": 0.786,
"step": 5830
},
{
"epoch": 9.61,
"learning_rate": 1.2140468227424747e-05,
"loss": 0.8044,
"step": 5840
},
{
"epoch": 9.62,
"learning_rate": 1.1638795986622072e-05,
"loss": 0.804,
"step": 5850
},
{
"epoch": 9.64,
"learning_rate": 1.1137123745819396e-05,
"loss": 0.8,
"step": 5860
},
{
"epoch": 9.65,
"learning_rate": 1.0635451505016721e-05,
"loss": 0.7957,
"step": 5870
},
{
"epoch": 9.67,
"learning_rate": 1.0133779264214047e-05,
"loss": 0.7875,
"step": 5880
},
{
"epoch": 9.69,
"learning_rate": 9.632107023411369e-06,
"loss": 0.7969,
"step": 5890
},
{
"epoch": 9.7,
"learning_rate": 9.130434782608695e-06,
"loss": 0.805,
"step": 5900
},
{
"epoch": 9.72,
"learning_rate": 8.62876254180602e-06,
"loss": 0.7755,
"step": 5910
},
{
"epoch": 9.74,
"learning_rate": 8.127090301003344e-06,
"loss": 0.7957,
"step": 5920
},
{
"epoch": 9.75,
"learning_rate": 7.625418060200669e-06,
"loss": 0.8089,
"step": 5930
},
{
"epoch": 9.77,
"learning_rate": 7.123745819397992e-06,
"loss": 0.8008,
"step": 5940
},
{
"epoch": 9.79,
"learning_rate": 6.622073578595317e-06,
"loss": 0.7941,
"step": 5950
},
{
"epoch": 9.8,
"learning_rate": 6.1204013377926415e-06,
"loss": 0.7932,
"step": 5960
},
{
"epoch": 9.82,
"learning_rate": 5.618729096989965e-06,
"loss": 0.7993,
"step": 5970
},
{
"epoch": 9.84,
"learning_rate": 5.117056856187291e-06,
"loss": 0.7872,
"step": 5980
},
{
"epoch": 9.85,
"learning_rate": 4.615384615384615e-06,
"loss": 0.8088,
"step": 5990
},
{
"epoch": 9.87,
"learning_rate": 4.11371237458194e-06,
"loss": 0.7947,
"step": 6000
},
{
"epoch": 9.87,
"eval_loss": 0.9106292724609375,
"eval_runtime": 33.2219,
"eval_samples_per_second": 60.201,
"eval_steps_per_second": 1.264,
"step": 6000
}
],
"logging_steps": 10,
"max_steps": 6080,
"num_train_epochs": 10,
"save_steps": 200,
"total_flos": 1.3874338630022988e+19,
"trial_name": null,
"trial_params": null
}