{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.8907563025210083,
"eval_steps": 50,
"global_step": 1800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01050420168067227,
"grad_norm": 2.691126585006714,
"learning_rate": 4.98249299719888e-05,
"loss": 1.6756,
"step": 10
},
{
"epoch": 0.02100840336134454,
"grad_norm": 1.9124071598052979,
"learning_rate": 4.96498599439776e-05,
"loss": 0.5155,
"step": 20
},
{
"epoch": 0.031512605042016806,
"grad_norm": 1.1180588006973267,
"learning_rate": 4.947478991596639e-05,
"loss": 0.4521,
"step": 30
},
{
"epoch": 0.04201680672268908,
"grad_norm": 0.7634907364845276,
"learning_rate": 4.9299719887955186e-05,
"loss": 0.3732,
"step": 40
},
{
"epoch": 0.052521008403361345,
"grad_norm": 0.6673978567123413,
"learning_rate": 4.912464985994398e-05,
"loss": 0.3832,
"step": 50
},
{
"epoch": 0.052521008403361345,
"eval_loss": 0.424630731344223,
"eval_runtime": 13.4948,
"eval_samples_per_second": 35.569,
"eval_steps_per_second": 2.223,
"step": 50
},
{
"epoch": 0.06302521008403361,
"grad_norm": 0.6445392370223999,
"learning_rate": 4.8949579831932775e-05,
"loss": 0.3386,
"step": 60
},
{
"epoch": 0.07352941176470588,
"grad_norm": 0.5743226408958435,
"learning_rate": 4.877450980392157e-05,
"loss": 0.3443,
"step": 70
},
{
"epoch": 0.08403361344537816,
"grad_norm": 0.6735454201698303,
"learning_rate": 4.859943977591036e-05,
"loss": 0.3212,
"step": 80
},
{
"epoch": 0.09453781512605042,
"grad_norm": 0.660349428653717,
"learning_rate": 4.8424369747899164e-05,
"loss": 0.3379,
"step": 90
},
{
"epoch": 0.10504201680672269,
"grad_norm": 0.8036162853240967,
"learning_rate": 4.824929971988796e-05,
"loss": 0.3177,
"step": 100
},
{
"epoch": 0.10504201680672269,
"eval_loss": 0.3798685073852539,
"eval_runtime": 13.5369,
"eval_samples_per_second": 35.459,
"eval_steps_per_second": 2.216,
"step": 100
},
{
"epoch": 0.11554621848739496,
"grad_norm": 0.6827962398529053,
"learning_rate": 4.807422969187675e-05,
"loss": 0.3405,
"step": 110
},
{
"epoch": 0.12605042016806722,
"grad_norm": 0.5923526287078857,
"learning_rate": 4.7899159663865554e-05,
"loss": 0.3324,
"step": 120
},
{
"epoch": 0.13655462184873948,
"grad_norm": 0.6476097106933594,
"learning_rate": 4.772408963585435e-05,
"loss": 0.3216,
"step": 130
},
{
"epoch": 0.14705882352941177,
"grad_norm": 0.5536680221557617,
"learning_rate": 4.7549019607843135e-05,
"loss": 0.3425,
"step": 140
},
{
"epoch": 0.15756302521008403,
"grad_norm": 0.5725419521331787,
"learning_rate": 4.7373949579831936e-05,
"loss": 0.3062,
"step": 150
},
{
"epoch": 0.15756302521008403,
"eval_loss": 0.36464938521385193,
"eval_runtime": 13.5257,
"eval_samples_per_second": 35.488,
"eval_steps_per_second": 2.218,
"step": 150
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.6459401249885559,
"learning_rate": 4.719887955182073e-05,
"loss": 0.327,
"step": 160
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.7749695181846619,
"learning_rate": 4.7023809523809525e-05,
"loss": 0.3146,
"step": 170
},
{
"epoch": 0.18907563025210083,
"grad_norm": 0.7452580332756042,
"learning_rate": 4.684873949579832e-05,
"loss": 0.3024,
"step": 180
},
{
"epoch": 0.19957983193277312,
"grad_norm": 0.632286012172699,
"learning_rate": 4.667366946778712e-05,
"loss": 0.3157,
"step": 190
},
{
"epoch": 0.21008403361344538,
"grad_norm": 0.6443158984184265,
"learning_rate": 4.6498599439775914e-05,
"loss": 0.3039,
"step": 200
},
{
"epoch": 0.21008403361344538,
"eval_loss": 0.35029926896095276,
"eval_runtime": 13.5332,
"eval_samples_per_second": 35.468,
"eval_steps_per_second": 2.217,
"step": 200
},
{
"epoch": 0.22058823529411764,
"grad_norm": 0.6045960783958435,
"learning_rate": 4.632352941176471e-05,
"loss": 0.313,
"step": 210
},
{
"epoch": 0.23109243697478993,
"grad_norm": 0.5544140338897705,
"learning_rate": 4.61484593837535e-05,
"loss": 0.2992,
"step": 220
},
{
"epoch": 0.2415966386554622,
"grad_norm": 0.45881810784339905,
"learning_rate": 4.59733893557423e-05,
"loss": 0.2889,
"step": 230
},
{
"epoch": 0.25210084033613445,
"grad_norm": 0.6315861344337463,
"learning_rate": 4.579831932773109e-05,
"loss": 0.3055,
"step": 240
},
{
"epoch": 0.26260504201680673,
"grad_norm": 0.7058202624320984,
"learning_rate": 4.562324929971989e-05,
"loss": 0.2897,
"step": 250
},
{
"epoch": 0.26260504201680673,
"eval_loss": 0.3452460467815399,
"eval_runtime": 13.5256,
"eval_samples_per_second": 35.488,
"eval_steps_per_second": 2.218,
"step": 250
},
{
"epoch": 0.27310924369747897,
"grad_norm": 0.7277679443359375,
"learning_rate": 4.5448179271708687e-05,
"loss": 0.3119,
"step": 260
},
{
"epoch": 0.28361344537815125,
"grad_norm": 0.6311895251274109,
"learning_rate": 4.527310924369748e-05,
"loss": 0.296,
"step": 270
},
{
"epoch": 0.29411764705882354,
"grad_norm": 0.5600409507751465,
"learning_rate": 4.5098039215686275e-05,
"loss": 0.3027,
"step": 280
},
{
"epoch": 0.30462184873949577,
"grad_norm": 0.583111047744751,
"learning_rate": 4.4922969187675076e-05,
"loss": 0.3005,
"step": 290
},
{
"epoch": 0.31512605042016806,
"grad_norm": 0.6667417287826538,
"learning_rate": 4.474789915966387e-05,
"loss": 0.2924,
"step": 300
},
{
"epoch": 0.31512605042016806,
"eval_loss": 0.34308406710624695,
"eval_runtime": 13.5267,
"eval_samples_per_second": 35.485,
"eval_steps_per_second": 2.218,
"step": 300
},
{
"epoch": 0.32563025210084034,
"grad_norm": 0.6434429883956909,
"learning_rate": 4.4572829131652665e-05,
"loss": 0.2893,
"step": 310
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.5765148401260376,
"learning_rate": 4.439775910364146e-05,
"loss": 0.2842,
"step": 320
},
{
"epoch": 0.34663865546218486,
"grad_norm": 0.5374988317489624,
"learning_rate": 4.422268907563025e-05,
"loss": 0.2838,
"step": 330
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.5548204779624939,
"learning_rate": 4.404761904761905e-05,
"loss": 0.2708,
"step": 340
},
{
"epoch": 0.36764705882352944,
"grad_norm": 0.5466386079788208,
"learning_rate": 4.387254901960784e-05,
"loss": 0.2833,
"step": 350
},
{
"epoch": 0.36764705882352944,
"eval_loss": 0.3345324695110321,
"eval_runtime": 13.5203,
"eval_samples_per_second": 35.502,
"eval_steps_per_second": 2.219,
"step": 350
},
{
"epoch": 0.37815126050420167,
"grad_norm": 0.655726432800293,
"learning_rate": 4.369747899159664e-05,
"loss": 0.2842,
"step": 360
},
{
"epoch": 0.38865546218487396,
"grad_norm": 0.6633383631706238,
"learning_rate": 4.352240896358544e-05,
"loss": 0.2752,
"step": 370
},
{
"epoch": 0.39915966386554624,
"grad_norm": 0.549430787563324,
"learning_rate": 4.334733893557423e-05,
"loss": 0.2918,
"step": 380
},
{
"epoch": 0.4096638655462185,
"grad_norm": 0.49975866079330444,
"learning_rate": 4.317226890756303e-05,
"loss": 0.2733,
"step": 390
},
{
"epoch": 0.42016806722689076,
"grad_norm": 0.5723116397857666,
"learning_rate": 4.2997198879551826e-05,
"loss": 0.2852,
"step": 400
},
{
"epoch": 0.42016806722689076,
"eval_loss": 0.334602415561676,
"eval_runtime": 13.5187,
"eval_samples_per_second": 35.506,
"eval_steps_per_second": 2.219,
"step": 400
},
{
"epoch": 0.43067226890756305,
"grad_norm": 0.6224997639656067,
"learning_rate": 4.2822128851540614e-05,
"loss": 0.2982,
"step": 410
},
{
"epoch": 0.4411764705882353,
"grad_norm": 0.548751711845398,
"learning_rate": 4.2647058823529415e-05,
"loss": 0.2867,
"step": 420
},
{
"epoch": 0.45168067226890757,
"grad_norm": 0.5709792971611023,
"learning_rate": 4.247198879551821e-05,
"loss": 0.2588,
"step": 430
},
{
"epoch": 0.46218487394957986,
"grad_norm": 0.537171483039856,
"learning_rate": 4.2296918767507e-05,
"loss": 0.2782,
"step": 440
},
{
"epoch": 0.4726890756302521,
"grad_norm": 0.5505544543266296,
"learning_rate": 4.21218487394958e-05,
"loss": 0.2703,
"step": 450
},
{
"epoch": 0.4726890756302521,
"eval_loss": 0.3293306231498718,
"eval_runtime": 13.5091,
"eval_samples_per_second": 35.532,
"eval_steps_per_second": 2.221,
"step": 450
},
{
"epoch": 0.4831932773109244,
"grad_norm": 0.5049700736999512,
"learning_rate": 4.19467787114846e-05,
"loss": 0.2439,
"step": 460
},
{
"epoch": 0.49369747899159666,
"grad_norm": 0.6546272039413452,
"learning_rate": 4.177170868347339e-05,
"loss": 0.2747,
"step": 470
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.4740765690803528,
"learning_rate": 4.159663865546219e-05,
"loss": 0.2692,
"step": 480
},
{
"epoch": 0.5147058823529411,
"grad_norm": 0.5289779901504517,
"learning_rate": 4.142156862745099e-05,
"loss": 0.29,
"step": 490
},
{
"epoch": 0.5252100840336135,
"grad_norm": 0.5422542095184326,
"learning_rate": 4.1246498599439776e-05,
"loss": 0.2721,
"step": 500
},
{
"epoch": 0.5252100840336135,
"eval_loss": 0.32664668560028076,
"eval_runtime": 13.4992,
"eval_samples_per_second": 35.558,
"eval_steps_per_second": 2.222,
"step": 500
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.586115837097168,
"learning_rate": 4.107142857142857e-05,
"loss": 0.2686,
"step": 510
},
{
"epoch": 0.5462184873949579,
"grad_norm": 0.608102560043335,
"learning_rate": 4.089635854341737e-05,
"loss": 0.2712,
"step": 520
},
{
"epoch": 0.5567226890756303,
"grad_norm": 0.5184093117713928,
"learning_rate": 4.0721288515406165e-05,
"loss": 0.2773,
"step": 530
},
{
"epoch": 0.5672268907563025,
"grad_norm": 0.524999737739563,
"learning_rate": 4.054621848739496e-05,
"loss": 0.2725,
"step": 540
},
{
"epoch": 0.5777310924369747,
"grad_norm": 0.6716265082359314,
"learning_rate": 4.0371148459383754e-05,
"loss": 0.2618,
"step": 550
},
{
"epoch": 0.5777310924369747,
"eval_loss": 0.32582366466522217,
"eval_runtime": 13.4847,
"eval_samples_per_second": 35.596,
"eval_steps_per_second": 2.225,
"step": 550
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.5464914441108704,
"learning_rate": 4.0196078431372555e-05,
"loss": 0.2566,
"step": 560
},
{
"epoch": 0.5987394957983193,
"grad_norm": 0.5436871647834778,
"learning_rate": 4.002100840336135e-05,
"loss": 0.2637,
"step": 570
},
{
"epoch": 0.6092436974789915,
"grad_norm": 0.5792732238769531,
"learning_rate": 3.984593837535014e-05,
"loss": 0.2678,
"step": 580
},
{
"epoch": 0.6197478991596639,
"grad_norm": 0.5623672008514404,
"learning_rate": 3.967086834733894e-05,
"loss": 0.2589,
"step": 590
},
{
"epoch": 0.6302521008403361,
"grad_norm": 0.5475742220878601,
"learning_rate": 3.949579831932773e-05,
"loss": 0.2619,
"step": 600
},
{
"epoch": 0.6302521008403361,
"eval_loss": 0.3220784366130829,
"eval_runtime": 13.4828,
"eval_samples_per_second": 35.601,
"eval_steps_per_second": 2.225,
"step": 600
},
{
"epoch": 0.6407563025210085,
"grad_norm": 0.5120112895965576,
"learning_rate": 3.9320728291316526e-05,
"loss": 0.2551,
"step": 610
},
{
"epoch": 0.6512605042016807,
"grad_norm": 0.5247055292129517,
"learning_rate": 3.914565826330533e-05,
"loss": 0.2578,
"step": 620
},
{
"epoch": 0.6617647058823529,
"grad_norm": 0.4846402406692505,
"learning_rate": 3.897058823529412e-05,
"loss": 0.2686,
"step": 630
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.5671103000640869,
"learning_rate": 3.8795518207282915e-05,
"loss": 0.2773,
"step": 640
},
{
"epoch": 0.6827731092436975,
"grad_norm": 0.5367168188095093,
"learning_rate": 3.862044817927171e-05,
"loss": 0.2632,
"step": 650
},
{
"epoch": 0.6827731092436975,
"eval_loss": 0.3196863830089569,
"eval_runtime": 13.4716,
"eval_samples_per_second": 35.631,
"eval_steps_per_second": 2.227,
"step": 650
},
{
"epoch": 0.6932773109243697,
"grad_norm": 0.6599178910255432,
"learning_rate": 3.844537815126051e-05,
"loss": 0.2603,
"step": 660
},
{
"epoch": 0.7037815126050421,
"grad_norm": 0.4438059628009796,
"learning_rate": 3.82703081232493e-05,
"loss": 0.2495,
"step": 670
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.5657041072845459,
"learning_rate": 3.809523809523809e-05,
"loss": 0.2361,
"step": 680
},
{
"epoch": 0.7247899159663865,
"grad_norm": 0.5392199754714966,
"learning_rate": 3.792016806722689e-05,
"loss": 0.2541,
"step": 690
},
{
"epoch": 0.7352941176470589,
"grad_norm": 0.5997769832611084,
"learning_rate": 3.774509803921569e-05,
"loss": 0.2643,
"step": 700
},
{
"epoch": 0.7352941176470589,
"eval_loss": 0.32053324580192566,
"eval_runtime": 13.4863,
"eval_samples_per_second": 35.592,
"eval_steps_per_second": 2.224,
"step": 700
},
{
"epoch": 0.7457983193277311,
"grad_norm": 0.6396230459213257,
"learning_rate": 3.757002801120448e-05,
"loss": 0.2454,
"step": 710
},
{
"epoch": 0.7563025210084033,
"grad_norm": 0.4835526645183563,
"learning_rate": 3.739495798319328e-05,
"loss": 0.2529,
"step": 720
},
{
"epoch": 0.7668067226890757,
"grad_norm": 0.5814074277877808,
"learning_rate": 3.721988795518208e-05,
"loss": 0.2445,
"step": 730
},
{
"epoch": 0.7773109243697479,
"grad_norm": 0.4852081537246704,
"learning_rate": 3.704481792717087e-05,
"loss": 0.2522,
"step": 740
},
{
"epoch": 0.7878151260504201,
"grad_norm": 0.5790079236030579,
"learning_rate": 3.6869747899159665e-05,
"loss": 0.2561,
"step": 750
},
{
"epoch": 0.7878151260504201,
"eval_loss": 0.32097509503364563,
"eval_runtime": 13.4958,
"eval_samples_per_second": 35.567,
"eval_steps_per_second": 2.223,
"step": 750
},
{
"epoch": 0.7983193277310925,
"grad_norm": 0.5733464360237122,
"learning_rate": 3.669467787114846e-05,
"loss": 0.248,
"step": 760
},
{
"epoch": 0.8088235294117647,
"grad_norm": 0.6230357885360718,
"learning_rate": 3.6519607843137254e-05,
"loss": 0.2536,
"step": 770
},
{
"epoch": 0.819327731092437,
"grad_norm": 0.6231834888458252,
"learning_rate": 3.634453781512605e-05,
"loss": 0.258,
"step": 780
},
{
"epoch": 0.8298319327731093,
"grad_norm": 0.5959641933441162,
"learning_rate": 3.616946778711485e-05,
"loss": 0.2412,
"step": 790
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.5139572024345398,
"learning_rate": 3.5994397759103643e-05,
"loss": 0.2516,
"step": 800
},
{
"epoch": 0.8403361344537815,
"eval_loss": 0.3214908242225647,
"eval_runtime": 13.5134,
"eval_samples_per_second": 35.52,
"eval_steps_per_second": 2.22,
"step": 800
},
{
"epoch": 0.8508403361344538,
"grad_norm": 0.5790511965751648,
"learning_rate": 3.581932773109244e-05,
"loss": 0.2427,
"step": 810
},
{
"epoch": 0.8613445378151261,
"grad_norm": 0.5952211618423462,
"learning_rate": 3.564425770308123e-05,
"loss": 0.2673,
"step": 820
},
{
"epoch": 0.8718487394957983,
"grad_norm": 0.5401202440261841,
"learning_rate": 3.546918767507003e-05,
"loss": 0.2392,
"step": 830
},
{
"epoch": 0.8823529411764706,
"grad_norm": 0.5725980401039124,
"learning_rate": 3.529411764705883e-05,
"loss": 0.2381,
"step": 840
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.6116746664047241,
"learning_rate": 3.511904761904762e-05,
"loss": 0.2403,
"step": 850
},
{
"epoch": 0.8928571428571429,
"eval_loss": 0.3206528425216675,
"eval_runtime": 13.5142,
"eval_samples_per_second": 35.518,
"eval_steps_per_second": 2.22,
"step": 850
},
{
"epoch": 0.9033613445378151,
"grad_norm": 0.49307090044021606,
"learning_rate": 3.4943977591036416e-05,
"loss": 0.2375,
"step": 860
},
{
"epoch": 0.9138655462184874,
"grad_norm": 0.6631060242652893,
"learning_rate": 3.476890756302521e-05,
"loss": 0.2416,
"step": 870
},
{
"epoch": 0.9243697478991597,
"grad_norm": 0.5449891090393066,
"learning_rate": 3.4593837535014004e-05,
"loss": 0.2614,
"step": 880
},
{
"epoch": 0.9348739495798319,
"grad_norm": 0.5289740562438965,
"learning_rate": 3.4418767507002805e-05,
"loss": 0.2388,
"step": 890
},
{
"epoch": 0.9453781512605042,
"grad_norm": 0.5789199471473694,
"learning_rate": 3.42436974789916e-05,
"loss": 0.2461,
"step": 900
},
{
"epoch": 0.9453781512605042,
"eval_loss": 0.32027536630630493,
"eval_runtime": 13.5157,
"eval_samples_per_second": 35.514,
"eval_steps_per_second": 2.22,
"step": 900
},
{
"epoch": 0.9558823529411765,
"grad_norm": 0.5729469060897827,
"learning_rate": 3.4068627450980394e-05,
"loss": 0.2393,
"step": 910
},
{
"epoch": 0.9663865546218487,
"grad_norm": 0.6298367977142334,
"learning_rate": 3.389355742296919e-05,
"loss": 0.2512,
"step": 920
},
{
"epoch": 0.976890756302521,
"grad_norm": 0.6292509436607361,
"learning_rate": 3.371848739495799e-05,
"loss": 0.2374,
"step": 930
},
{
"epoch": 0.9873949579831933,
"grad_norm": 0.6284404397010803,
"learning_rate": 3.3543417366946776e-05,
"loss": 0.2391,
"step": 940
},
{
"epoch": 0.9978991596638656,
"grad_norm": 0.5431928634643555,
"learning_rate": 3.336834733893557e-05,
"loss": 0.2487,
"step": 950
},
{
"epoch": 0.9978991596638656,
"eval_loss": 0.31819915771484375,
"eval_runtime": 13.5091,
"eval_samples_per_second": 35.532,
"eval_steps_per_second": 2.221,
"step": 950
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.47679176926612854,
"learning_rate": 3.319327731092437e-05,
"loss": 0.2011,
"step": 960
},
{
"epoch": 1.01890756302521,
"grad_norm": 0.7469049096107483,
"learning_rate": 3.3018207282913166e-05,
"loss": 0.1979,
"step": 970
},
{
"epoch": 1.0294117647058822,
"grad_norm": 0.5701093077659607,
"learning_rate": 3.284313725490196e-05,
"loss": 0.1986,
"step": 980
},
{
"epoch": 1.0399159663865547,
"grad_norm": 0.5581035017967224,
"learning_rate": 3.266806722689076e-05,
"loss": 0.2039,
"step": 990
},
{
"epoch": 1.050420168067227,
"grad_norm": 0.6445596218109131,
"learning_rate": 3.2492997198879555e-05,
"loss": 0.2183,
"step": 1000
},
{
"epoch": 1.050420168067227,
"eval_loss": 0.3302117586135864,
"eval_runtime": 13.4775,
"eval_samples_per_second": 35.615,
"eval_steps_per_second": 2.226,
"step": 1000
},
{
"epoch": 1.0609243697478992,
"grad_norm": 0.48644450306892395,
"learning_rate": 3.231792717086835e-05,
"loss": 0.2044,
"step": 1010
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.555849552154541,
"learning_rate": 3.2142857142857144e-05,
"loss": 0.2029,
"step": 1020
},
{
"epoch": 1.0819327731092436,
"grad_norm": 0.657832145690918,
"learning_rate": 3.196778711484594e-05,
"loss": 0.2022,
"step": 1030
},
{
"epoch": 1.092436974789916,
"grad_norm": 0.5357531309127808,
"learning_rate": 3.179271708683473e-05,
"loss": 0.2033,
"step": 1040
},
{
"epoch": 1.1029411764705883,
"grad_norm": 0.6504517793655396,
"learning_rate": 3.161764705882353e-05,
"loss": 0.2029,
"step": 1050
},
{
"epoch": 1.1029411764705883,
"eval_loss": 0.33139654994010925,
"eval_runtime": 13.4851,
"eval_samples_per_second": 35.595,
"eval_steps_per_second": 2.225,
"step": 1050
},
{
"epoch": 1.1134453781512605,
"grad_norm": 0.6484816670417786,
"learning_rate": 3.144257703081233e-05,
"loss": 0.2,
"step": 1060
},
{
"epoch": 1.1239495798319328,
"grad_norm": 0.5106396079063416,
"learning_rate": 3.126750700280112e-05,
"loss": 0.1903,
"step": 1070
},
{
"epoch": 1.134453781512605,
"grad_norm": 0.624066948890686,
"learning_rate": 3.1092436974789916e-05,
"loss": 0.201,
"step": 1080
},
{
"epoch": 1.1449579831932772,
"grad_norm": 0.6842947602272034,
"learning_rate": 3.091736694677872e-05,
"loss": 0.2057,
"step": 1090
},
{
"epoch": 1.1554621848739495,
"grad_norm": 0.5321388244628906,
"learning_rate": 3.074229691876751e-05,
"loss": 0.2037,
"step": 1100
},
{
"epoch": 1.1554621848739495,
"eval_loss": 0.33459416031837463,
"eval_runtime": 13.4814,
"eval_samples_per_second": 35.605,
"eval_steps_per_second": 2.225,
"step": 1100
},
{
"epoch": 1.165966386554622,
"grad_norm": 0.6166321635246277,
"learning_rate": 3.0567226890756306e-05,
"loss": 0.1956,
"step": 1110
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.5974798798561096,
"learning_rate": 3.0392156862745097e-05,
"loss": 0.2099,
"step": 1120
},
{
"epoch": 1.1869747899159664,
"grad_norm": 0.6835567951202393,
"learning_rate": 3.0217086834733894e-05,
"loss": 0.2029,
"step": 1130
},
{
"epoch": 1.1974789915966386,
"grad_norm": 0.6046004295349121,
"learning_rate": 3.004201680672269e-05,
"loss": 0.1952,
"step": 1140
},
{
"epoch": 1.2079831932773109,
"grad_norm": 0.6110156178474426,
"learning_rate": 2.9866946778711486e-05,
"loss": 0.209,
"step": 1150
},
{
"epoch": 1.2079831932773109,
"eval_loss": 0.3331316411495209,
"eval_runtime": 13.4871,
"eval_samples_per_second": 35.59,
"eval_steps_per_second": 2.224,
"step": 1150
},
{
"epoch": 1.2184873949579833,
"grad_norm": 0.6486967206001282,
"learning_rate": 2.969187675070028e-05,
"loss": 0.2068,
"step": 1160
},
{
"epoch": 1.2289915966386555,
"grad_norm": 0.6242414712905884,
"learning_rate": 2.9516806722689078e-05,
"loss": 0.1926,
"step": 1170
},
{
"epoch": 1.2394957983193278,
"grad_norm": 0.6824544072151184,
"learning_rate": 2.9341736694677872e-05,
"loss": 0.1985,
"step": 1180
},
{
"epoch": 1.25,
"grad_norm": 0.6598100662231445,
"learning_rate": 2.916666666666667e-05,
"loss": 0.1968,
"step": 1190
},
{
"epoch": 1.2605042016806722,
"grad_norm": 0.5652765035629272,
"learning_rate": 2.8991596638655467e-05,
"loss": 0.2055,
"step": 1200
},
{
"epoch": 1.2605042016806722,
"eval_loss": 0.337155282497406,
"eval_runtime": 13.5062,
"eval_samples_per_second": 35.539,
"eval_steps_per_second": 2.221,
"step": 1200
},
{
"epoch": 1.2710084033613445,
"grad_norm": 0.6030492186546326,
"learning_rate": 2.8816526610644258e-05,
"loss": 0.2006,
"step": 1210
},
{
"epoch": 1.2815126050420167,
"grad_norm": 0.6905779242515564,
"learning_rate": 2.8641456582633052e-05,
"loss": 0.1956,
"step": 1220
},
{
"epoch": 1.2920168067226891,
"grad_norm": 0.6660848259925842,
"learning_rate": 2.846638655462185e-05,
"loss": 0.2072,
"step": 1230
},
{
"epoch": 1.3025210084033614,
"grad_norm": 0.561509370803833,
"learning_rate": 2.8291316526610644e-05,
"loss": 0.1935,
"step": 1240
},
{
"epoch": 1.3130252100840336,
"grad_norm": 0.9078445434570312,
"learning_rate": 2.8116246498599442e-05,
"loss": 0.2102,
"step": 1250
},
{
"epoch": 1.3130252100840336,
"eval_loss": 0.3378737270832062,
"eval_runtime": 13.5144,
"eval_samples_per_second": 35.518,
"eval_steps_per_second": 2.22,
"step": 1250
},
{
"epoch": 1.3235294117647058,
"grad_norm": 0.782431960105896,
"learning_rate": 2.7941176470588236e-05,
"loss": 0.2044,
"step": 1260
},
{
"epoch": 1.334033613445378,
"grad_norm": 0.7222577929496765,
"learning_rate": 2.7766106442577034e-05,
"loss": 0.2023,
"step": 1270
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.7043188810348511,
"learning_rate": 2.7591036414565828e-05,
"loss": 0.2002,
"step": 1280
},
{
"epoch": 1.3550420168067228,
"grad_norm": 0.5669278502464294,
"learning_rate": 2.7415966386554626e-05,
"loss": 0.1903,
"step": 1290
},
{
"epoch": 1.365546218487395,
"grad_norm": 0.6404176950454712,
"learning_rate": 2.7240896358543417e-05,
"loss": 0.1982,
"step": 1300
},
{
"epoch": 1.365546218487395,
"eval_loss": 0.33893150091171265,
"eval_runtime": 13.5229,
"eval_samples_per_second": 35.495,
"eval_steps_per_second": 2.218,
"step": 1300
},
{
"epoch": 1.3760504201680672,
"grad_norm": 0.6714435815811157,
"learning_rate": 2.706582633053221e-05,
"loss": 0.1954,
"step": 1310
},
{
"epoch": 1.3865546218487395,
"grad_norm": 0.5384875535964966,
"learning_rate": 2.689075630252101e-05,
"loss": 0.201,
"step": 1320
},
{
"epoch": 1.3970588235294117,
"grad_norm": 0.7454679012298584,
"learning_rate": 2.6715686274509806e-05,
"loss": 0.1973,
"step": 1330
},
{
"epoch": 1.407563025210084,
"grad_norm": 0.7705392837524414,
"learning_rate": 2.65406162464986e-05,
"loss": 0.2001,
"step": 1340
},
{
"epoch": 1.4180672268907564,
"grad_norm": 0.6535789370536804,
"learning_rate": 2.6365546218487398e-05,
"loss": 0.189,
"step": 1350
},
{
"epoch": 1.4180672268907564,
"eval_loss": 0.33827126026153564,
"eval_runtime": 13.5137,
"eval_samples_per_second": 35.52,
"eval_steps_per_second": 2.22,
"step": 1350
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.6305943727493286,
"learning_rate": 2.6190476190476192e-05,
"loss": 0.1894,
"step": 1360
},
{
"epoch": 1.4390756302521008,
"grad_norm": 0.7726532816886902,
"learning_rate": 2.601540616246499e-05,
"loss": 0.1971,
"step": 1370
},
{
"epoch": 1.449579831932773,
"grad_norm": 0.814151406288147,
"learning_rate": 2.5840336134453784e-05,
"loss": 0.198,
"step": 1380
},
{
"epoch": 1.4600840336134453,
"grad_norm": 0.6826948523521423,
"learning_rate": 2.5665266106442575e-05,
"loss": 0.1913,
"step": 1390
},
{
"epoch": 1.4705882352941178,
"grad_norm": 0.8195087909698486,
"learning_rate": 2.5490196078431373e-05,
"loss": 0.1814,
"step": 1400
},
{
"epoch": 1.4705882352941178,
"eval_loss": 0.3441857695579529,
"eval_runtime": 13.5128,
"eval_samples_per_second": 35.522,
"eval_steps_per_second": 2.22,
"step": 1400
},
{
"epoch": 1.48109243697479,
"grad_norm": 0.54868084192276,
"learning_rate": 2.5315126050420167e-05,
"loss": 0.1954,
"step": 1410
},
{
"epoch": 1.4915966386554622,
"grad_norm": 0.7310366630554199,
"learning_rate": 2.5140056022408964e-05,
"loss": 0.1986,
"step": 1420
},
{
"epoch": 1.5021008403361344,
"grad_norm": 0.7019846439361572,
"learning_rate": 2.4964985994397762e-05,
"loss": 0.1946,
"step": 1430
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.839182436466217,
"learning_rate": 2.4789915966386556e-05,
"loss": 0.196,
"step": 1440
},
{
"epoch": 1.523109243697479,
"grad_norm": 0.6928364634513855,
"learning_rate": 2.4614845938375354e-05,
"loss": 0.2009,
"step": 1450
},
{
"epoch": 1.523109243697479,
"eval_loss": 0.3419071137905121,
"eval_runtime": 13.5014,
"eval_samples_per_second": 35.552,
"eval_steps_per_second": 2.222,
"step": 1450
},
{
"epoch": 1.5336134453781511,
"grad_norm": 0.6039723753929138,
"learning_rate": 2.4439775910364145e-05,
"loss": 0.191,
"step": 1460
},
{
"epoch": 1.5441176470588234,
"grad_norm": 0.6368713974952698,
"learning_rate": 2.4264705882352942e-05,
"loss": 0.1927,
"step": 1470
},
{
"epoch": 1.5546218487394958,
"grad_norm": 0.7114837765693665,
"learning_rate": 2.4089635854341737e-05,
"loss": 0.1884,
"step": 1480
},
{
"epoch": 1.565126050420168,
"grad_norm": 0.7814504504203796,
"learning_rate": 2.3914565826330534e-05,
"loss": 0.1974,
"step": 1490
},
{
"epoch": 1.5756302521008403,
"grad_norm": 0.738370954990387,
"learning_rate": 2.373949579831933e-05,
"loss": 0.202,
"step": 1500
},
{
"epoch": 1.5756302521008403,
"eval_loss": 0.34229767322540283,
"eval_runtime": 13.5047,
"eval_samples_per_second": 35.543,
"eval_steps_per_second": 2.221,
"step": 1500
},
{
"epoch": 1.5861344537815127,
"grad_norm": 0.7205056548118591,
"learning_rate": 2.3564425770308123e-05,
"loss": 0.1975,
"step": 1510
},
{
"epoch": 1.596638655462185,
"grad_norm": 0.8840786218643188,
"learning_rate": 2.338935574229692e-05,
"loss": 0.1876,
"step": 1520
},
{
"epoch": 1.6071428571428572,
"grad_norm": 0.7528152465820312,
"learning_rate": 2.3214285714285715e-05,
"loss": 0.193,
"step": 1530
},
{
"epoch": 1.6176470588235294,
"grad_norm": 0.6973660588264465,
"learning_rate": 2.303921568627451e-05,
"loss": 0.1768,
"step": 1540
},
{
"epoch": 1.6281512605042017,
"grad_norm": 0.8335407376289368,
"learning_rate": 2.2864145658263307e-05,
"loss": 0.1827,
"step": 1550
},
{
"epoch": 1.6281512605042017,
"eval_loss": 0.3449813425540924,
"eval_runtime": 13.4784,
"eval_samples_per_second": 35.612,
"eval_steps_per_second": 2.226,
"step": 1550
},
{
"epoch": 1.638655462184874,
"grad_norm": 0.834244430065155,
"learning_rate": 2.26890756302521e-05,
"loss": 0.1908,
"step": 1560
},
{
"epoch": 1.6491596638655461,
"grad_norm": 0.8551121354103088,
"learning_rate": 2.25140056022409e-05,
"loss": 0.186,
"step": 1570
},
{
"epoch": 1.6596638655462184,
"grad_norm": 0.6920718550682068,
"learning_rate": 2.2338935574229693e-05,
"loss": 0.1841,
"step": 1580
},
{
"epoch": 1.6701680672268906,
"grad_norm": 0.7398483157157898,
"learning_rate": 2.2163865546218487e-05,
"loss": 0.1897,
"step": 1590
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.9140138030052185,
"learning_rate": 2.1988795518207285e-05,
"loss": 0.1922,
"step": 1600
},
{
"epoch": 1.680672268907563,
"eval_loss": 0.3434249460697174,
"eval_runtime": 13.4753,
"eval_samples_per_second": 35.621,
"eval_steps_per_second": 2.226,
"step": 1600
},
{
"epoch": 1.6911764705882353,
"grad_norm": 0.6723321080207825,
"learning_rate": 2.181372549019608e-05,
"loss": 0.1857,
"step": 1610
},
{
"epoch": 1.7016806722689075,
"grad_norm": 0.6897566318511963,
"learning_rate": 2.1638655462184876e-05,
"loss": 0.1776,
"step": 1620
},
{
"epoch": 1.71218487394958,
"grad_norm": 0.6626412272453308,
"learning_rate": 2.146358543417367e-05,
"loss": 0.1854,
"step": 1630
},
{
"epoch": 1.7226890756302522,
"grad_norm": 0.7039874792098999,
"learning_rate": 2.1288515406162465e-05,
"loss": 0.1915,
"step": 1640
},
{
"epoch": 1.7331932773109244,
"grad_norm": 0.7689247727394104,
"learning_rate": 2.1113445378151263e-05,
"loss": 0.1753,
"step": 1650
},
{
"epoch": 1.7331932773109244,
"eval_loss": 0.3487314283847809,
"eval_runtime": 13.4843,
"eval_samples_per_second": 35.597,
"eval_steps_per_second": 2.225,
"step": 1650
},
{
"epoch": 1.7436974789915967,
"grad_norm": 0.6633319854736328,
"learning_rate": 2.0938375350140057e-05,
"loss": 0.187,
"step": 1660
},
{
"epoch": 1.754201680672269,
"grad_norm": 0.7929345369338989,
"learning_rate": 2.0763305322128854e-05,
"loss": 0.1796,
"step": 1670
},
{
"epoch": 1.7647058823529411,
"grad_norm": 0.6714622378349304,
"learning_rate": 2.058823529411765e-05,
"loss": 0.181,
"step": 1680
},
{
"epoch": 1.7752100840336134,
"grad_norm": 0.6277990341186523,
"learning_rate": 2.0413165266106443e-05,
"loss": 0.1852,
"step": 1690
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.6239778995513916,
"learning_rate": 2.023809523809524e-05,
"loss": 0.1929,
"step": 1700
},
{
"epoch": 1.7857142857142856,
"eval_loss": 0.3445141911506653,
"eval_runtime": 13.4945,
"eval_samples_per_second": 35.57,
"eval_steps_per_second": 2.223,
"step": 1700
},
{
"epoch": 1.7962184873949578,
"grad_norm": 0.6844814419746399,
"learning_rate": 2.0063025210084035e-05,
"loss": 0.1821,
"step": 1710
},
{
"epoch": 1.8067226890756303,
"grad_norm": 0.663127601146698,
"learning_rate": 1.988795518207283e-05,
"loss": 0.1837,
"step": 1720
},
{
"epoch": 1.8172268907563025,
"grad_norm": 0.5808536410331726,
"learning_rate": 1.9712885154061627e-05,
"loss": 0.1775,
"step": 1730
},
{
"epoch": 1.8277310924369747,
"grad_norm": 0.7033660411834717,
"learning_rate": 1.953781512605042e-05,
"loss": 0.1791,
"step": 1740
},
{
"epoch": 1.8382352941176472,
"grad_norm": 0.8857737183570862,
"learning_rate": 1.936274509803922e-05,
"loss": 0.1842,
"step": 1750
},
{
"epoch": 1.8382352941176472,
"eval_loss": 0.35038644075393677,
"eval_runtime": 13.508,
"eval_samples_per_second": 35.534,
"eval_steps_per_second": 2.221,
"step": 1750
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.7379726767539978,
"learning_rate": 1.9187675070028013e-05,
"loss": 0.184,
"step": 1760
},
{
"epoch": 1.8592436974789917,
"grad_norm": 0.704282283782959,
"learning_rate": 1.9012605042016807e-05,
"loss": 0.1761,
"step": 1770
},
{
"epoch": 1.8697478991596639,
"grad_norm": 0.6162270307540894,
"learning_rate": 1.88375350140056e-05,
"loss": 0.1804,
"step": 1780
},
{
"epoch": 1.8802521008403361,
"grad_norm": 0.8076377511024475,
"learning_rate": 1.86624649859944e-05,
"loss": 0.1747,
"step": 1790
},
{
"epoch": 1.8907563025210083,
"grad_norm": 0.6141780018806458,
"learning_rate": 1.8487394957983196e-05,
"loss": 0.1717,
"step": 1800
},
{
"epoch": 1.8907563025210083,
"eval_loss": 0.3557831943035126,
"eval_runtime": 13.5096,
"eval_samples_per_second": 35.53,
"eval_steps_per_second": 2.221,
"step": 1800
}
],
"logging_steps": 10,
"max_steps": 2856,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.15822545580458e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}