Training in progress, step 900, checkpoint
1add8e0
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9453781512605042,
"eval_steps": 50,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01050420168067227,
"grad_norm": 2.691126585006714,
"learning_rate": 4.98249299719888e-05,
"loss": 1.6756,
"step": 10
},
{
"epoch": 0.02100840336134454,
"grad_norm": 1.9124071598052979,
"learning_rate": 4.96498599439776e-05,
"loss": 0.5155,
"step": 20
},
{
"epoch": 0.031512605042016806,
"grad_norm": 1.1180588006973267,
"learning_rate": 4.947478991596639e-05,
"loss": 0.4521,
"step": 30
},
{
"epoch": 0.04201680672268908,
"grad_norm": 0.7634907364845276,
"learning_rate": 4.9299719887955186e-05,
"loss": 0.3732,
"step": 40
},
{
"epoch": 0.052521008403361345,
"grad_norm": 0.6673978567123413,
"learning_rate": 4.912464985994398e-05,
"loss": 0.3832,
"step": 50
},
{
"epoch": 0.052521008403361345,
"eval_loss": 0.424630731344223,
"eval_runtime": 13.4948,
"eval_samples_per_second": 35.569,
"eval_steps_per_second": 2.223,
"step": 50
},
{
"epoch": 0.06302521008403361,
"grad_norm": 0.6445392370223999,
"learning_rate": 4.8949579831932775e-05,
"loss": 0.3386,
"step": 60
},
{
"epoch": 0.07352941176470588,
"grad_norm": 0.5743226408958435,
"learning_rate": 4.877450980392157e-05,
"loss": 0.3443,
"step": 70
},
{
"epoch": 0.08403361344537816,
"grad_norm": 0.6735454201698303,
"learning_rate": 4.859943977591036e-05,
"loss": 0.3212,
"step": 80
},
{
"epoch": 0.09453781512605042,
"grad_norm": 0.660349428653717,
"learning_rate": 4.8424369747899164e-05,
"loss": 0.3379,
"step": 90
},
{
"epoch": 0.10504201680672269,
"grad_norm": 0.8036162853240967,
"learning_rate": 4.824929971988796e-05,
"loss": 0.3177,
"step": 100
},
{
"epoch": 0.10504201680672269,
"eval_loss": 0.3798685073852539,
"eval_runtime": 13.5369,
"eval_samples_per_second": 35.459,
"eval_steps_per_second": 2.216,
"step": 100
},
{
"epoch": 0.11554621848739496,
"grad_norm": 0.6827962398529053,
"learning_rate": 4.807422969187675e-05,
"loss": 0.3405,
"step": 110
},
{
"epoch": 0.12605042016806722,
"grad_norm": 0.5923526287078857,
"learning_rate": 4.7899159663865554e-05,
"loss": 0.3324,
"step": 120
},
{
"epoch": 0.13655462184873948,
"grad_norm": 0.6476097106933594,
"learning_rate": 4.772408963585435e-05,
"loss": 0.3216,
"step": 130
},
{
"epoch": 0.14705882352941177,
"grad_norm": 0.5536680221557617,
"learning_rate": 4.7549019607843135e-05,
"loss": 0.3425,
"step": 140
},
{
"epoch": 0.15756302521008403,
"grad_norm": 0.5725419521331787,
"learning_rate": 4.7373949579831936e-05,
"loss": 0.3062,
"step": 150
},
{
"epoch": 0.15756302521008403,
"eval_loss": 0.36464938521385193,
"eval_runtime": 13.5257,
"eval_samples_per_second": 35.488,
"eval_steps_per_second": 2.218,
"step": 150
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.6459401249885559,
"learning_rate": 4.719887955182073e-05,
"loss": 0.327,
"step": 160
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.7749695181846619,
"learning_rate": 4.7023809523809525e-05,
"loss": 0.3146,
"step": 170
},
{
"epoch": 0.18907563025210083,
"grad_norm": 0.7452580332756042,
"learning_rate": 4.684873949579832e-05,
"loss": 0.3024,
"step": 180
},
{
"epoch": 0.19957983193277312,
"grad_norm": 0.632286012172699,
"learning_rate": 4.667366946778712e-05,
"loss": 0.3157,
"step": 190
},
{
"epoch": 0.21008403361344538,
"grad_norm": 0.6443158984184265,
"learning_rate": 4.6498599439775914e-05,
"loss": 0.3039,
"step": 200
},
{
"epoch": 0.21008403361344538,
"eval_loss": 0.35029926896095276,
"eval_runtime": 13.5332,
"eval_samples_per_second": 35.468,
"eval_steps_per_second": 2.217,
"step": 200
},
{
"epoch": 0.22058823529411764,
"grad_norm": 0.6045960783958435,
"learning_rate": 4.632352941176471e-05,
"loss": 0.313,
"step": 210
},
{
"epoch": 0.23109243697478993,
"grad_norm": 0.5544140338897705,
"learning_rate": 4.61484593837535e-05,
"loss": 0.2992,
"step": 220
},
{
"epoch": 0.2415966386554622,
"grad_norm": 0.45881810784339905,
"learning_rate": 4.59733893557423e-05,
"loss": 0.2889,
"step": 230
},
{
"epoch": 0.25210084033613445,
"grad_norm": 0.6315861344337463,
"learning_rate": 4.579831932773109e-05,
"loss": 0.3055,
"step": 240
},
{
"epoch": 0.26260504201680673,
"grad_norm": 0.7058202624320984,
"learning_rate": 4.562324929971989e-05,
"loss": 0.2897,
"step": 250
},
{
"epoch": 0.26260504201680673,
"eval_loss": 0.3452460467815399,
"eval_runtime": 13.5256,
"eval_samples_per_second": 35.488,
"eval_steps_per_second": 2.218,
"step": 250
},
{
"epoch": 0.27310924369747897,
"grad_norm": 0.7277679443359375,
"learning_rate": 4.5448179271708687e-05,
"loss": 0.3119,
"step": 260
},
{
"epoch": 0.28361344537815125,
"grad_norm": 0.6311895251274109,
"learning_rate": 4.527310924369748e-05,
"loss": 0.296,
"step": 270
},
{
"epoch": 0.29411764705882354,
"grad_norm": 0.5600409507751465,
"learning_rate": 4.5098039215686275e-05,
"loss": 0.3027,
"step": 280
},
{
"epoch": 0.30462184873949577,
"grad_norm": 0.583111047744751,
"learning_rate": 4.4922969187675076e-05,
"loss": 0.3005,
"step": 290
},
{
"epoch": 0.31512605042016806,
"grad_norm": 0.6667417287826538,
"learning_rate": 4.474789915966387e-05,
"loss": 0.2924,
"step": 300
},
{
"epoch": 0.31512605042016806,
"eval_loss": 0.34308406710624695,
"eval_runtime": 13.5267,
"eval_samples_per_second": 35.485,
"eval_steps_per_second": 2.218,
"step": 300
},
{
"epoch": 0.32563025210084034,
"grad_norm": 0.6434429883956909,
"learning_rate": 4.4572829131652665e-05,
"loss": 0.2893,
"step": 310
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.5765148401260376,
"learning_rate": 4.439775910364146e-05,
"loss": 0.2842,
"step": 320
},
{
"epoch": 0.34663865546218486,
"grad_norm": 0.5374988317489624,
"learning_rate": 4.422268907563025e-05,
"loss": 0.2838,
"step": 330
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.5548204779624939,
"learning_rate": 4.404761904761905e-05,
"loss": 0.2708,
"step": 340
},
{
"epoch": 0.36764705882352944,
"grad_norm": 0.5466386079788208,
"learning_rate": 4.387254901960784e-05,
"loss": 0.2833,
"step": 350
},
{
"epoch": 0.36764705882352944,
"eval_loss": 0.3345324695110321,
"eval_runtime": 13.5203,
"eval_samples_per_second": 35.502,
"eval_steps_per_second": 2.219,
"step": 350
},
{
"epoch": 0.37815126050420167,
"grad_norm": 0.655726432800293,
"learning_rate": 4.369747899159664e-05,
"loss": 0.2842,
"step": 360
},
{
"epoch": 0.38865546218487396,
"grad_norm": 0.6633383631706238,
"learning_rate": 4.352240896358544e-05,
"loss": 0.2752,
"step": 370
},
{
"epoch": 0.39915966386554624,
"grad_norm": 0.549430787563324,
"learning_rate": 4.334733893557423e-05,
"loss": 0.2918,
"step": 380
},
{
"epoch": 0.4096638655462185,
"grad_norm": 0.49975866079330444,
"learning_rate": 4.317226890756303e-05,
"loss": 0.2733,
"step": 390
},
{
"epoch": 0.42016806722689076,
"grad_norm": 0.5723116397857666,
"learning_rate": 4.2997198879551826e-05,
"loss": 0.2852,
"step": 400
},
{
"epoch": 0.42016806722689076,
"eval_loss": 0.334602415561676,
"eval_runtime": 13.5187,
"eval_samples_per_second": 35.506,
"eval_steps_per_second": 2.219,
"step": 400
},
{
"epoch": 0.43067226890756305,
"grad_norm": 0.6224997639656067,
"learning_rate": 4.2822128851540614e-05,
"loss": 0.2982,
"step": 410
},
{
"epoch": 0.4411764705882353,
"grad_norm": 0.548751711845398,
"learning_rate": 4.2647058823529415e-05,
"loss": 0.2867,
"step": 420
},
{
"epoch": 0.45168067226890757,
"grad_norm": 0.5709792971611023,
"learning_rate": 4.247198879551821e-05,
"loss": 0.2588,
"step": 430
},
{
"epoch": 0.46218487394957986,
"grad_norm": 0.537171483039856,
"learning_rate": 4.2296918767507e-05,
"loss": 0.2782,
"step": 440
},
{
"epoch": 0.4726890756302521,
"grad_norm": 0.5505544543266296,
"learning_rate": 4.21218487394958e-05,
"loss": 0.2703,
"step": 450
},
{
"epoch": 0.4726890756302521,
"eval_loss": 0.3293306231498718,
"eval_runtime": 13.5091,
"eval_samples_per_second": 35.532,
"eval_steps_per_second": 2.221,
"step": 450
},
{
"epoch": 0.4831932773109244,
"grad_norm": 0.5049700736999512,
"learning_rate": 4.19467787114846e-05,
"loss": 0.2439,
"step": 460
},
{
"epoch": 0.49369747899159666,
"grad_norm": 0.6546272039413452,
"learning_rate": 4.177170868347339e-05,
"loss": 0.2747,
"step": 470
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.4740765690803528,
"learning_rate": 4.159663865546219e-05,
"loss": 0.2692,
"step": 480
},
{
"epoch": 0.5147058823529411,
"grad_norm": 0.5289779901504517,
"learning_rate": 4.142156862745099e-05,
"loss": 0.29,
"step": 490
},
{
"epoch": 0.5252100840336135,
"grad_norm": 0.5422542095184326,
"learning_rate": 4.1246498599439776e-05,
"loss": 0.2721,
"step": 500
},
{
"epoch": 0.5252100840336135,
"eval_loss": 0.32664668560028076,
"eval_runtime": 13.4992,
"eval_samples_per_second": 35.558,
"eval_steps_per_second": 2.222,
"step": 500
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.586115837097168,
"learning_rate": 4.107142857142857e-05,
"loss": 0.2686,
"step": 510
},
{
"epoch": 0.5462184873949579,
"grad_norm": 0.608102560043335,
"learning_rate": 4.089635854341737e-05,
"loss": 0.2712,
"step": 520
},
{
"epoch": 0.5567226890756303,
"grad_norm": 0.5184093117713928,
"learning_rate": 4.0721288515406165e-05,
"loss": 0.2773,
"step": 530
},
{
"epoch": 0.5672268907563025,
"grad_norm": 0.524999737739563,
"learning_rate": 4.054621848739496e-05,
"loss": 0.2725,
"step": 540
},
{
"epoch": 0.5777310924369747,
"grad_norm": 0.6716265082359314,
"learning_rate": 4.0371148459383754e-05,
"loss": 0.2618,
"step": 550
},
{
"epoch": 0.5777310924369747,
"eval_loss": 0.32582366466522217,
"eval_runtime": 13.4847,
"eval_samples_per_second": 35.596,
"eval_steps_per_second": 2.225,
"step": 550
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.5464914441108704,
"learning_rate": 4.0196078431372555e-05,
"loss": 0.2566,
"step": 560
},
{
"epoch": 0.5987394957983193,
"grad_norm": 0.5436871647834778,
"learning_rate": 4.002100840336135e-05,
"loss": 0.2637,
"step": 570
},
{
"epoch": 0.6092436974789915,
"grad_norm": 0.5792732238769531,
"learning_rate": 3.984593837535014e-05,
"loss": 0.2678,
"step": 580
},
{
"epoch": 0.6197478991596639,
"grad_norm": 0.5623672008514404,
"learning_rate": 3.967086834733894e-05,
"loss": 0.2589,
"step": 590
},
{
"epoch": 0.6302521008403361,
"grad_norm": 0.5475742220878601,
"learning_rate": 3.949579831932773e-05,
"loss": 0.2619,
"step": 600
},
{
"epoch": 0.6302521008403361,
"eval_loss": 0.3220784366130829,
"eval_runtime": 13.4828,
"eval_samples_per_second": 35.601,
"eval_steps_per_second": 2.225,
"step": 600
},
{
"epoch": 0.6407563025210085,
"grad_norm": 0.5120112895965576,
"learning_rate": 3.9320728291316526e-05,
"loss": 0.2551,
"step": 610
},
{
"epoch": 0.6512605042016807,
"grad_norm": 0.5247055292129517,
"learning_rate": 3.914565826330533e-05,
"loss": 0.2578,
"step": 620
},
{
"epoch": 0.6617647058823529,
"grad_norm": 0.4846402406692505,
"learning_rate": 3.897058823529412e-05,
"loss": 0.2686,
"step": 630
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.5671103000640869,
"learning_rate": 3.8795518207282915e-05,
"loss": 0.2773,
"step": 640
},
{
"epoch": 0.6827731092436975,
"grad_norm": 0.5367168188095093,
"learning_rate": 3.862044817927171e-05,
"loss": 0.2632,
"step": 650
},
{
"epoch": 0.6827731092436975,
"eval_loss": 0.3196863830089569,
"eval_runtime": 13.4716,
"eval_samples_per_second": 35.631,
"eval_steps_per_second": 2.227,
"step": 650
},
{
"epoch": 0.6932773109243697,
"grad_norm": 0.6599178910255432,
"learning_rate": 3.844537815126051e-05,
"loss": 0.2603,
"step": 660
},
{
"epoch": 0.7037815126050421,
"grad_norm": 0.4438059628009796,
"learning_rate": 3.82703081232493e-05,
"loss": 0.2495,
"step": 670
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.5657041072845459,
"learning_rate": 3.809523809523809e-05,
"loss": 0.2361,
"step": 680
},
{
"epoch": 0.7247899159663865,
"grad_norm": 0.5392199754714966,
"learning_rate": 3.792016806722689e-05,
"loss": 0.2541,
"step": 690
},
{
"epoch": 0.7352941176470589,
"grad_norm": 0.5997769832611084,
"learning_rate": 3.774509803921569e-05,
"loss": 0.2643,
"step": 700
},
{
"epoch": 0.7352941176470589,
"eval_loss": 0.32053324580192566,
"eval_runtime": 13.4863,
"eval_samples_per_second": 35.592,
"eval_steps_per_second": 2.224,
"step": 700
},
{
"epoch": 0.7457983193277311,
"grad_norm": 0.6396230459213257,
"learning_rate": 3.757002801120448e-05,
"loss": 0.2454,
"step": 710
},
{
"epoch": 0.7563025210084033,
"grad_norm": 0.4835526645183563,
"learning_rate": 3.739495798319328e-05,
"loss": 0.2529,
"step": 720
},
{
"epoch": 0.7668067226890757,
"grad_norm": 0.5814074277877808,
"learning_rate": 3.721988795518208e-05,
"loss": 0.2445,
"step": 730
},
{
"epoch": 0.7773109243697479,
"grad_norm": 0.4852081537246704,
"learning_rate": 3.704481792717087e-05,
"loss": 0.2522,
"step": 740
},
{
"epoch": 0.7878151260504201,
"grad_norm": 0.5790079236030579,
"learning_rate": 3.6869747899159665e-05,
"loss": 0.2561,
"step": 750
},
{
"epoch": 0.7878151260504201,
"eval_loss": 0.32097509503364563,
"eval_runtime": 13.4958,
"eval_samples_per_second": 35.567,
"eval_steps_per_second": 2.223,
"step": 750
},
{
"epoch": 0.7983193277310925,
"grad_norm": 0.5733464360237122,
"learning_rate": 3.669467787114846e-05,
"loss": 0.248,
"step": 760
},
{
"epoch": 0.8088235294117647,
"grad_norm": 0.6230357885360718,
"learning_rate": 3.6519607843137254e-05,
"loss": 0.2536,
"step": 770
},
{
"epoch": 0.819327731092437,
"grad_norm": 0.6231834888458252,
"learning_rate": 3.634453781512605e-05,
"loss": 0.258,
"step": 780
},
{
"epoch": 0.8298319327731093,
"grad_norm": 0.5959641933441162,
"learning_rate": 3.616946778711485e-05,
"loss": 0.2412,
"step": 790
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.5139572024345398,
"learning_rate": 3.5994397759103643e-05,
"loss": 0.2516,
"step": 800
},
{
"epoch": 0.8403361344537815,
"eval_loss": 0.3214908242225647,
"eval_runtime": 13.5134,
"eval_samples_per_second": 35.52,
"eval_steps_per_second": 2.22,
"step": 800
},
{
"epoch": 0.8508403361344538,
"grad_norm": 0.5790511965751648,
"learning_rate": 3.581932773109244e-05,
"loss": 0.2427,
"step": 810
},
{
"epoch": 0.8613445378151261,
"grad_norm": 0.5952211618423462,
"learning_rate": 3.564425770308123e-05,
"loss": 0.2673,
"step": 820
},
{
"epoch": 0.8718487394957983,
"grad_norm": 0.5401202440261841,
"learning_rate": 3.546918767507003e-05,
"loss": 0.2392,
"step": 830
},
{
"epoch": 0.8823529411764706,
"grad_norm": 0.5725980401039124,
"learning_rate": 3.529411764705883e-05,
"loss": 0.2381,
"step": 840
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.6116746664047241,
"learning_rate": 3.511904761904762e-05,
"loss": 0.2403,
"step": 850
},
{
"epoch": 0.8928571428571429,
"eval_loss": 0.3206528425216675,
"eval_runtime": 13.5142,
"eval_samples_per_second": 35.518,
"eval_steps_per_second": 2.22,
"step": 850
},
{
"epoch": 0.9033613445378151,
"grad_norm": 0.49307090044021606,
"learning_rate": 3.4943977591036416e-05,
"loss": 0.2375,
"step": 860
},
{
"epoch": 0.9138655462184874,
"grad_norm": 0.6631060242652893,
"learning_rate": 3.476890756302521e-05,
"loss": 0.2416,
"step": 870
},
{
"epoch": 0.9243697478991597,
"grad_norm": 0.5449891090393066,
"learning_rate": 3.4593837535014004e-05,
"loss": 0.2614,
"step": 880
},
{
"epoch": 0.9348739495798319,
"grad_norm": 0.5289740562438965,
"learning_rate": 3.4418767507002805e-05,
"loss": 0.2388,
"step": 890
},
{
"epoch": 0.9453781512605042,
"grad_norm": 0.5789199471473694,
"learning_rate": 3.42436974789916e-05,
"loss": 0.2461,
"step": 900
},
{
"epoch": 0.9453781512605042,
"eval_loss": 0.32027536630630493,
"eval_runtime": 13.5157,
"eval_samples_per_second": 35.514,
"eval_steps_per_second": 2.22,
"step": 900
}
],
"logging_steps": 10,
"max_steps": 2856,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0789026231051878e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}