{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.31512605042016806,
  "eval_steps": 50,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01050420168067227,
      "grad_norm": 2.691126585006714,
      "learning_rate": 4.98249299719888e-05,
      "loss": 1.6756,
      "step": 10
    },
    {
      "epoch": 0.02100840336134454,
      "grad_norm": 1.9124071598052979,
      "learning_rate": 4.96498599439776e-05,
      "loss": 0.5155,
      "step": 20
    },
    {
      "epoch": 0.031512605042016806,
      "grad_norm": 1.1180588006973267,
      "learning_rate": 4.947478991596639e-05,
      "loss": 0.4521,
      "step": 30
    },
    {
      "epoch": 0.04201680672268908,
      "grad_norm": 0.7634907364845276,
      "learning_rate": 4.9299719887955186e-05,
      "loss": 0.3732,
      "step": 40
    },
    {
      "epoch": 0.052521008403361345,
      "grad_norm": 0.6673978567123413,
      "learning_rate": 4.912464985994398e-05,
      "loss": 0.3832,
      "step": 50
    },
    {
      "epoch": 0.052521008403361345,
      "eval_loss": 0.424630731344223,
      "eval_runtime": 13.4948,
      "eval_samples_per_second": 35.569,
      "eval_steps_per_second": 2.223,
      "step": 50
    },
    {
      "epoch": 0.06302521008403361,
      "grad_norm": 0.6445392370223999,
      "learning_rate": 4.8949579831932775e-05,
      "loss": 0.3386,
      "step": 60
    },
    {
      "epoch": 0.07352941176470588,
      "grad_norm": 0.5743226408958435,
      "learning_rate": 4.877450980392157e-05,
      "loss": 0.3443,
      "step": 70
    },
    {
      "epoch": 0.08403361344537816,
      "grad_norm": 0.6735454201698303,
      "learning_rate": 4.859943977591036e-05,
      "loss": 0.3212,
      "step": 80
    },
    {
      "epoch": 0.09453781512605042,
      "grad_norm": 0.660349428653717,
      "learning_rate": 4.8424369747899164e-05,
      "loss": 0.3379,
      "step": 90
    },
    {
      "epoch": 0.10504201680672269,
      "grad_norm": 0.8036162853240967,
      "learning_rate": 4.824929971988796e-05,
      "loss": 0.3177,
      "step": 100
    },
    {
      "epoch": 0.10504201680672269,
      "eval_loss": 0.3798685073852539,
      "eval_runtime": 13.5369,
      "eval_samples_per_second": 35.459,
      "eval_steps_per_second": 2.216,
      "step": 100
    },
    {
      "epoch": 0.11554621848739496,
      "grad_norm": 0.6827962398529053,
      "learning_rate": 4.807422969187675e-05,
      "loss": 0.3405,
      "step": 110
    },
    {
      "epoch": 0.12605042016806722,
      "grad_norm": 0.5923526287078857,
      "learning_rate": 4.7899159663865554e-05,
      "loss": 0.3324,
      "step": 120
    },
    {
      "epoch": 0.13655462184873948,
      "grad_norm": 0.6476097106933594,
      "learning_rate": 4.772408963585435e-05,
      "loss": 0.3216,
      "step": 130
    },
    {
      "epoch": 0.14705882352941177,
      "grad_norm": 0.5536680221557617,
      "learning_rate": 4.7549019607843135e-05,
      "loss": 0.3425,
      "step": 140
    },
    {
      "epoch": 0.15756302521008403,
      "grad_norm": 0.5725419521331787,
      "learning_rate": 4.7373949579831936e-05,
      "loss": 0.3062,
      "step": 150
    },
    {
      "epoch": 0.15756302521008403,
      "eval_loss": 0.36464938521385193,
      "eval_runtime": 13.5257,
      "eval_samples_per_second": 35.488,
      "eval_steps_per_second": 2.218,
      "step": 150
    },
    {
      "epoch": 0.16806722689075632,
      "grad_norm": 0.6459401249885559,
      "learning_rate": 4.719887955182073e-05,
      "loss": 0.327,
      "step": 160
    },
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 0.7749695181846619,
      "learning_rate": 4.7023809523809525e-05,
      "loss": 0.3146,
      "step": 170
    },
    {
      "epoch": 0.18907563025210083,
      "grad_norm": 0.7452580332756042,
      "learning_rate": 4.684873949579832e-05,
      "loss": 0.3024,
      "step": 180
    },
    {
      "epoch": 0.19957983193277312,
      "grad_norm": 0.632286012172699,
      "learning_rate": 4.667366946778712e-05,
      "loss": 0.3157,
      "step": 190
    },
    {
      "epoch": 0.21008403361344538,
      "grad_norm": 0.6443158984184265,
      "learning_rate": 4.6498599439775914e-05,
      "loss": 0.3039,
      "step": 200
    },
    {
      "epoch": 0.21008403361344538,
      "eval_loss": 0.35029926896095276,
      "eval_runtime": 13.5332,
      "eval_samples_per_second": 35.468,
      "eval_steps_per_second": 2.217,
      "step": 200
    },
    {
      "epoch": 0.22058823529411764,
      "grad_norm": 0.6045960783958435,
      "learning_rate": 4.632352941176471e-05,
      "loss": 0.313,
      "step": 210
    },
    {
      "epoch": 0.23109243697478993,
      "grad_norm": 0.5544140338897705,
      "learning_rate": 4.61484593837535e-05,
      "loss": 0.2992,
      "step": 220
    },
    {
      "epoch": 0.2415966386554622,
      "grad_norm": 0.45881810784339905,
      "learning_rate": 4.59733893557423e-05,
      "loss": 0.2889,
      "step": 230
    },
    {
      "epoch": 0.25210084033613445,
      "grad_norm": 0.6315861344337463,
      "learning_rate": 4.579831932773109e-05,
      "loss": 0.3055,
      "step": 240
    },
    {
      "epoch": 0.26260504201680673,
      "grad_norm": 0.7058202624320984,
      "learning_rate": 4.562324929971989e-05,
      "loss": 0.2897,
      "step": 250
    },
    {
      "epoch": 0.26260504201680673,
      "eval_loss": 0.3452460467815399,
      "eval_runtime": 13.5256,
      "eval_samples_per_second": 35.488,
      "eval_steps_per_second": 2.218,
      "step": 250
    },
    {
      "epoch": 0.27310924369747897,
      "grad_norm": 0.7277679443359375,
      "learning_rate": 4.5448179271708687e-05,
      "loss": 0.3119,
      "step": 260
    },
    {
      "epoch": 0.28361344537815125,
      "grad_norm": 0.6311895251274109,
      "learning_rate": 4.527310924369748e-05,
      "loss": 0.296,
      "step": 270
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 0.5600409507751465,
      "learning_rate": 4.5098039215686275e-05,
      "loss": 0.3027,
      "step": 280
    },
    {
      "epoch": 0.30462184873949577,
      "grad_norm": 0.583111047744751,
      "learning_rate": 4.4922969187675076e-05,
      "loss": 0.3005,
      "step": 290
    },
    {
      "epoch": 0.31512605042016806,
      "grad_norm": 0.6667417287826538,
      "learning_rate": 4.474789915966387e-05,
      "loss": 0.2924,
      "step": 300
    },
    {
      "epoch": 0.31512605042016806,
      "eval_loss": 0.34308406710624695,
      "eval_runtime": 13.5267,
      "eval_samples_per_second": 35.485,
      "eval_steps_per_second": 2.218,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 2856,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.591343031844864e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
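
For reference, a minimal sketch of how the log_history above could be read back to inspect the training and evaluation loss curves. It assumes the file is saved under its conventional name, trainer_state.json, inside a checkpoint directory; the path "checkpoint-300/trainer_state.json" is only an illustrative assumption, adjust it to your layout.

import json

# Assumed example path; trainer_state.json is the conventional name the
# Hugging Face Trainer writes inside each checkpoint directory.
with open("checkpoint-300/trainer_state.json") as f:
    state = json.load(f)

# Training-log entries carry a "loss" key; evaluation entries carry "eval_loss".
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("train loss by step:", train_curve)
print("eval loss by step:", eval_curve)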