{
"best_metric": 1.035351276397705,
"best_model_checkpoint": "outputs/checkpoint-463",
"epoch": 14.898785425101215,
"eval_steps": 500,
"global_step": 690,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4318488529014845,
"grad_norm": 0.8678001165390015,
"learning_rate": 1e-05,
"loss": 2.3338,
"step": 20
},
{
"epoch": 0.863697705802969,
"grad_norm": 0.6083477139472961,
"learning_rate": 2e-05,
"loss": 2.2493,
"step": 40
},
{
"epoch": 0.9932523616734144,
"eval_loss": 1.9349104166030884,
"eval_runtime": 11.681,
"eval_samples_per_second": 31.847,
"eval_steps_per_second": 4.024,
"step": 46
},
{
"epoch": 1.2955465587044535,
"grad_norm": 0.508277416229248,
"learning_rate": 3e-05,
"loss": 2.1059,
"step": 60
},
{
"epoch": 1.7273954116059378,
"grad_norm": 0.5420729517936707,
"learning_rate": 4e-05,
"loss": 1.9279,
"step": 80
},
{
"epoch": 1.9865047233468287,
"eval_loss": 1.6580256223678589,
"eval_runtime": 11.7033,
"eval_samples_per_second": 31.786,
"eval_steps_per_second": 4.016,
"step": 92
},
{
"epoch": 2.1592442645074224,
"grad_norm": 0.6726937294006348,
"learning_rate": 5e-05,
"loss": 1.8699,
"step": 100
},
{
"epoch": 2.591093117408907,
"grad_norm": 0.6740496158599854,
"learning_rate": 4.985837000525343e-05,
"loss": 1.7469,
"step": 120
},
{
"epoch": 2.979757085020243,
"eval_loss": 1.4485936164855957,
"eval_runtime": 11.6969,
"eval_samples_per_second": 31.803,
"eval_steps_per_second": 4.018,
"step": 138
},
{
"epoch": 3.0229419703103915,
"grad_norm": 1.0841119289398193,
"learning_rate": 4.9435084745446666e-05,
"loss": 1.7191,
"step": 140
},
{
"epoch": 3.454790823211876,
"grad_norm": 1.1675149202346802,
"learning_rate": 4.873494021170953e-05,
"loss": 1.5895,
"step": 160
},
{
"epoch": 3.8866396761133606,
"grad_norm": 1.175584077835083,
"learning_rate": 4.7765869321372836e-05,
"loss": 1.5507,
"step": 180
},
{
"epoch": 3.9946018893387314,
"eval_loss": 1.2826032638549805,
"eval_runtime": 11.7024,
"eval_samples_per_second": 31.788,
"eval_steps_per_second": 4.016,
"step": 185
},
{
"epoch": 4.318488529014845,
"grad_norm": 1.4653639793395996,
"learning_rate": 4.653885203484515e-05,
"loss": 1.4345,
"step": 200
},
{
"epoch": 4.75033738191633,
"grad_norm": 1.7988545894622803,
"learning_rate": 4.5067790948274094e-05,
"loss": 1.4308,
"step": 220
},
{
"epoch": 4.987854251012146,
"eval_loss": 1.1710984706878662,
"eval_runtime": 11.7104,
"eval_samples_per_second": 31.767,
"eval_steps_per_second": 4.014,
"step": 231
},
{
"epoch": 5.182186234817814,
"grad_norm": 1.7637828588485718,
"learning_rate": 4.336935377157668e-05,
"loss": 1.3828,
"step": 240
},
{
"epoch": 5.614035087719298,
"grad_norm": 1.7401658296585083,
"learning_rate": 4.146278447662597e-05,
"loss": 1.2948,
"step": 260
},
{
"epoch": 5.98110661268556,
"eval_loss": 1.1053041219711304,
"eval_runtime": 11.7036,
"eval_samples_per_second": 31.785,
"eval_steps_per_second": 4.016,
"step": 277
},
{
"epoch": 6.045883940620783,
"grad_norm": 1.7593504190444946,
"learning_rate": 3.9369685255360175e-05,
"loss": 1.2478,
"step": 280
},
{
"epoch": 6.477732793522267,
"grad_norm": 1.7287532091140747,
"learning_rate": 3.711377175831626e-05,
"loss": 1.1565,
"step": 300
},
{
"epoch": 6.909581646423752,
"grad_norm": 1.9551547765731812,
"learning_rate": 3.472060438683302e-05,
"loss": 1.2179,
"step": 320
},
{
"epoch": 6.995951417004049,
"eval_loss": 1.072460651397705,
"eval_runtime": 11.7026,
"eval_samples_per_second": 31.788,
"eval_steps_per_second": 4.016,
"step": 324
},
{
"epoch": 7.341430499325236,
"grad_norm": 2.535532236099243,
"learning_rate": 3.2217298683490525e-05,
"loss": 1.1197,
"step": 340
},
{
"epoch": 7.77327935222672,
"grad_norm": 2.150266170501709,
"learning_rate": 2.9632218102177862e-05,
"loss": 1.1465,
"step": 360
},
{
"epoch": 7.989203778677463,
"eval_loss": 1.0444090366363525,
"eval_runtime": 11.6993,
"eval_samples_per_second": 31.797,
"eval_steps_per_second": 4.017,
"step": 370
},
{
"epoch": 8.205128205128204,
"grad_norm": 2.408799409866333,
"learning_rate": 2.6994652638827078e-05,
"loss": 1.0879,
"step": 380
},
{
"epoch": 8.63697705802969,
"grad_norm": 2.7303247451782227,
"learning_rate": 2.433448696405563e-05,
"loss": 1.0654,
"step": 400
},
{
"epoch": 8.982456140350877,
"eval_loss": 1.0364046096801758,
"eval_runtime": 11.6996,
"eval_samples_per_second": 31.796,
"eval_steps_per_second": 4.017,
"step": 416
},
{
"epoch": 9.068825910931174,
"grad_norm": 2.2821261882781982,
"learning_rate": 2.1681861817906954e-05,
"loss": 1.0641,
"step": 420
},
{
"epoch": 9.50067476383266,
"grad_norm": 2.2011351585388184,
"learning_rate": 1.9066832503222128e-05,
"loss": 1.0035,
"step": 440
},
{
"epoch": 9.932523616734143,
"grad_norm": 2.181905508041382,
"learning_rate": 1.651902834704924e-05,
"loss": 1.036,
"step": 460
},
{
"epoch": 9.997300944669366,
"eval_loss": 1.035351276397705,
"eval_runtime": 11.6997,
"eval_samples_per_second": 31.796,
"eval_steps_per_second": 4.017,
"step": 463
},
{
"epoch": 10.364372469635628,
"grad_norm": 2.7207131385803223,
"learning_rate": 1.4067316988528617e-05,
"loss": 0.9789,
"step": 480
},
{
"epoch": 10.796221322537113,
"grad_norm": 2.6506447792053223,
"learning_rate": 1.173947729700644e-05,
"loss": 0.9563,
"step": 500
},
{
"epoch": 10.99055330634278,
"eval_loss": 1.0434376001358032,
"eval_runtime": 11.6958,
"eval_samples_per_second": 31.806,
"eval_steps_per_second": 4.019,
"step": 509
},
{
"epoch": 11.228070175438596,
"grad_norm": 2.4060709476470947,
"learning_rate": 9.561884626345205e-06,
"loss": 1.0145,
"step": 520
},
{
"epoch": 11.65991902834008,
"grad_norm": 2.9326512813568115,
"learning_rate": 7.5592119716253855e-06,
"loss": 0.9698,
"step": 540
},
{
"epoch": 11.983805668016194,
"eval_loss": 1.0567598342895508,
"eval_runtime": 11.7029,
"eval_samples_per_second": 31.787,
"eval_steps_per_second": 4.016,
"step": 555
},
{
"epoch": 12.091767881241566,
"grad_norm": 2.6050772666931152,
"learning_rate": 5.75415041425234e-06,
"loss": 0.9381,
"step": 560
},
{
"epoch": 12.523616734143049,
"grad_norm": 2.099339008331299,
"learning_rate": 4.167152022937124e-06,
"loss": 0.9417,
"step": 580
},
{
"epoch": 12.955465587044534,
"grad_norm": 3.280305862426758,
"learning_rate": 2.8161981235857143e-06,
"loss": 0.9234,
"step": 600
},
{
"epoch": 12.998650472334683,
"eval_loss": 1.0533791780471802,
"eval_runtime": 11.6821,
"eval_samples_per_second": 31.844,
"eval_steps_per_second": 4.023,
"step": 602
},
{
"epoch": 13.387314439946019,
"grad_norm": 2.247828483581543,
"learning_rate": 1.7165955636915392e-06,
"loss": 0.9119,
"step": 620
},
{
"epoch": 13.819163292847504,
"grad_norm": 2.479389190673828,
"learning_rate": 8.808032796371019e-07,
"loss": 0.9713,
"step": 640
},
{
"epoch": 13.991902834008098,
"eval_loss": 1.059370517730713,
"eval_runtime": 11.7334,
"eval_samples_per_second": 31.704,
"eval_steps_per_second": 4.006,
"step": 648
},
{
"epoch": 14.251012145748987,
"grad_norm": 2.459200382232666,
"learning_rate": 3.1829113196638614e-07,
"loss": 0.9512,
"step": 660
},
{
"epoch": 14.682860998650472,
"grad_norm": 2.204571485519409,
"learning_rate": 3.543260808095139e-08,
"loss": 0.9197,
"step": 680
},
{
"epoch": 14.898785425101215,
"eval_loss": 1.0609511137008667,
"eval_runtime": 11.7362,
"eval_samples_per_second": 31.697,
"eval_steps_per_second": 4.005,
"step": 690
}
],
"logging_steps": 20,
"max_steps": 690,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"total_flos": 6.133360120720589e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}