{
"best_metric": 1.2095868587493896,
"best_model_checkpoint": "outputs/checkpoint-782",
"epoch": 16.885290148448043,
"eval_steps": 500,
"global_step": 782,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4318488529014845,
"grad_norm": 1.8696340322494507,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.0163,
"step": 20
},
{
"epoch": 0.863697705802969,
"grad_norm": 0.5766105651855469,
"learning_rate": 4.000000000000001e-06,
"loss": 1.9853,
"step": 40
},
{
"epoch": 0.9932523616734144,
"eval_loss": 1.8159902095794678,
"eval_runtime": 12.9003,
"eval_samples_per_second": 28.837,
"eval_steps_per_second": 3.643,
"step": 46
},
{
"epoch": 1.2955465587044535,
"grad_norm": 0.4374605715274811,
"learning_rate": 6e-06,
"loss": 1.9778,
"step": 60
},
{
"epoch": 1.7273954116059378,
"grad_norm": 0.46712979674339294,
"learning_rate": 8.000000000000001e-06,
"loss": 1.8764,
"step": 80
},
{
"epoch": 1.9865047233468287,
"eval_loss": 1.6934317350387573,
"eval_runtime": 12.8941,
"eval_samples_per_second": 28.85,
"eval_steps_per_second": 3.645,
"step": 92
},
{
"epoch": 2.1592442645074224,
"grad_norm": 0.49692320823669434,
"learning_rate": 1e-05,
"loss": 1.86,
"step": 100
},
{
"epoch": 2.591093117408907,
"grad_norm": 0.46815183758735657,
"learning_rate": 9.978795707151492e-06,
"loss": 1.7959,
"step": 120
},
{
"epoch": 2.979757085020243,
"eval_loss": 1.5927985906600952,
"eval_runtime": 12.8879,
"eval_samples_per_second": 28.864,
"eval_steps_per_second": 3.647,
"step": 138
},
{
"epoch": 3.0229419703103915,
"grad_norm": 0.5940990447998047,
"learning_rate": 9.915362677420045e-06,
"loss": 1.8186,
"step": 140
},
{
"epoch": 3.454790823211876,
"grad_norm": 0.5652334690093994,
"learning_rate": 9.810238931821139e-06,
"loss": 1.7388,
"step": 160
},
{
"epoch": 3.8866396761133606,
"grad_norm": 0.5389769077301025,
"learning_rate": 9.664316100229578e-06,
"loss": 1.7176,
"step": 180
},
{
"epoch": 3.9946018893387314,
"eval_loss": 1.5169272422790527,
"eval_runtime": 12.8974,
"eval_samples_per_second": 28.843,
"eval_steps_per_second": 3.644,
"step": 185
},
{
"epoch": 4.318488529014845,
"grad_norm": 0.6361340880393982,
"learning_rate": 9.478831858827105e-06,
"loss": 1.6774,
"step": 200
},
{
"epoch": 4.75033738191633,
"grad_norm": 0.766372561454773,
"learning_rate": 9.255359432483106e-06,
"loss": 1.6827,
"step": 220
},
{
"epoch": 4.987854251012146,
"eval_loss": 1.4580748081207275,
"eval_runtime": 12.9055,
"eval_samples_per_second": 28.825,
"eval_steps_per_second": 3.642,
"step": 231
},
{
"epoch": 5.182186234817814,
"grad_norm": 0.6636700630187988,
"learning_rate": 8.995794251106295e-06,
"loss": 1.6551,
"step": 240
},
{
"epoch": 5.614035087719298,
"grad_norm": 0.7402545809745789,
"learning_rate": 8.702337873144343e-06,
"loss": 1.6079,
"step": 260
},
{
"epoch": 5.98110661268556,
"eval_loss": 1.4053566455841064,
"eval_runtime": 12.8973,
"eval_samples_per_second": 28.843,
"eval_steps_per_second": 3.644,
"step": 277
},
{
"epoch": 6.045883940620783,
"grad_norm": 0.7764894366264343,
"learning_rate": 8.37747931258788e-06,
"loss": 1.6001,
"step": 280
},
{
"epoch": 6.477732793522267,
"grad_norm": 0.8082602024078369,
"learning_rate": 8.023973927857857e-06,
"loss": 1.5414,
"step": 300
},
{
"epoch": 6.909581646423752,
"grad_norm": 0.8577279448509216,
"learning_rate": 7.644820051634813e-06,
"loss": 1.5751,
"step": 320
},
{
"epoch": 6.995951417004049,
"eval_loss": 1.3543951511383057,
"eval_runtime": 12.9006,
"eval_samples_per_second": 28.836,
"eval_steps_per_second": 3.643,
"step": 324
},
{
"epoch": 7.341430499325236,
"grad_norm": 1.0512497425079346,
"learning_rate": 7.243233559849179e-06,
"loss": 1.5131,
"step": 340
},
{
"epoch": 7.77327935222672,
"grad_norm": 0.8688454627990723,
"learning_rate": 6.822620595531286e-06,
"loss": 1.5286,
"step": 360
},
{
"epoch": 7.989203778677463,
"eval_loss": 1.3146252632141113,
"eval_runtime": 12.9044,
"eval_samples_per_second": 28.827,
"eval_steps_per_second": 3.642,
"step": 370
},
{
"epoch": 8.205128205128204,
"grad_norm": 0.9062187671661377,
"learning_rate": 6.386548678869644e-06,
"loss": 1.4984,
"step": 380
},
{
"epoch": 8.63697705802969,
"grad_norm": 1.1049658060073853,
"learning_rate": 5.938716448513819e-06,
"loss": 1.4733,
"step": 400
},
{
"epoch": 8.982456140350877,
"eval_loss": 1.2819702625274658,
"eval_runtime": 12.9073,
"eval_samples_per_second": 28.821,
"eval_steps_per_second": 3.641,
"step": 416
},
{
"epoch": 9.068825910931174,
"grad_norm": 0.8910373449325562,
"learning_rate": 5.4829222907675895e-06,
"loss": 1.4715,
"step": 420
},
{
"epoch": 9.50067476383266,
"grad_norm": 1.2289608716964722,
"learning_rate": 5.0230321227507595e-06,
"loss": 1.4326,
"step": 440
},
{
"epoch": 9.932523616734143,
"grad_norm": 1.0093016624450684,
"learning_rate": 4.562946602783637e-06,
"loss": 1.4576,
"step": 460
},
{
"epoch": 9.997300944669366,
"eval_loss": 1.2573164701461792,
"eval_runtime": 12.8922,
"eval_samples_per_second": 28.855,
"eval_steps_per_second": 3.646,
"step": 463
},
{
"epoch": 10.364372469635628,
"grad_norm": 1.175689697265625,
"learning_rate": 4.10656804610652e-06,
"loss": 1.4222,
"step": 480
},
{
"epoch": 10.796221322537113,
"grad_norm": 1.1676870584487915,
"learning_rate": 3.6577673265456296e-06,
"loss": 1.3863,
"step": 500
},
{
"epoch": 10.99055330634278,
"eval_loss": 1.2392951250076294,
"eval_runtime": 12.9835,
"eval_samples_per_second": 28.652,
"eval_steps_per_second": 3.62,
"step": 509
},
{
"epoch": 11.228070175438596,
"grad_norm": 1.2346034049987793,
"learning_rate": 3.220351044856247e-06,
"loss": 1.4357,
"step": 520
},
{
"epoch": 11.65991902834008,
"grad_norm": 1.4561930894851685,
"learning_rate": 2.7980292422118282e-06,
"loss": 1.414,
"step": 540
},
{
"epoch": 11.983805668016194,
"eval_loss": 1.226759672164917,
"eval_runtime": 12.9423,
"eval_samples_per_second": 28.743,
"eval_steps_per_second": 3.631,
"step": 555
},
{
"epoch": 12.091767881241566,
"grad_norm": 1.2641685009002686,
"learning_rate": 2.3943839326842096e-06,
"loss": 1.3907,
"step": 560
},
{
"epoch": 12.523616734143049,
"grad_norm": 0.9569095373153687,
"learning_rate": 2.012838721613447e-06,
"loss": 1.3752,
"step": 580
},
{
"epoch": 12.955465587044534,
"grad_norm": 1.5854405164718628,
"learning_rate": 1.6566297675557392e-06,
"loss": 1.3707,
"step": 600
},
{
"epoch": 12.998650472334683,
"eval_loss": 1.2172602415084839,
"eval_runtime": 12.9465,
"eval_samples_per_second": 28.734,
"eval_steps_per_second": 3.63,
"step": 602
},
{
"epoch": 13.387314439946019,
"grad_norm": 1.2519913911819458,
"learning_rate": 1.3287783341019278e-06,
"loss": 1.3512,
"step": 620
},
{
"epoch": 13.819163292847504,
"grad_norm": 1.2418749332427979,
"learning_rate": 1.0320651643743128e-06,
"loss": 1.4134,
"step": 640
},
{
"epoch": 13.991902834008098,
"eval_loss": 1.2128875255584717,
"eval_runtime": 12.9446,
"eval_samples_per_second": 28.738,
"eval_steps_per_second": 3.631,
"step": 648
},
{
"epoch": 14.251012145748987,
"grad_norm": 1.0899240970611572,
"learning_rate": 7.690068955500623e-07,
"loss": 1.3818,
"step": 660
},
{
"epoch": 14.682860998650472,
"grad_norm": 1.3283452987670898,
"learning_rate": 5.418347134565249e-07,
"loss": 1.3678,
"step": 680
},
{
"epoch": 14.98515519568151,
"eval_loss": 1.2105530500411987,
"eval_runtime": 12.9404,
"eval_samples_per_second": 28.747,
"eval_steps_per_second": 3.632,
"step": 694
},
{
"epoch": 15.114709851551957,
"grad_norm": 1.2848026752471924,
"learning_rate": 3.524754282841575e-07,
"loss": 1.3814,
"step": 700
},
{
"epoch": 15.54655870445344,
"grad_norm": 1.1922675371170044,
"learning_rate": 2.0253513192751374e-07,
"loss": 1.3559,
"step": 720
},
{
"epoch": 15.978407557354926,
"grad_norm": 1.3479186296463013,
"learning_rate": 9.32855755680867e-08,
"loss": 1.4042,
"step": 740
},
{
"epoch": 16.0,
"eval_loss": 1.2096946239471436,
"eval_runtime": 12.8982,
"eval_samples_per_second": 28.841,
"eval_steps_per_second": 3.644,
"step": 741
},
{
"epoch": 16.41025641025641,
"grad_norm": 1.2240372896194458,
"learning_rate": 2.5653383040524228e-08,
"loss": 1.3493,
"step": 760
},
{
"epoch": 16.842105263157894,
"grad_norm": 1.1864194869995117,
"learning_rate": 2.1219147136264383e-10,
"loss": 1.3851,
"step": 780
},
{
"epoch": 16.885290148448043,
"eval_loss": 1.2095868587493896,
"eval_runtime": 12.9772,
"eval_samples_per_second": 28.666,
"eval_steps_per_second": 3.622,
"step": 782
}
],
"logging_steps": 20,
"max_steps": 782,
"num_input_tokens_seen": 0,
"num_train_epochs": 17,
"save_steps": 500,
"total_flos": 6.028322251419648e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}