{
"best_metric": 1.0150929689407349,
"best_model_checkpoint": "outputs/checkpoint-370",
"epoch": 14.98515519568151,
"eval_steps": 500,
"global_step": 694,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4318488529014845,
"grad_norm": 0.8248411417007446,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.3247,
"step": 20
},
{
"epoch": 0.863697705802969,
"grad_norm": 0.49438855051994324,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.1902,
"step": 40
},
{
"epoch": 0.9932523616734144,
"eval_loss": 1.8555350303649902,
"eval_runtime": 11.6415,
"eval_samples_per_second": 31.955,
"eval_steps_per_second": 4.037,
"step": 46
},
{
"epoch": 1.2955465587044535,
"grad_norm": 0.47723057866096497,
"learning_rate": 4.8e-05,
"loss": 2.034,
"step": 60
},
{
"epoch": 1.7273954116059378,
"grad_norm": 0.593337893486023,
"learning_rate": 6.400000000000001e-05,
"loss": 1.8608,
"step": 80
},
{
"epoch": 1.9865047233468287,
"eval_loss": 1.5587676763534546,
"eval_runtime": 11.6248,
"eval_samples_per_second": 32.001,
"eval_steps_per_second": 4.043,
"step": 92
},
{
"epoch": 2.1592442645074224,
"grad_norm": 0.8223298192024231,
"learning_rate": 8e-05,
"loss": 1.7855,
"step": 100
},
{
"epoch": 2.591093117408907,
"grad_norm": 0.817817747592926,
"learning_rate": 7.985111279768106e-05,
"loss": 1.6414,
"step": 120
},
{
"epoch": 2.979757085020243,
"eval_loss": 1.313427448272705,
"eval_runtime": 11.6243,
"eval_samples_per_second": 32.002,
"eval_steps_per_second": 4.043,
"step": 138
},
{
"epoch": 3.0229419703103915,
"grad_norm": 1.2319685220718384,
"learning_rate": 7.940555956067495e-05,
"loss": 1.5914,
"step": 140
},
{
"epoch": 3.454790823211876,
"grad_norm": 1.4404529333114624,
"learning_rate": 7.866665714772879e-05,
"loss": 1.4412,
"step": 160
},
{
"epoch": 3.8866396761133606,
"grad_norm": 1.4236242771148682,
"learning_rate": 7.763990621449507e-05,
"loss": 1.4051,
"step": 180
},
{
"epoch": 3.9946018893387314,
"eval_loss": 1.1534684896469116,
"eval_runtime": 11.6309,
"eval_samples_per_second": 31.984,
"eval_steps_per_second": 4.041,
"step": 185
},
{
"epoch": 4.318488529014845,
"grad_norm": 1.6882641315460205,
"learning_rate": 7.633295026467016e-05,
"loss": 1.2741,
"step": 200
},
{
"epoch": 4.75033738191633,
"grad_norm": 1.8958238363265991,
"learning_rate": 7.475551874900027e-05,
"loss": 1.2849,
"step": 220
},
{
"epoch": 4.987854251012146,
"eval_loss": 1.0627214908599854,
"eval_runtime": 11.6391,
"eval_samples_per_second": 31.961,
"eval_steps_per_second": 4.038,
"step": 231
},
{
"epoch": 5.182186234817814,
"grad_norm": 2.1620829105377197,
"learning_rate": 7.291935463574626e-05,
"loss": 1.2407,
"step": 240
},
{
"epoch": 5.614035087719298,
"grad_norm": 2.1012117862701416,
"learning_rate": 7.083812699179919e-05,
"loss": 1.1518,
"step": 260
},
{
"epoch": 5.98110661268556,
"eval_loss": 1.028646469116211,
"eval_runtime": 11.6328,
"eval_samples_per_second": 31.978,
"eval_steps_per_second": 4.04,
"step": 277
},
{
"epoch": 6.045883940620783,
"grad_norm": 1.7699904441833496,
"learning_rate": 6.852732922522385e-05,
"loss": 1.103,
"step": 280
},
{
"epoch": 6.477732793522267,
"grad_norm": 1.8991751670837402,
"learning_rate": 6.600416374674978e-05,
"loss": 1.0045,
"step": 300
},
{
"epoch": 6.909581646423752,
"grad_norm": 2.116734027862549,
"learning_rate": 6.328741390883084e-05,
"loss": 1.0787,
"step": 320
},
{
"epoch": 6.995951417004049,
"eval_loss": 1.0193792581558228,
"eval_runtime": 11.638,
"eval_samples_per_second": 31.964,
"eval_steps_per_second": 4.038,
"step": 324
},
{
"epoch": 7.341430499325236,
"grad_norm": 2.5775842666625977,
"learning_rate": 6.0397304175605444e-05,
"loss": 0.9657,
"step": 340
},
{
"epoch": 7.77327935222672,
"grad_norm": 2.1000559329986572,
"learning_rate": 5.735534956470233e-05,
"loss": 0.9986,
"step": 360
},
{
"epoch": 7.989203778677463,
"eval_loss": 1.0150929689407349,
"eval_runtime": 11.632,
"eval_samples_per_second": 31.981,
"eval_steps_per_second": 4.041,
"step": 370
},
{
"epoch": 8.205128205128204,
"grad_norm": 2.1909162998199463,
"learning_rate": 5.4184195481701425e-05,
"loss": 0.9297,
"step": 380
},
{
"epoch": 8.63697705802969,
"grad_norm": 2.5721118450164795,
"learning_rate": 5.0907449139579755e-05,
"loss": 0.9043,
"step": 400
},
{
"epoch": 8.982456140350877,
"eval_loss": 1.0347795486450195,
"eval_runtime": 11.6414,
"eval_samples_per_second": 31.955,
"eval_steps_per_second": 4.037,
"step": 416
},
{
"epoch": 9.068825910931174,
"grad_norm": 2.0946638584136963,
"learning_rate": 4.754950381811667e-05,
"loss": 0.9027,
"step": 420
},
{
"epoch": 9.50067476383266,
"grad_norm": 2.279616117477417,
"learning_rate": 4.41353572715348e-05,
"loss": 0.8271,
"step": 440
},
{
"epoch": 9.932523616734143,
"grad_norm": 2.285020351409912,
"learning_rate": 4.069042563621555e-05,
"loss": 0.8635,
"step": 460
},
{
"epoch": 9.997300944669366,
"eval_loss": 1.044842004776001,
"eval_runtime": 11.6375,
"eval_samples_per_second": 31.966,
"eval_steps_per_second": 4.039,
"step": 463
},
{
"epoch": 10.364372469635628,
"grad_norm": 2.5461602210998535,
"learning_rate": 3.7240354223827004e-05,
"loss": 0.7875,
"step": 480
},
{
"epoch": 10.796221322537113,
"grad_norm": 2.521639823913574,
"learning_rate": 3.381082660838875e-05,
"loss": 0.7705,
"step": 500
},
{
"epoch": 10.99055330634278,
"eval_loss": 1.092528223991394,
"eval_runtime": 11.637,
"eval_samples_per_second": 31.967,
"eval_steps_per_second": 4.039,
"step": 509
},
{
"epoch": 11.228070175438596,
"grad_norm": 2.474837303161621,
"learning_rate": 3.0427373428497704e-05,
"loss": 0.8147,
"step": 520
},
{
"epoch": 11.65991902834008,
"grad_norm": 2.7452375888824463,
"learning_rate": 2.7115182328060385e-05,
"loss": 0.7627,
"step": 540
},
{
"epoch": 11.983805668016194,
"eval_loss": 1.1271629333496094,
"eval_runtime": 11.6156,
"eval_samples_per_second": 32.026,
"eval_steps_per_second": 4.046,
"step": 555
},
{
"epoch": 12.091767881241566,
"grad_norm": 2.392009973526001,
"learning_rate": 2.3898910450401306e-05,
"loss": 0.7224,
"step": 560
},
{
"epoch": 12.523616734143049,
"grad_norm": 2.6347837448120117,
"learning_rate": 2.0802500881608557e-05,
"loss": 0.7138,
"step": 580
},
{
"epoch": 12.955465587044534,
"grad_norm": 2.9887828826904297,
"learning_rate": 1.7849004409578678e-05,
"loss": 0.6921,
"step": 600
},
{
"epoch": 12.998650472334683,
"eval_loss": 1.1484102010726929,
"eval_runtime": 11.6137,
"eval_samples_per_second": 32.031,
"eval_steps_per_second": 4.047,
"step": 602
},
{
"epoch": 13.387314439946019,
"grad_norm": 2.1394331455230713,
"learning_rate": 1.5060407925650662e-05,
"loss": 0.6668,
"step": 620
},
{
"epoch": 13.819163292847504,
"grad_norm": 2.4540863037109375,
"learning_rate": 1.2457470746268912e-05,
"loss": 0.7211,
"step": 640
},
{
"epoch": 13.991902834008098,
"eval_loss": 1.1996936798095703,
"eval_runtime": 11.6138,
"eval_samples_per_second": 32.031,
"eval_steps_per_second": 4.047,
"step": 648
},
{
"epoch": 14.251012145748987,
"grad_norm": 2.405277967453003,
"learning_rate": 1.0059570073155953e-05,
"loss": 0.6974,
"step": 660
},
{
"epoch": 14.682860998650472,
"grad_norm": 2.132652521133423,
"learning_rate": 7.884556742444704e-06,
"loss": 0.6592,
"step": 680
},
{
"epoch": 14.98515519568151,
"eval_loss": 1.2192020416259766,
"eval_runtime": 11.6048,
"eval_samples_per_second": 32.056,
"eval_steps_per_second": 4.05,
"step": 694
}
],
"logging_steps": 20,
"max_steps": 828,
"num_input_tokens_seen": 0,
"num_train_epochs": 18,
"save_steps": 500,
"total_flos": 6.173417896887091e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}