{
"best_metric": 1.021238923072815,
"best_model_checkpoint": "outputs/checkpoint-174",
"epoch": 5.849462365591398,
"eval_steps": 500,
"global_step": 204,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.5734767025089605,
"grad_norm": 1.5929073095321655,
"learning_rate": 4e-05,
"loss": 2.8166,
"step": 20
},
{
"epoch": 0.974910394265233,
"eval_loss": 1.9175890684127808,
"eval_runtime": 11.59,
"eval_samples_per_second": 32.097,
"eval_steps_per_second": 4.055,
"step": 34
},
{
"epoch": 1.146953405017921,
"grad_norm": 0.6563653945922852,
"learning_rate": 8e-05,
"loss": 2.0599,
"step": 40
},
{
"epoch": 1.7204301075268817,
"grad_norm": 0.7280052304267883,
"learning_rate": 0.00012,
"loss": 1.7423,
"step": 60
},
{
"epoch": 1.978494623655914,
"eval_loss": 1.5290530920028687,
"eval_runtime": 11.5703,
"eval_samples_per_second": 32.151,
"eval_steps_per_second": 4.062,
"step": 69
},
{
"epoch": 2.293906810035842,
"grad_norm": 1.1366034746170044,
"learning_rate": 0.00016,
"loss": 1.5151,
"step": 80
},
{
"epoch": 2.867383512544803,
"grad_norm": 1.3949623107910156,
"learning_rate": 0.0002,
"loss": 1.2694,
"step": 100
},
{
"epoch": 2.982078853046595,
"eval_loss": 1.2222951650619507,
"eval_runtime": 11.5734,
"eval_samples_per_second": 32.143,
"eval_steps_per_second": 4.061,
"step": 104
},
{
"epoch": 3.4408602150537635,
"grad_norm": 1.5031639337539673,
"learning_rate": 0.00018229838658936564,
"loss": 1.0282,
"step": 120
},
{
"epoch": 3.985663082437276,
"eval_loss": 1.0663368701934814,
"eval_runtime": 11.5671,
"eval_samples_per_second": 32.16,
"eval_steps_per_second": 4.063,
"step": 139
},
{
"epoch": 4.014336917562724,
"grad_norm": 1.577106237411499,
"learning_rate": 0.00013546048870425356,
"loss": 0.9281,
"step": 140
},
{
"epoch": 4.587813620071684,
"grad_norm": 1.994376301765442,
"learning_rate": 7.606843357124426e-05,
"loss": 0.7084,
"step": 160
},
{
"epoch": 4.989247311827957,
"eval_loss": 1.021238923072815,
"eval_runtime": 11.604,
"eval_samples_per_second": 32.058,
"eval_steps_per_second": 4.05,
"step": 174
},
{
"epoch": 5.161290322580645,
"grad_norm": 1.5775189399719238,
"learning_rate": 2.514892518288988e-05,
"loss": 0.6572,
"step": 180
},
{
"epoch": 5.734767025089606,
"grad_norm": 1.6861282587051392,
"learning_rate": 7.291125901946027e-07,
"loss": 0.5642,
"step": 200
},
{
"epoch": 5.849462365591398,
"eval_loss": 1.0425939559936523,
"eval_runtime": 11.584,
"eval_samples_per_second": 32.113,
"eval_steps_per_second": 4.057,
"step": 204
}
],
"logging_steps": 20,
"max_steps": 204,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"total_flos": 1.073987375468544e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}