{
"best_metric": 1.8080483675003052,
"best_model_checkpoint": "outputs/checkpoint-174",
"epoch": 4.989247311827957,
"eval_steps": 500,
"global_step": 174,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.5734767025089605,
"grad_norm": 0.8502072691917419,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.925,
"step": 20
},
{
"epoch": 0.974910394265233,
"eval_loss": 2.817981243133545,
"eval_runtime": 11.5812,
"eval_samples_per_second": 32.121,
"eval_steps_per_second": 4.058,
"step": 34
},
{
"epoch": 1.146953405017921,
"grad_norm": 0.916117787361145,
"learning_rate": 4.000000000000001e-06,
"loss": 2.8937,
"step": 40
},
{
"epoch": 1.7204301075268817,
"grad_norm": 1.073569416999817,
"learning_rate": 6e-06,
"loss": 2.8277,
"step": 60
},
{
"epoch": 1.978494623655914,
"eval_loss": 2.588735342025757,
"eval_runtime": 11.5877,
"eval_samples_per_second": 32.103,
"eval_steps_per_second": 4.056,
"step": 69
},
{
"epoch": 2.293906810035842,
"grad_norm": 1.3921942710876465,
"learning_rate": 8.000000000000001e-06,
"loss": 2.6548,
"step": 80
},
{
"epoch": 2.867383512544803,
"grad_norm": 1.2179991006851196,
"learning_rate": 1e-05,
"loss": 2.2871,
"step": 100
},
{
"epoch": 2.982078853046595,
"eval_loss": 1.9897719621658325,
"eval_runtime": 11.5904,
"eval_samples_per_second": 32.096,
"eval_steps_per_second": 4.055,
"step": 104
},
{
"epoch": 3.4408602150537635,
"grad_norm": 0.5500979423522949,
"learning_rate": 9.829629131445342e-06,
"loss": 2.0042,
"step": 120
},
{
"epoch": 3.985663082437276,
"eval_loss": 1.8679059743881226,
"eval_runtime": 11.5852,
"eval_samples_per_second": 32.11,
"eval_steps_per_second": 4.057,
"step": 139
},
{
"epoch": 4.014336917562724,
"grad_norm": 0.46987271308898926,
"learning_rate": 9.330127018922195e-06,
"loss": 1.9362,
"step": 140
},
{
"epoch": 4.587813620071684,
"grad_norm": 0.44649583101272583,
"learning_rate": 8.535533905932739e-06,
"loss": 1.891,
"step": 160
},
{
"epoch": 4.989247311827957,
"eval_loss": 1.8080483675003052,
"eval_runtime": 11.5897,
"eval_samples_per_second": 32.097,
"eval_steps_per_second": 4.055,
"step": 174
}
],
"logging_steps": 20,
"max_steps": 340,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 9223279682985984.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}