{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.028169014084507,
  "eval_steps": 54,
  "global_step": 432,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "grad_norm": 2.8726260662078857,
      "learning_rate": 1.267605633802817e-05,
      "loss": 1.2091,
      "step": 54
    },
    {
      "epoch": 0.25,
      "eval_loss": 1.1433706283569336,
      "eval_runtime": 53.694,
      "eval_samples_per_second": 4.19,
      "eval_steps_per_second": 1.062,
      "step": 54
    },
    {
      "epoch": 0.51,
      "grad_norm": 2.3356099128723145,
      "learning_rate": 2.535211267605634e-05,
      "loss": 1.0323,
      "step": 108
    },
    {
      "epoch": 0.51,
      "eval_loss": 0.9531688690185547,
      "eval_runtime": 53.7017,
      "eval_samples_per_second": 4.19,
      "eval_steps_per_second": 1.061,
      "step": 108
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.767425060272217,
      "learning_rate": 3.802816901408451e-05,
      "loss": 0.9487,
      "step": 162
    },
    {
      "epoch": 0.76,
      "eval_loss": 0.9003196954727173,
      "eval_runtime": 53.6993,
      "eval_samples_per_second": 4.19,
      "eval_steps_per_second": 1.061,
      "step": 162
    },
    {
      "epoch": 1.01,
      "grad_norm": 2.3933067321777344,
      "learning_rate": 4.992175273865415e-05,
      "loss": 0.9267,
      "step": 216
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.8847324848175049,
      "eval_runtime": 53.6972,
      "eval_samples_per_second": 4.19,
      "eval_steps_per_second": 1.062,
      "step": 216
    },
    {
      "epoch": 1.27,
      "grad_norm": 1.8673744201660156,
      "learning_rate": 4.85133020344288e-05,
      "loss": 0.8364,
      "step": 270
    },
    {
      "epoch": 1.27,
      "eval_loss": 0.8761005997657776,
      "eval_runtime": 53.6965,
      "eval_samples_per_second": 4.19,
      "eval_steps_per_second": 1.062,
      "step": 270
    },
    {
      "epoch": 1.52,
      "grad_norm": 2.8745174407958984,
      "learning_rate": 4.710485133020345e-05,
      "loss": 0.8535,
      "step": 324
    },
    {
      "epoch": 1.52,
      "eval_loss": 0.8724045157432556,
      "eval_runtime": 53.7012,
      "eval_samples_per_second": 4.19,
      "eval_steps_per_second": 1.061,
      "step": 324
    },
    {
      "epoch": 1.77,
      "grad_norm": 2.5754685401916504,
      "learning_rate": 4.569640062597809e-05,
      "loss": 0.8884,
      "step": 378
    },
    {
      "epoch": 1.77,
      "eval_loss": 0.8635247349739075,
      "eval_runtime": 53.7138,
      "eval_samples_per_second": 4.189,
      "eval_steps_per_second": 1.061,
      "step": 378
    },
    {
      "epoch": 2.03,
      "grad_norm": 3.002856492996216,
      "learning_rate": 4.428794992175274e-05,
      "loss": 0.8498,
      "step": 432
    },
    {
      "epoch": 2.03,
      "eval_loss": 0.8616425395011902,
      "eval_runtime": 53.7191,
      "eval_samples_per_second": 4.188,
      "eval_steps_per_second": 1.061,
      "step": 432
    }
  ],
  "logging_steps": 54,
  "max_steps": 2130,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 54,
  "total_flos": 3.10783405694976e+16,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}