gpt4-x-vicuna-13B-GPTQ / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.9973568281938325,
  "global_step": 3545,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7,
      "learning_rate": 8.85980221058755e-06,
      "loss": 0.7246,
      "step": 500
    },
    {
      "epoch": 1.41,
      "learning_rate": 7.405468295520653e-06,
      "loss": 0.5968,
      "step": 1000
    },
    {
      "epoch": 2.11,
      "learning_rate": 5.9511343804537524e-06,
      "loss": 0.5052,
      "step": 1500
    },
    {
      "epoch": 2.82,
      "learning_rate": 4.496800465386853e-06,
      "loss": 0.3611,
      "step": 2000
    },
    {
      "epoch": 3.52,
      "learning_rate": 3.0424665503199537e-06,
      "loss": 0.2573,
      "step": 2500
    },
    {
      "epoch": 4.23,
      "learning_rate": 1.5881326352530543e-06,
      "loss": 0.1943,
      "step": 3000
    },
    {
      "epoch": 4.93,
      "learning_rate": 1.3379872018615477e-07,
      "loss": 0.1354,
      "step": 3500
    },
    {
      "epoch": 5.0,
      "step": 3545,
      "total_flos": 897107395346432.0,
      "train_loss": 0.3929895871785195,
      "train_runtime": 101143.6052,
      "train_samples_per_second": 8.977,
      "train_steps_per_second": 0.035
    }
  ],
  "max_steps": 3545,
  "num_train_epochs": 5,
  "total_flos": 897107395346432.0,
  "trial_name": null,
  "trial_params": null
}