llama-momo-2.5 / checkpoint-100 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2518891687657431,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 7.8137,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 0.0002,
"loss": 2.5574,
"step": 20
},
{
"epoch": 0.08,
"learning_rate": 0.0002,
"loss": 2.3516,
"step": 30
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 2.0521,
"step": 40
},
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 1.5531,
"step": 50
},
{
"epoch": 0.15,
"learning_rate": 0.0002,
"loss": 2.0324,
"step": 60
},
{
"epoch": 0.18,
"learning_rate": 0.0002,
"loss": 1.704,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.0002,
"loss": 1.8253,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 0.0002,
"loss": 2.1076,
"step": 90
},
{
"epoch": 0.25,
"learning_rate": 0.0002,
"loss": 1.8701,
"step": 100
}
],
"logging_steps": 10,
"max_steps": 100,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 1507829183594496.0,
"trial_name": null,
"trial_params": null
}
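
The state above follows the standard Hugging Face Transformers trainer_state.json layout (global_step, epoch, and a log_history list with one entry per logging event). A minimal sketch of reading the logged loss curve back, assuming the file has been saved locally as "trainer_state.json" (the local path is an assumption, not part of the checkpoint layout shown here):

import json

# Load the trainer state written by the Transformers Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.4f}")

# Each log_history entry corresponds to one logging event
# (every logging_steps = 10 optimizer steps in this run).
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:.2f}  "
              f"lr {entry['learning_rate']:.1e}  loss {entry['loss']:.4f}")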