{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 40,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.5,
"grad_norm": 37.25093460083008,
"learning_rate": 4.375e-05,
"loss": 2.2626,
"step": 5
},
{
"epoch": 5.0,
"grad_norm": 49.72227096557617,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.8291,
"step": 10
},
{
"epoch": 7.5,
"grad_norm": 16.81373405456543,
"learning_rate": 3.125e-05,
"loss": 0.8602,
"step": 15
},
{
"epoch": 10.0,
"grad_norm": 7.272302627563477,
"learning_rate": 2.5e-05,
"loss": 0.5567,
"step": 20
},
{
"epoch": 12.5,
"grad_norm": 9.128313064575195,
"learning_rate": 1.8750000000000002e-05,
"loss": 0.7576,
"step": 25
},
{
"epoch": 15.0,
"grad_norm": 14.296534538269043,
"learning_rate": 1.25e-05,
"loss": 0.4636,
"step": 30
},
{
"epoch": 17.5,
"grad_norm": 5.532127380371094,
"learning_rate": 6.25e-06,
"loss": 0.6616,
"step": 35
},
{
"epoch": 20.0,
"grad_norm": 0.8484093546867371,
"learning_rate": 0.0,
"loss": 0.3679,
"step": 40
},
{
"epoch": 20.0,
"step": 40,
"total_flos": 87250496949000.0,
"train_loss": 0.8449213564395904,
"train_runtime": 26.3276,
"train_samples_per_second": 56.974,
"train_steps_per_second": 1.519
}
],
"logging_steps": 5,
"max_steps": 40,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 87250496949000.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}