ckpts/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.98989898989899,
"eval_steps": 500,
"global_step": 1235,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4,
"grad_norm": 0.006182590499520302,
"learning_rate": 9.206477732793523e-06,
"loss": 0.0729,
"step": 100
},
{
"epoch": 0.81,
"grad_norm": 21.23886489868164,
"learning_rate": 8.39676113360324e-06,
"loss": 0.0698,
"step": 200
},
{
"epoch": 1.21,
"grad_norm": 0.006576939485967159,
"learning_rate": 7.587044534412956e-06,
"loss": 0.0464,
"step": 300
},
{
"epoch": 1.62,
"grad_norm": 0.009822617284953594,
"learning_rate": 6.785425101214575e-06,
"loss": 0.0689,
"step": 400
},
{
"epoch": 2.02,
"grad_norm": 0.00560363894328475,
"learning_rate": 5.975708502024292e-06,
"loss": 0.0228,
"step": 500
},
{
"epoch": 2.02,
"eval_accuracy": 0.973737359046936,
"eval_loss": 0.17728736996650696,
"eval_runtime": 23.1344,
"eval_samples_per_second": 21.397,
"eval_steps_per_second": 5.36,
"step": 500
},
{
"epoch": 2.42,
"grad_norm": 0.010259171947836876,
"learning_rate": 5.165991902834009e-06,
"loss": 0.0253,
"step": 600
},
{
"epoch": 2.83,
"grad_norm": 0.5489621758460999,
"learning_rate": 4.356275303643725e-06,
"loss": 0.0227,
"step": 700
},
{
"epoch": 3.23,
"grad_norm": 0.003071287414059043,
"learning_rate": 3.5465587044534415e-06,
"loss": 0.0398,
"step": 800
},
{
"epoch": 3.64,
"grad_norm": 0.0034055381547659636,
"learning_rate": 2.7368421052631583e-06,
"loss": 0.0299,
"step": 900
},
{
"epoch": 4.04,
"grad_norm": 0.005517472513020039,
"learning_rate": 1.9271255060728746e-06,
"loss": 0.0385,
"step": 1000
},
{
"epoch": 4.04,
"eval_accuracy": 0.9696969985961914,
"eval_loss": 0.190963014960289,
"eval_runtime": 23.4329,
"eval_samples_per_second": 21.124,
"eval_steps_per_second": 5.292,
"step": 1000
},
{
"epoch": 4.44,
"grad_norm": 19.359519958496094,
"learning_rate": 1.1174089068825912e-06,
"loss": 0.0365,
"step": 1100
},
{
"epoch": 4.85,
"grad_norm": 1.323317527770996,
"learning_rate": 3.0769230769230774e-07,
"loss": 0.0064,
"step": 1200
},
{
"epoch": 4.99,
"step": 1235,
"total_flos": 3.160649695892544e+17,
"train_loss": 0.03992950008948322,
"train_runtime": 666.1547,
"train_samples_per_second": 14.854,
"train_steps_per_second": 1.854
}
],
"logging_steps": 100,
"max_steps": 1235,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 3.160649695892544e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
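
A minimal sketch (not part of the trainer output) of one way to inspect a state file like the one above, assuming Python with matplotlib installed and the path ckpts/trainer_state.json from the repo layout; adjust the path to wherever the checkpoint directory lives locally.

import json

import matplotlib.pyplot as plt

# Load the Trainer state written by transformers at the end of training.
with open("ckpts/trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Plot training and evaluation loss against the global step.
plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs], "o--", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()

# Print the evaluation accuracy recorded at each eval step (every 500 steps here).
for e in eval_logs:
    print(f"step {e['step']}: eval_accuracy = {e['eval_accuracy']:.4f}")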