whisper-small-hi / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7699115044247788,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.44,
"grad_norm": 9.63073444366455,
"learning_rate": 5e-06,
"loss": 0.7305,
"step": 25
},
{
"epoch": 0.88,
"grad_norm": 2.945356607437134,
"learning_rate": 1e-05,
"loss": 0.1912,
"step": 50
},
{
"epoch": 0.88,
"eval_loss": 3.5328922271728516,
"eval_runtime": 111.5535,
"eval_samples_per_second": 0.887,
"eval_steps_per_second": 0.063,
"eval_wer": 1.2171099928109275,
"step": 50
},
{
"epoch": 1.33,
"grad_norm": 2.5617167949676514,
"learning_rate": 5e-06,
"loss": 0.0998,
"step": 75
},
{
"epoch": 1.77,
"grad_norm": 1.9796466827392578,
"learning_rate": 0.0,
"loss": 0.0828,
"step": 100
},
{
"epoch": 1.77,
"eval_loss": 3.6638808250427246,
"eval_runtime": 106.6363,
"eval_samples_per_second": 0.928,
"eval_steps_per_second": 0.066,
"eval_wer": 1.216391085549964,
"step": 100
},
{
"epoch": 1.77,
"step": 100,
"total_flos": 9.23473281024e+17,
"train_loss": 0.2760725998878479,
"train_runtime": 1580.9218,
"train_samples_per_second": 2.024,
"train_steps_per_second": 0.063
}
],
"logging_steps": 25,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"total_flos": 9.23473281024e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
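
Below is a minimal sketch, not part of the checkpoint itself, showing one way to read this trainer_state.json with the transformers TrainerState helper and print the evaluation entries from log_history. The filename path and the use of TrainerState.load_from_json assume a standard Trainer-produced state file in the working directory.

```python
# Minimal sketch (assumption: trainer_state.json sits in the working directory
# and was written by a recent transformers Trainer).
from transformers import TrainerState

state = TrainerState.load_from_json("trainer_state.json")

# log_history mirrors the "log_history" array above: one dict per logging or
# evaluation event. Print only the evaluation entries (those carrying eval_wer).
for entry in state.log_history:
    if "eval_wer" in entry:
        print(
            f"step {entry['step']}: "
            f"eval_loss={entry['eval_loss']:.4f}, "
            f"eval_wer={entry['eval_wer']:.4f}"
        )
```

For this file, the loop above would report the two evaluation events logged at steps 50 and 100.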