{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 5400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.7,
      "learning_rate": 4.924999999999999e-05,
      "loss": 11.9966,
      "step": 200
    },
    {
      "epoch": 7.41,
      "learning_rate": 9.924999999999999e-05,
      "loss": 3.4607,
      "step": 400
    },
    {
      "epoch": 9.26,
      "eval_loss": 2.7746095657348633,
      "eval_runtime": 13.5943,
      "eval_samples_per_second": 26.482,
      "eval_steps_per_second": 3.31,
      "eval_wer": 1.0415523324186593,
      "step": 500
    },
    {
      "epoch": 11.11,
      "learning_rate": 0.00014925,
      "loss": 2.4077,
      "step": 600
    },
    {
      "epoch": 14.81,
      "learning_rate": 0.00019925,
      "loss": 0.5977,
      "step": 800
    },
    {
      "epoch": 18.52,
      "learning_rate": 0.00024924999999999996,
      "loss": 0.3442,
      "step": 1000
    },
    {
      "epoch": 18.52,
      "eval_loss": 0.9114237427711487,
      "eval_runtime": 13.1778,
      "eval_samples_per_second": 27.319,
      "eval_steps_per_second": 3.415,
      "eval_wer": 0.591140729125833,
      "step": 1000
    },
    {
      "epoch": 22.22,
      "learning_rate": 0.00029925,
      "loss": 0.28,
      "step": 1200
    },
    {
      "epoch": 25.93,
      "learning_rate": 0.0002859285714285714,
      "loss": 0.2213,
      "step": 1400
    },
    {
      "epoch": 27.78,
      "eval_loss": 0.9686752557754517,
      "eval_runtime": 12.9893,
      "eval_samples_per_second": 27.715,
      "eval_steps_per_second": 3.464,
      "eval_wer": 0.5750686005488044,
      "step": 1500
    },
    {
      "epoch": 29.63,
      "learning_rate": 0.0002716428571428571,
      "loss": 0.1746,
      "step": 1600
    },
    {
      "epoch": 33.33,
      "learning_rate": 0.00025735714285714283,
      "loss": 0.1472,
      "step": 1800
    },
    {
      "epoch": 37.04,
      "learning_rate": 0.00024307142857142854,
      "loss": 0.1242,
      "step": 2000
    },
    {
      "epoch": 37.04,
      "eval_loss": 1.0203704833984375,
      "eval_runtime": 13.2024,
      "eval_samples_per_second": 27.268,
      "eval_steps_per_second": 3.408,
      "eval_wer": 0.5460603684829478,
      "step": 2000
    },
    {
      "epoch": 40.74,
      "learning_rate": 0.00022878571428571427,
      "loss": 0.1135,
      "step": 2200
    },
    {
      "epoch": 44.44,
      "learning_rate": 0.00021449999999999998,
      "loss": 0.0998,
      "step": 2400
    },
    {
      "epoch": 46.3,
      "eval_loss": 1.0250211954116821,
      "eval_runtime": 13.0242,
      "eval_samples_per_second": 27.641,
      "eval_steps_per_second": 3.455,
      "eval_wer": 0.5233241865934928,
      "step": 2500
    },
    {
      "epoch": 48.15,
      "learning_rate": 0.00020021428571428572,
      "loss": 0.0893,
      "step": 2600
    },
    {
      "epoch": 51.85,
      "learning_rate": 0.0001859285714285714,
      "loss": 0.0759,
      "step": 2800
    },
    {
      "epoch": 55.56,
      "learning_rate": 0.00017164285714285713,
      "loss": 0.0727,
      "step": 3000
    },
    {
      "epoch": 55.56,
      "eval_loss": 1.1072067022323608,
      "eval_runtime": 13.8552,
      "eval_samples_per_second": 25.983,
      "eval_steps_per_second": 3.248,
      "eval_wer": 0.5382203057624461,
      "step": 3000
    },
    {
      "epoch": 59.26,
      "learning_rate": 0.00015735714285714284,
      "loss": 0.0694,
      "step": 3200
    },
    {
      "epoch": 62.96,
      "learning_rate": 0.00014307142857142855,
      "loss": 0.0605,
      "step": 3400
    },
    {
      "epoch": 64.81,
      "eval_loss": 1.0588016510009766,
      "eval_runtime": 13.0366,
      "eval_samples_per_second": 27.614,
      "eval_steps_per_second": 3.452,
      "eval_wer": 0.5072520580164641,
      "step": 3500
    },
    {
      "epoch": 66.67,
      "learning_rate": 0.00012878571428571426,
      "loss": 0.0558,
      "step": 3600
    },
    {
      "epoch": 70.37,
      "learning_rate": 0.00011449999999999999,
      "loss": 0.0481,
      "step": 3800
    },
    {
      "epoch": 74.07,
      "learning_rate": 0.0001002142857142857,
      "loss": 0.0458,
      "step": 4000
    },
    {
      "epoch": 74.07,
      "eval_loss": 1.0818374156951904,
      "eval_runtime": 12.9679,
      "eval_samples_per_second": 27.761,
      "eval_steps_per_second": 3.47,
      "eval_wer": 0.5068600548804391,
      "step": 4000
    },
    {
      "epoch": 77.78,
      "learning_rate": 8.592857142857142e-05,
      "loss": 0.0382,
      "step": 4200
    },
    {
      "epoch": 81.48,
      "learning_rate": 7.164285714285714e-05,
      "loss": 0.0338,
      "step": 4400
    },
    {
      "epoch": 83.33,
      "eval_loss": 1.094814419746399,
      "eval_runtime": 13.1264,
      "eval_samples_per_second": 27.426,
      "eval_steps_per_second": 3.428,
      "eval_wer": 0.5107800862406899,
      "step": 4500
    },
    {
      "epoch": 85.19,
      "learning_rate": 5.735714285714285e-05,
      "loss": 0.0308,
      "step": 4600
    },
    {
      "epoch": 88.89,
      "learning_rate": 4.307142857142857e-05,
      "loss": 0.0263,
      "step": 4800
    },
    {
      "epoch": 92.59,
      "learning_rate": 2.8785714285714286e-05,
      "loss": 0.0223,
      "step": 5000
    },
    {
      "epoch": 92.59,
      "eval_loss": 1.098594307899475,
      "eval_runtime": 13.0424,
      "eval_samples_per_second": 27.602,
      "eval_steps_per_second": 3.45,
      "eval_wer": 0.47745981967855744,
      "step": 5000
    },
    {
      "epoch": 96.3,
      "learning_rate": 1.4499999999999998e-05,
      "loss": 0.0205,
      "step": 5200
    },
    {
      "epoch": 100.0,
      "learning_rate": 2.1428571428571426e-07,
      "loss": 0.0206,
      "step": 5400
    },
    {
      "epoch": 100.0,
      "step": 5400,
      "total_flos": 1.3559072525710848e+19,
      "train_loss": 0.7658213693124276,
      "train_runtime": 5843.2467,
      "train_samples_per_second": 14.649,
      "train_steps_per_second": 0.924
    }
  ],
  "max_steps": 5400,
  "num_train_epochs": 100,
  "total_flos": 1.3559072525710848e+19,
  "trial_name": null,
  "trial_params": null
}