{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 99.9728,
  "global_step": 46800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.07,
      "learning_rate": 2.976e-05,
      "loss": 9.3762,
      "step": 500
    },
    {
      "epoch": 2.13,
      "learning_rate": 2.9678617710583153e-05,
      "loss": 2.6416,
      "step": 1000
    },
    {
      "epoch": 2.13,
      "eval_cer": 0.15406803531769678,
      "eval_loss": 0.8599101305007935,
      "eval_runtime": 83.463,
      "eval_samples_per_second": 17.972,
      "eval_steps_per_second": 2.252,
      "eval_wer": 0.644871073981846,
      "step": 1000
    },
    {
      "epoch": 3.2,
      "learning_rate": 2.9354643628509722e-05,
      "loss": 0.506,
      "step": 1500
    },
    {
      "epoch": 4.27,
      "learning_rate": 2.9030669546436284e-05,
      "loss": 0.2633,
      "step": 2000
    },
    {
      "epoch": 4.27,
      "eval_cer": 0.033489518444287956,
      "eval_loss": 0.18973715603351593,
      "eval_runtime": 81.5499,
      "eval_samples_per_second": 18.394,
      "eval_steps_per_second": 2.305,
      "eval_wer": 0.14306369989557394,
      "step": 2000
    },
    {
      "epoch": 5.33,
      "learning_rate": 2.8706695464362852e-05,
      "loss": 0.2016,
      "step": 2500
    },
    {
      "epoch": 6.4,
      "learning_rate": 2.8383369330453565e-05,
      "loss": 0.1739,
      "step": 3000
    },
    {
      "epoch": 6.4,
      "eval_cer": 0.028912301608796924,
      "eval_loss": 0.17320764064788818,
      "eval_runtime": 78.6521,
      "eval_samples_per_second": 19.071,
      "eval_steps_per_second": 2.39,
      "eval_wer": 0.11454735320106033,
      "step": 3000
    },
    {
      "epoch": 7.47,
      "learning_rate": 2.805939524838013e-05,
      "loss": 0.1493,
      "step": 3500
    },
    {
      "epoch": 8.53,
      "learning_rate": 2.7735421166306696e-05,
      "loss": 0.1378,
      "step": 4000
    },
    {
      "epoch": 8.53,
      "eval_cer": 0.027585179567737393,
      "eval_loss": 0.17293226718902588,
      "eval_runtime": 72.8342,
      "eval_samples_per_second": 20.595,
      "eval_steps_per_second": 2.581,
      "eval_wer": 0.10659490722146357,
      "step": 4000
    },
    {
      "epoch": 9.6,
      "learning_rate": 2.741144708423326e-05,
      "loss": 0.127,
      "step": 4500
    },
    {
      "epoch": 10.67,
      "learning_rate": 2.708747300215983e-05,
      "loss": 0.1172,
      "step": 5000
    },
    {
      "epoch": 10.67,
      "eval_cer": 0.02684036617734684,
      "eval_loss": 0.17730334401130676,
      "eval_runtime": 69.8987,
      "eval_samples_per_second": 21.46,
      "eval_steps_per_second": 2.69,
      "eval_wer": 0.10185557072857257,
      "step": 5000
    },
    {
      "epoch": 11.73,
      "learning_rate": 2.676349892008639e-05,
      "loss": 0.1112,
      "step": 5500
    },
    {
      "epoch": 12.8,
      "learning_rate": 2.643952483801296e-05,
      "loss": 0.1049,
      "step": 6000
    },
    {
      "epoch": 12.8,
      "eval_cer": 0.02552678619793077,
      "eval_loss": 0.1701209545135498,
      "eval_runtime": 70.2839,
      "eval_samples_per_second": 21.342,
      "eval_steps_per_second": 2.675,
      "eval_wer": 0.09374246927464053,
      "step": 6000
    },
    {
      "epoch": 13.87,
      "learning_rate": 2.6115550755939525e-05,
      "loss": 0.1026,
      "step": 6500
    },
    {
      "epoch": 14.93,
      "learning_rate": 2.579157667386609e-05,
      "loss": 0.0951,
      "step": 7000
    },
    {
      "epoch": 14.93,
      "eval_cer": 0.025296571149991873,
      "eval_loss": 0.17183555662631989,
      "eval_runtime": 70.8748,
      "eval_samples_per_second": 21.164,
      "eval_steps_per_second": 2.653,
      "eval_wer": 0.09326050285163467,
      "step": 7000
    },
    {
      "epoch": 16.0,
      "learning_rate": 2.546760259179266e-05,
      "loss": 0.0894,
      "step": 7500
    },
    {
      "epoch": 17.07,
      "learning_rate": 2.514362850971922e-05,
      "loss": 0.0851,
      "step": 8000
    },
    {
      "epoch": 17.07,
      "eval_cer": 0.02386111261578463,
      "eval_loss": 0.17866037786006927,
      "eval_runtime": 70.4791,
      "eval_samples_per_second": 21.283,
      "eval_steps_per_second": 2.667,
      "eval_wer": 0.08338019118001445,
      "step": 8000
    },
    {
      "epoch": 18.13,
      "learning_rate": 2.481965442764579e-05,
      "loss": 0.0841,
      "step": 8500
    },
    {
      "epoch": 19.2,
      "learning_rate": 2.4495680345572355e-05,
      "loss": 0.0809,
      "step": 9000
    },
    {
      "epoch": 19.2,
      "eval_cer": 0.023549645197984942,
      "eval_loss": 0.18024054169654846,
      "eval_runtime": 70.9657,
      "eval_samples_per_second": 21.137,
      "eval_steps_per_second": 2.649,
      "eval_wer": 0.08354084665434974,
      "step": 9000
    },
    {
      "epoch": 20.27,
      "learning_rate": 2.417170626349892e-05,
      "loss": 0.0768,
      "step": 9500
    },
    {
      "epoch": 21.33,
      "learning_rate": 2.384773218142549e-05,
      "loss": 0.0756,
      "step": 10000
    },
    {
      "epoch": 21.33,
      "eval_cer": 0.02386111261578463,
      "eval_loss": 0.17842911183834076,
      "eval_runtime": 71.3112,
      "eval_samples_per_second": 21.035,
      "eval_steps_per_second": 2.636,
      "eval_wer": 0.08554904008354085,
      "step": 10000
    },
    {
      "epoch": 22.4,
      "learning_rate": 2.352375809935205e-05,
      "loss": 0.0726,
      "step": 10500
    },
    {
      "epoch": 23.47,
      "learning_rate": 2.319978401727862e-05,
      "loss": 0.0708,
      "step": 11000
    },
    {
      "epoch": 23.47,
      "eval_cer": 0.023522561074698013,
      "eval_loss": 0.1748334914445877,
      "eval_runtime": 70.5277,
      "eval_samples_per_second": 21.268,
      "eval_steps_per_second": 2.666,
      "eval_wer": 0.08241625833400273,
      "step": 11000
    },
    {
      "epoch": 24.53,
      "learning_rate": 2.2875809935205184e-05,
      "loss": 0.0723,
      "step": 11500
    },
    {
      "epoch": 25.6,
      "learning_rate": 2.255183585313175e-05,
      "loss": 0.0657,
      "step": 12000
    },
    {
      "epoch": 25.6,
      "eval_cer": 0.022764205622663996,
      "eval_loss": 0.18295687437057495,
      "eval_runtime": 70.62,
      "eval_samples_per_second": 21.24,
      "eval_steps_per_second": 2.662,
      "eval_wer": 0.07960478753313519,
      "step": 12000
    },
    {
      "epoch": 26.67,
      "learning_rate": 2.2227861771058315e-05,
      "loss": 0.0673,
      "step": 12500
    },
    {
      "epoch": 27.73,
      "learning_rate": 2.1903887688984883e-05,
      "loss": 0.0605,
      "step": 13000
    },
    {
      "epoch": 27.73,
      "eval_cer": 0.022967336547315965,
      "eval_loss": 0.18958806991577148,
      "eval_runtime": 70.8663,
      "eval_samples_per_second": 21.167,
      "eval_steps_per_second": 2.653,
      "eval_wer": 0.07976544300747047,
      "step": 13000
    },
    {
      "epoch": 28.8,
      "learning_rate": 2.157991360691145e-05,
      "loss": 0.0629,
      "step": 13500
    },
    {
      "epoch": 29.87,
      "learning_rate": 2.1257235421166306e-05,
      "loss": 0.0583,
      "step": 14000
    },
    {
      "epoch": 29.87,
      "eval_cer": 0.02242565408157738,
      "eval_loss": 0.18885250389575958,
      "eval_runtime": 71.1848,
      "eval_samples_per_second": 21.072,
      "eval_steps_per_second": 2.641,
      "eval_wer": 0.07775724957827938,
      "step": 14000
    },
    {
      "epoch": 30.93,
      "learning_rate": 2.0933261339092874e-05,
      "loss": 0.0611,
      "step": 14500
    },
    {
      "epoch": 32.0,
      "learning_rate": 2.060928725701944e-05,
      "loss": 0.0608,
      "step": 15000
    },
    {
      "epoch": 32.0,
      "eval_cer": 0.022263149341855804,
      "eval_loss": 0.1849208027124405,
      "eval_runtime": 72.432,
      "eval_samples_per_second": 20.709,
      "eval_steps_per_second": 2.596,
      "eval_wer": 0.07574905614908828,
      "step": 15000
    },
    {
      "epoch": 33.07,
      "learning_rate": 2.0285313174946005e-05,
      "loss": 0.0574,
      "step": 15500
    },
    {
      "epoch": 34.13,
      "learning_rate": 1.9961987041036718e-05,
      "loss": 0.0556,
      "step": 16000
    },
    {
      "epoch": 34.13,
      "eval_cer": 0.0223037755267862,
      "eval_loss": 0.18723702430725098,
      "eval_runtime": 71.3886,
      "eval_samples_per_second": 21.012,
      "eval_steps_per_second": 2.633,
      "eval_wer": 0.07671298899510001,
      "step": 16000
    },
    {
      "epoch": 35.2,
      "learning_rate": 1.9638012958963286e-05,
      "loss": 0.0534,
      "step": 16500
    },
    {
      "epoch": 36.27,
      "learning_rate": 1.9314038876889848e-05,
      "loss": 0.0534,
      "step": 17000
    },
    {
      "epoch": 36.27,
      "eval_cer": 0.022141270787064623,
      "eval_loss": 0.1893489509820938,
      "eval_runtime": 71.3518,
      "eval_samples_per_second": 21.023,
      "eval_steps_per_second": 2.635,
      "eval_wer": 0.07510643425174712,
      "step": 17000
    },
    {
      "epoch": 37.33,
      "learning_rate": 1.8990064794816417e-05,
      "loss": 0.0528,
      "step": 17500
    },
    {
      "epoch": 38.4,
      "learning_rate": 1.8666090712742982e-05,
      "loss": 0.0523,
      "step": 18000
    },
    {
      "epoch": 38.4,
      "eval_cer": 0.021789177184334544,
      "eval_loss": 0.19247621297836304,
      "eval_runtime": 71.3463,
      "eval_samples_per_second": 21.024,
      "eval_steps_per_second": 2.635,
      "eval_wer": 0.07293758534822074,
      "step": 18000
    },
    {
      "epoch": 39.47,
      "learning_rate": 1.8342116630669547e-05,
      "loss": 0.0507,
      "step": 18500
    },
    {
      "epoch": 40.53,
      "learning_rate": 1.801879049676026e-05,
      "loss": 0.0494,
      "step": 19000
    },
    {
      "epoch": 40.53,
      "eval_cer": 0.0220735604788473,
      "eval_loss": 0.1956777721643448,
      "eval_runtime": 71.5278,
      "eval_samples_per_second": 20.971,
      "eval_steps_per_second": 2.628,
      "eval_wer": 0.07454414009157362,
      "step": 19000
    },
    {
      "epoch": 41.6,
      "learning_rate": 1.7694816414686825e-05,
      "loss": 0.0504,
      "step": 19500
    },
    {
      "epoch": 42.67,
      "learning_rate": 1.7370842332613394e-05,
      "loss": 0.0475,
      "step": 20000
    },
    {
      "epoch": 42.67,
      "eval_cer": 0.021748550999404148,
      "eval_loss": 0.19612860679626465,
      "eval_runtime": 71.56,
      "eval_samples_per_second": 20.961,
      "eval_steps_per_second": 2.627,
      "eval_wer": 0.07398184593140011,
      "step": 20000
    },
    {
      "epoch": 43.73,
      "learning_rate": 1.7046868250539956e-05,
      "loss": 0.0489,
      "step": 20500
    },
    {
      "epoch": 44.8,
      "learning_rate": 1.6722894168466524e-05,
      "loss": 0.048,
      "step": 21000
    },
    {
      "epoch": 44.8,
      "eval_cer": 0.021355831211743677,
      "eval_loss": 0.19570261240005493,
      "eval_runtime": 71.295,
      "eval_samples_per_second": 21.039,
      "eval_steps_per_second": 2.637,
      "eval_wer": 0.0714113583420355,
      "step": 21000
    },
    {
      "epoch": 45.87,
      "learning_rate": 1.639892008639309e-05,
      "loss": 0.0453,
      "step": 21500
    },
    {
      "epoch": 46.93,
      "learning_rate": 1.6074946004319655e-05,
      "loss": 0.0459,
      "step": 22000
    },
    {
      "epoch": 46.93,
      "eval_cer": 0.021450625643247928,
      "eval_loss": 0.19677624106407166,
      "eval_runtime": 71.8431,
      "eval_samples_per_second": 20.879,
      "eval_steps_per_second": 2.617,
      "eval_wer": 0.07165234155353843,
      "step": 22000
    },
    {
      "epoch": 48.0,
      "learning_rate": 1.5750971922246223e-05,
      "loss": 0.045,
      "step": 22500
    },
    {
      "epoch": 49.07,
      "learning_rate": 1.5426997840172785e-05,
      "loss": 0.0435,
      "step": 23000
    },
    {
      "epoch": 49.07,
      "eval_cer": 0.02172146687611722,
      "eval_loss": 0.20079585909843445,
      "eval_runtime": 74.2582,
      "eval_samples_per_second": 20.2,
      "eval_steps_per_second": 2.532,
      "eval_wer": 0.07173266929070608,
      "step": 23000
    },
    {
      "epoch": 50.13,
      "learning_rate": 1.5103023758099354e-05,
      "loss": 0.043,
      "step": 23500
    },
    {
      "epoch": 51.2,
      "learning_rate": 1.4779049676025917e-05,
      "loss": 0.0428,
      "step": 24000
    },
    {
      "epoch": 51.2,
      "eval_cer": 0.021233952656952493,
      "eval_loss": 0.19910749793052673,
      "eval_runtime": 73.3587,
      "eval_samples_per_second": 20.447,
      "eval_steps_per_second": 2.563,
      "eval_wer": 0.06964414812434734,
      "step": 24000
    },
    {
      "epoch": 52.27,
      "learning_rate": 1.4455075593952484e-05,
      "loss": 0.0436,
      "step": 24500
    },
    {
      "epoch": 53.33,
      "learning_rate": 1.4131101511879051e-05,
      "loss": 0.0418,
      "step": 25000
    },
    {
      "epoch": 53.33,
      "eval_cer": 0.02154542007475218,
      "eval_loss": 0.2034166008234024,
      "eval_runtime": 75.9701,
      "eval_samples_per_second": 19.745,
      "eval_steps_per_second": 2.475,
      "eval_wer": 0.0714113583420355,
      "step": 25000
    },
    {
      "epoch": 54.4,
      "learning_rate": 1.3807127429805616e-05,
      "loss": 0.0415,
      "step": 25500
    },
    {
      "epoch": 55.47,
      "learning_rate": 1.3483153347732181e-05,
      "loss": 0.0404,
      "step": 26000
    },
    {
      "epoch": 55.47,
      "eval_cer": 0.02099019554737013,
      "eval_loss": 0.20137159526348114,
      "eval_runtime": 89.8064,
      "eval_samples_per_second": 16.703,
      "eval_steps_per_second": 2.093,
      "eval_wer": 0.06843923206683268,
      "step": 26000
    },
    {
      "epoch": 56.53,
      "learning_rate": 1.3159827213822896e-05,
      "loss": 0.0415,
      "step": 26500
    },
    {
      "epoch": 57.6,
      "learning_rate": 1.2836501079913607e-05,
      "loss": 0.0394,
      "step": 27000
    },
    {
      "epoch": 57.6,
      "eval_cer": 0.021003737609013595,
      "eval_loss": 0.20499302446842194,
      "eval_runtime": 84.8526,
      "eval_samples_per_second": 17.678,
      "eval_steps_per_second": 2.216,
      "eval_wer": 0.0681179211181621,
      "step": 27000
    },
    {
      "epoch": 58.67,
      "learning_rate": 1.2512526997840172e-05,
      "loss": 0.0396,
      "step": 27500
    },
    {
      "epoch": 59.73,
      "learning_rate": 1.218855291576674e-05,
      "loss": 0.0399,
      "step": 28000
    },
    {
      "epoch": 59.73,
      "eval_cer": 0.021112074102161312,
      "eval_loss": 0.2038762867450714,
      "eval_runtime": 88.8478,
      "eval_samples_per_second": 16.883,
      "eval_steps_per_second": 2.116,
      "eval_wer": 0.07004578681018556,
      "step": 28000
    },
    {
      "epoch": 60.8,
      "learning_rate": 1.1864578833693305e-05,
      "loss": 0.0367,
      "step": 28500
    },
    {
      "epoch": 61.87,
      "learning_rate": 1.1541252699784017e-05,
      "loss": 0.0389,
      "step": 29000
    },
    {
      "epoch": 61.87,
      "eval_cer": 0.021355831211743677,
      "eval_loss": 0.20906661450862885,
      "eval_runtime": 92.2839,
      "eval_samples_per_second": 16.254,
      "eval_steps_per_second": 2.037,
      "eval_wer": 0.0694031649128444,
      "step": 29000
    },
    {
      "epoch": 62.93,
      "learning_rate": 1.1217278617710583e-05,
      "loss": 0.0381,
      "step": 29500
    },
    {
      "epoch": 64.0,
      "learning_rate": 1.089330453563715e-05,
      "loss": 0.038,
      "step": 30000
    },
    {
      "epoch": 64.0,
      "eval_cer": 0.02097665348572667,
      "eval_loss": 0.21002130210399628,
      "eval_runtime": 89.2275,
      "eval_samples_per_second": 16.811,
      "eval_steps_per_second": 2.107,
      "eval_wer": 0.07020644228452085,
      "step": 30000
    },
    {
      "epoch": 65.07,
      "learning_rate": 1.0569978401727863e-05,
      "loss": 0.0367,
      "step": 30500
    },
    {
      "epoch": 66.13,
      "learning_rate": 1.0246004319654428e-05,
      "loss": 0.0361,
      "step": 31000
    },
    {
      "epoch": 66.13,
      "eval_cer": 0.021450625643247928,
      "eval_loss": 0.21194988489151,
      "eval_runtime": 81.4435,
      "eval_samples_per_second": 18.418,
      "eval_steps_per_second": 2.308,
      "eval_wer": 0.07028677002168848,
      "step": 31000
    },
    {
      "epoch": 67.2,
      "learning_rate": 9.922030237580995e-06,
      "loss": 0.0346,
      "step": 31500
    },
    {
      "epoch": 68.27,
      "learning_rate": 9.59805615550756e-06,
      "loss": 0.0359,
      "step": 32000
    },
    {
      "epoch": 68.27,
      "eval_cer": 0.02134228915010021,
      "eval_loss": 0.21081893146038055,
      "eval_runtime": 80.4987,
      "eval_samples_per_second": 18.634,
      "eval_steps_per_second": 2.335,
      "eval_wer": 0.0714113583420355,
      "step": 32000
    },
    {
      "epoch": 69.33,
      "learning_rate": 9.274082073434125e-06,
      "loss": 0.0363,
      "step": 32500
    },
    {
      "epoch": 70.4,
      "learning_rate": 8.95010799136069e-06,
      "loss": 0.0354,
      "step": 33000
    },
    {
      "epoch": 70.4,
      "eval_cer": 0.02112561616380478,
      "eval_loss": 0.2120192050933838,
      "eval_runtime": 87.4388,
      "eval_samples_per_second": 17.155,
      "eval_steps_per_second": 2.15,
      "eval_wer": 0.06988513133585027,
      "step": 33000
    },
    {
      "epoch": 71.47,
      "learning_rate": 8.626133909287257e-06,
      "loss": 0.0349,
      "step": 33500
    },
    {
      "epoch": 72.53,
      "learning_rate": 8.302159827213824e-06,
      "loss": 0.0364,
      "step": 34000
    },
    {
      "epoch": 72.53,
      "eval_cer": 0.02107144791723092,
      "eval_loss": 0.21277020871639252,
      "eval_runtime": 86.1965,
      "eval_samples_per_second": 17.402,
      "eval_steps_per_second": 2.181,
      "eval_wer": 0.0688408707526709,
      "step": 34000
    },
    {
      "epoch": 73.6,
      "learning_rate": 7.97818574514039e-06,
      "loss": 0.0332,
      "step": 34500
    },
    {
      "epoch": 74.67,
      "learning_rate": 7.654211663066955e-06,
      "loss": 0.0361,
      "step": 35000
    },
    {
      "epoch": 74.67,
      "eval_cer": 0.02124749471859596,
      "eval_loss": 0.2133743017911911,
      "eval_runtime": 84.4863,
      "eval_samples_per_second": 17.754,
      "eval_steps_per_second": 2.225,
      "eval_wer": 0.0694031649128444,
      "step": 35000
    },
    {
      "epoch": 75.73,
      "learning_rate": 7.330885529157668e-06,
      "loss": 0.0349,
      "step": 35500
    },
    {
      "epoch": 76.8,
      "learning_rate": 7.0069114470842335e-06,
      "loss": 0.0332,
      "step": 36000
    },
    {
      "epoch": 76.8,
      "eval_cer": 0.02104436379394399,
      "eval_loss": 0.2175784856081009,
      "eval_runtime": 74.8124,
      "eval_samples_per_second": 20.05,
      "eval_steps_per_second": 2.513,
      "eval_wer": 0.06980480359868263,
      "step": 36000
    },
    {
      "epoch": 77.87,
      "learning_rate": 6.6829373650108e-06,
      "loss": 0.032,
      "step": 36500
    },
    {
      "epoch": 78.93,
      "learning_rate": 6.358963282937365e-06,
      "loss": 0.0341,
      "step": 37000
    },
    {
      "epoch": 78.93,
      "eval_cer": 0.020827690807648555,
      "eval_loss": 0.21700014173984528,
      "eval_runtime": 79.1989,
      "eval_samples_per_second": 18.94,
      "eval_steps_per_second": 2.374,
      "eval_wer": 0.0688408707526709,
      "step": 37000
    },
    {
      "epoch": 80.0,
      "learning_rate": 6.034989200863931e-06,
      "loss": 0.0323,
      "step": 37500
    },
    {
      "epoch": 81.07,
      "learning_rate": 5.711015118790497e-06,
      "loss": 0.032,
      "step": 38000
    },
    {
      "epoch": 81.07,
      "eval_cer": 0.020908943177509343,
      "eval_loss": 0.215650275349617,
      "eval_runtime": 74.4056,
      "eval_samples_per_second": 20.16,
      "eval_steps_per_second": 2.527,
      "eval_wer": 0.06859988754116797,
      "step": 38000
    },
    {
      "epoch": 82.26,
      "learning_rate": 5.387041036717062e-06,
      "loss": 0.0321,
      "step": 38500
    },
    {
      "epoch": 83.33,
      "learning_rate": 5.063066954643629e-06,
      "loss": 0.0318,
      "step": 39000
    },
    {
      "epoch": 83.33,
      "eval_cer": 0.020908943177509343,
      "eval_loss": 0.2165554016828537,
      "eval_runtime": 69.4748,
      "eval_samples_per_second": 21.591,
      "eval_steps_per_second": 2.706,
      "eval_wer": 0.06851955980400032,
      "step": 39000
    },
    {
      "epoch": 84.4,
      "learning_rate": 4.739092872570194e-06,
      "loss": 0.0338,
      "step": 39500
    },
    {
      "epoch": 85.47,
      "learning_rate": 4.415766738660907e-06,
      "loss": 0.0325,
      "step": 40000
    },
    {
      "epoch": 85.47,
      "eval_cer": 0.020881859054222414,
      "eval_loss": 0.21718913316726685,
      "eval_runtime": 70.6439,
      "eval_samples_per_second": 21.233,
      "eval_steps_per_second": 2.661,
      "eval_wer": 0.06868021527833561,
      "step": 40000
    },
    {
      "epoch": 86.53,
      "learning_rate": 4.09244060475162e-06,
      "loss": 0.0318,
      "step": 40500
    },
    {
      "epoch": 87.6,
      "learning_rate": 3.7684665226781857e-06,
      "loss": 0.0316,
      "step": 41000
    },
    {
      "epoch": 87.6,
      "eval_cer": 0.020800606684361626,
      "eval_loss": 0.21813979744911194,
      "eval_runtime": 81.1292,
      "eval_samples_per_second": 18.489,
      "eval_steps_per_second": 2.317,
      "eval_wer": 0.06779661016949153,
      "step": 41000
    },
    {
      "epoch": 88.67,
      "learning_rate": 3.4444924406047518e-06,
      "loss": 0.0325,
      "step": 41500
    },
    {
      "epoch": 89.73,
      "learning_rate": 3.1205183585313174e-06,
      "loss": 0.0302,
      "step": 42000
    },
    {
      "epoch": 89.73,
      "eval_cer": 0.02084123286929202,
      "eval_loss": 0.21708616614341736,
      "eval_runtime": 81.2548,
      "eval_samples_per_second": 18.46,
      "eval_steps_per_second": 2.314,
      "eval_wer": 0.06787693790665916,
      "step": 42000
    },
    {
      "epoch": 90.8,
      "learning_rate": 2.7971922246220303e-06,
      "loss": 0.031,
      "step": 42500
    },
    {
      "epoch": 91.87,
      "learning_rate": 2.473218142548596e-06,
      "loss": 0.0318,
      "step": 43000
    },
    {
      "epoch": 91.87,
      "eval_cer": 0.02112561616380478,
      "eval_loss": 0.21793220937252045,
      "eval_runtime": 81.745,
      "eval_samples_per_second": 18.35,
      "eval_steps_per_second": 2.3,
      "eval_wer": 0.07020644228452085,
      "step": 43000
    },
    {
      "epoch": 92.93,
      "learning_rate": 2.149244060475162e-06,
      "loss": 0.0309,
      "step": 43500
    },
    {
      "epoch": 94.0,
      "learning_rate": 1.8252699784017279e-06,
      "loss": 0.0314,
      "step": 44000
    },
    {
      "epoch": 94.0,
      "eval_cer": 0.020800606684361626,
      "eval_loss": 0.21862204372882843,
      "eval_runtime": 82.362,
      "eval_samples_per_second": 18.212,
      "eval_steps_per_second": 2.283,
      "eval_wer": 0.06900152622700619,
      "step": 44000
    },
    {
      "epoch": 95.07,
      "learning_rate": 1.5012958963282937e-06,
      "loss": 0.0299,
      "step": 44500
    },
    {
      "epoch": 96.13,
      "learning_rate": 1.1779697624190064e-06,
      "loss": 0.0309,
      "step": 45000
    },
    {
      "epoch": 96.13,
      "eval_cer": 0.02097665348572667,
      "eval_loss": 0.219330295920372,
      "eval_runtime": 80.0094,
      "eval_samples_per_second": 18.748,
      "eval_steps_per_second": 2.35,
      "eval_wer": 0.06964414812434734,
      "step": 45000
    },
    {
      "epoch": 97.2,
      "learning_rate": 8.539956803455724e-07,
      "loss": 0.0304,
      "step": 45500
    },
    {
      "epoch": 98.27,
      "learning_rate": 5.300215982721382e-07,
      "loss": 0.031,
      "step": 46000
    },
    {
      "epoch": 98.27,
      "eval_cer": 0.0207735225610747,
      "eval_loss": 0.219051793217659,
      "eval_runtime": 82.8771,
      "eval_samples_per_second": 18.099,
      "eval_steps_per_second": 2.268,
      "eval_wer": 0.06859988754116797,
      "step": 46000
    },
    {
      "epoch": 99.33,
      "learning_rate": 2.0604751619870412e-07,
      "loss": 0.0309,
      "step": 46500
    },
    {
      "epoch": 99.97,
      "step": 46800,
      "total_flos": 2.6879054505409987e+20,
      "train_loss": 0.005243639966361543,
      "train_runtime": 17869.2377,
      "train_samples_per_second": 83.943,
      "train_steps_per_second": 2.619
    }
  ],
  "max_steps": 46800,
  "num_train_epochs": 100,
  "total_flos": 2.6879054505409987e+20,
  "trial_name": null,
  "trial_params": null
}