{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.999443888332777,
  "global_step": 22475,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 3e-05,
      "loss": 0.1124,
      "step": 500
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.9317406143344712e-05,
      "loss": 0.1142,
      "step": 1000
    },
    {
      "epoch": 0.22,
      "eval_cer": 0.012641370841580549,
      "eval_loss": 0.04826715216040611,
      "eval_runtime": 1346.7575,
      "eval_samples_per_second": 11.058,
      "eval_steps_per_second": 1.383,
      "eval_wer": 0.07066222104846163,
      "step": 1000
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.863481228668942e-05,
      "loss": 0.113,
      "step": 1500
    },
    {
      "epoch": 0.44,
      "learning_rate": 2.7953583617747444e-05,
      "loss": 0.1049,
      "step": 2000
    },
    {
      "epoch": 0.44,
      "eval_cer": 0.01225587771877786,
      "eval_loss": 0.04738128185272217,
      "eval_runtime": 1341.1997,
      "eval_samples_per_second": 11.103,
      "eval_steps_per_second": 1.388,
      "eval_wer": 0.06750957348474845,
      "step": 2000
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.727098976109215e-05,
      "loss": 0.0991,
      "step": 2500
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.6588395904436862e-05,
      "loss": 0.0982,
      "step": 3000
    },
    {
      "epoch": 0.67,
      "eval_cer": 0.011982528049881409,
      "eval_loss": 0.04708929359912872,
      "eval_runtime": 1340.0171,
      "eval_samples_per_second": 11.113,
      "eval_steps_per_second": 1.39,
      "eval_wer": 0.0663871649280338,
      "step": 3000
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.5905802047781573e-05,
      "loss": 0.0996,
      "step": 3500
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.522457337883959e-05,
      "loss": 0.092,
      "step": 4000
    },
    {
      "epoch": 0.89,
      "eval_cer": 0.01171338376050644,
      "eval_loss": 0.04587830230593681,
      "eval_runtime": 1347.3232,
      "eval_samples_per_second": 11.053,
      "eval_steps_per_second": 1.382,
      "eval_wer": 0.0639855407368282,
      "step": 4000
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.45419795221843e-05,
      "loss": 0.0896,
      "step": 4500
    },
    {
      "epoch": 1.11,
      "learning_rate": 2.385938566552901e-05,
      "loss": 0.0847,
      "step": 5000
    },
    {
      "epoch": 1.11,
      "eval_cer": 0.011517132716170525,
      "eval_loss": 0.04586043953895569,
      "eval_runtime": 1342.7891,
      "eval_samples_per_second": 11.09,
      "eval_steps_per_second": 1.387,
      "eval_wer": 0.06308596329063779,
      "step": 5000
    },
    {
      "epoch": 1.22,
      "learning_rate": 2.317679180887372e-05,
      "loss": 0.0838,
      "step": 5500
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.249419795221843e-05,
      "loss": 0.0837,
      "step": 6000
    },
    {
      "epoch": 1.33,
      "eval_cer": 0.011336301396746719,
      "eval_loss": 0.04530934989452362,
      "eval_runtime": 1343.699,
      "eval_samples_per_second": 11.083,
      "eval_steps_per_second": 1.386,
      "eval_wer": 0.06242572296315859,
      "step": 6000
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.181160409556314e-05,
      "loss": 0.0822,
      "step": 6500
    },
    {
      "epoch": 1.56,
      "learning_rate": 2.1130375426621163e-05,
      "loss": 0.0803,
      "step": 7000
    },
    {
      "epoch": 1.56,
      "eval_cer": 0.010879316822078803,
      "eval_loss": 0.04430945962667465,
      "eval_runtime": 1343.5225,
      "eval_samples_per_second": 11.084,
      "eval_steps_per_second": 1.386,
      "eval_wer": 0.0597682556450548,
      "step": 7000
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.044778156996587e-05,
      "loss": 0.0811,
      "step": 7500
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.976518771331058e-05,
      "loss": 0.0826,
      "step": 8000
    },
    {
      "epoch": 1.78,
      "eval_cer": 0.011001272828201836,
      "eval_loss": 0.044084154069423676,
      "eval_runtime": 1339.8673,
      "eval_samples_per_second": 11.115,
      "eval_steps_per_second": 1.39,
      "eval_wer": 0.06039548395616004,
      "step": 8000
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.9082593856655292e-05,
      "loss": 0.0795,
      "step": 8500
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.84e-05,
      "loss": 0.0809,
      "step": 9000
    },
    {
      "epoch": 2.0,
      "eval_cer": 0.01099146027598504,
      "eval_loss": 0.04365098848938942,
      "eval_runtime": 1342.6486,
      "eval_samples_per_second": 11.092,
      "eval_steps_per_second": 1.387,
      "eval_wer": 0.06050277300937541,
      "step": 9000
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.771740614334471e-05,
      "loss": 0.0726,
      "step": 9500
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.7034812286689422e-05,
      "loss": 0.0728,
      "step": 10000
    },
    {
      "epoch": 2.22,
      "eval_cer": 0.010905950892381534,
      "eval_loss": 0.045100387185811996,
      "eval_runtime": 1342.5545,
      "eval_samples_per_second": 11.092,
      "eval_steps_per_second": 1.387,
      "eval_wer": 0.05974349663277433,
      "step": 10000
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.6352218430034133e-05,
      "loss": 0.072,
      "step": 10500
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.567098976109215e-05,
      "loss": 0.0707,
      "step": 11000
    },
    {
      "epoch": 2.45,
      "eval_cer": 0.010758762609129599,
      "eval_loss": 0.04439757764339447,
      "eval_runtime": 1341.1302,
      "eval_samples_per_second": 11.104,
      "eval_steps_per_second": 1.388,
      "eval_wer": 0.05909976231348211,
      "step": 11000
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.4988395904436861e-05,
      "loss": 0.072,
      "step": 11500
    },
    {
      "epoch": 2.67,
      "learning_rate": 1.430580204778157e-05,
      "loss": 0.0698,
      "step": 12000
    },
    {
      "epoch": 2.67,
      "eval_cer": 0.010531672114969469,
      "eval_loss": 0.04415421932935715,
      "eval_runtime": 1339.1928,
      "eval_samples_per_second": 11.12,
      "eval_steps_per_second": 1.39,
      "eval_wer": 0.05758120956027994,
      "step": 12000
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.3624573378839592e-05,
      "loss": 0.0958,
      "step": 12500
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.29419795221843e-05,
      "loss": 0.0981,
      "step": 13000
    },
    {
      "epoch": 2.89,
      "eval_cer": 0.010388689211239017,
      "eval_loss": 0.04108743742108345,
      "eval_runtime": 1339.0236,
      "eval_samples_per_second": 11.122,
      "eval_steps_per_second": 1.391,
      "eval_wer": 0.05717681235969893,
      "step": 13000
    },
    {
      "epoch": 3.0,
      "learning_rate": 1.225938566552901e-05,
      "loss": 0.0991,
      "step": 13500
    },
    {
      "epoch": 3.11,
      "learning_rate": 1.1576791808873721e-05,
      "loss": 0.0928,
      "step": 14000
    },
    {
      "epoch": 3.11,
      "eval_cer": 0.010234491962117941,
      "eval_loss": 0.04134270176291466,
      "eval_runtime": 1343.5325,
      "eval_samples_per_second": 11.084,
      "eval_steps_per_second": 1.386,
      "eval_wer": 0.056062656807077775,
      "step": 14000
    },
    {
      "epoch": 3.23,
      "learning_rate": 1.089556313993174e-05,
      "loss": 0.0919,
      "step": 14500
    },
    {
      "epoch": 3.34,
      "learning_rate": 1.0212969283276451e-05,
      "loss": 0.0927,
      "step": 15000
    },
    {
      "epoch": 3.34,
      "eval_cer": 0.010227482996248802,
      "eval_loss": 0.041016656905412674,
      "eval_runtime": 1340.2656,
      "eval_samples_per_second": 11.111,
      "eval_steps_per_second": 1.389,
      "eval_wer": 0.0564588010035653,
      "step": 15000
    },
    {
      "epoch": 3.45,
      "learning_rate": 9.53037542662116e-06,
      "loss": 0.0896,
      "step": 15500
    },
    {
      "epoch": 3.56,
      "learning_rate": 8.84778156996587e-06,
      "loss": 0.0886,
      "step": 16000
    },
    {
      "epoch": 3.56,
      "eval_cer": 0.01016019692390506,
      "eval_loss": 0.040184538811445236,
      "eval_runtime": 1343.3925,
      "eval_samples_per_second": 11.085,
      "eval_steps_per_second": 1.386,
      "eval_wer": 0.055798560676086094,
      "step": 16000
    },
    {
      "epoch": 3.67,
      "learning_rate": 8.16518771331058e-06,
      "loss": 0.0872,
      "step": 16500
    },
    {
      "epoch": 3.78,
      "learning_rate": 7.482593856655291e-06,
      "loss": 0.091,
      "step": 17000
    },
    {
      "epoch": 3.78,
      "eval_cer": 0.010059267815389445,
      "eval_loss": 0.03996647894382477,
      "eval_runtime": 1343.622,
      "eval_samples_per_second": 11.083,
      "eval_steps_per_second": 1.386,
      "eval_wer": 0.05525386240591575,
      "step": 17000
    },
    {
      "epoch": 3.89,
      "learning_rate": 6.8e-06,
      "loss": 0.0882,
      "step": 17500
    },
    {
      "epoch": 4.0,
      "learning_rate": 6.11740614334471e-06,
      "loss": 0.0888,
      "step": 18000
    },
    {
      "epoch": 4.0,
      "eval_cer": 0.009962544086395317,
      "eval_loss": 0.039790086448192596,
      "eval_runtime": 1340.4959,
      "eval_samples_per_second": 11.109,
      "eval_steps_per_second": 1.389,
      "eval_wer": 0.05461838109071702,
      "step": 18000
    },
    {
      "epoch": 4.12,
      "learning_rate": 5.43617747440273e-06,
      "loss": 0.0872,
      "step": 18500
    },
    {
      "epoch": 4.23,
      "learning_rate": 4.75358361774744e-06,
      "loss": 0.0885,
      "step": 19000
    },
    {
      "epoch": 4.23,
      "eval_cer": 0.009899463393573059,
      "eval_loss": 0.039466630667448044,
      "eval_runtime": 1341.4804,
      "eval_samples_per_second": 11.101,
      "eval_steps_per_second": 1.388,
      "eval_wer": 0.05423048989832299,
      "step": 19000
    },
    {
      "epoch": 4.34,
      "learning_rate": 4.0709897610921505e-06,
      "loss": 0.0885,
      "step": 19500
    },
    {
      "epoch": 4.45,
      "learning_rate": 3.3897610921501705e-06,
      "loss": 0.0869,
      "step": 20000
    },
    {
      "epoch": 4.45,
      "eval_cer": 0.009868623943748843,
      "eval_loss": 0.0393984392285347,
      "eval_runtime": 1341.5672,
      "eval_samples_per_second": 11.1,
      "eval_steps_per_second": 1.388,
      "eval_wer": 0.05402416479598574,
      "step": 20000
    },
    {
      "epoch": 4.56,
      "learning_rate": 2.708532423208191e-06,
      "loss": 0.0845,
      "step": 20500
    },
    {
      "epoch": 4.67,
      "learning_rate": 2.025938566552901e-06,
      "loss": 0.0844,
      "step": 21000
    },
    {
      "epoch": 4.67,
      "eval_cer": 0.009830775528055488,
      "eval_loss": 0.03927738592028618,
      "eval_runtime": 1339.2196,
      "eval_samples_per_second": 11.12,
      "eval_steps_per_second": 1.39,
      "eval_wer": 0.05393338175095735,
      "step": 21000
    },
    {
      "epoch": 4.78,
      "learning_rate": 1.3433447098976108e-06,
      "loss": 0.0866,
      "step": 21500
    },
    {
      "epoch": 4.89,
      "learning_rate": 6.607508532423209e-07,
      "loss": 0.0882,
      "step": 22000
    },
    {
      "epoch": 4.89,
      "eval_cer": 0.009790123526014478,
      "eval_loss": 0.039073262363672256,
      "eval_runtime": 1341.9861,
      "eval_samples_per_second": 11.097,
      "eval_steps_per_second": 1.387,
      "eval_wer": 0.053735309652713587,
      "step": 22000
    },
    {
      "epoch": 5.0,
      "step": 22475,
      "total_flos": 1.3602795268583408e+20,
      "train_loss": 0.08836701542702612,
      "train_runtime": 131691.1828,
      "train_samples_per_second": 5.462,
      "train_steps_per_second": 0.171
    }
  ],
  "max_steps": 22475,
  "num_train_epochs": 5,
  "total_flos": 1.3602795268583408e+20,
  "trial_name": null,
  "trial_params": null
}