{
  "best_metric": 0.5411643981933594,
  "best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-bemgen-balanced-model/checkpoint-1000",
  "epoch": 3.1683168316831685,
  "eval_steps": 200,
  "global_step": 1600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04950495049504951,
      "grad_norm": 121.81847381591797,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 11.6499,
      "step": 25
    },
    {
      "epoch": 0.09900990099009901,
      "grad_norm": 104.46240234375,
      "learning_rate": 9.000000000000001e-07,
      "loss": 9.6493,
      "step": 50
    },
    {
      "epoch": 0.1485148514851485,
      "grad_norm": 105.9942626953125,
      "learning_rate": 1.3800000000000001e-06,
      "loss": 7.7745,
      "step": 75
    },
    {
      "epoch": 0.19801980198019803,
      "grad_norm": 86.44505310058594,
      "learning_rate": 1.8800000000000002e-06,
      "loss": 6.0363,
      "step": 100
    },
    {
      "epoch": 0.24752475247524752,
      "grad_norm": 89.36949920654297,
      "learning_rate": 2.38e-06,
      "loss": 4.9289,
      "step": 125
    },
    {
      "epoch": 0.297029702970297,
      "grad_norm": 67.95636749267578,
      "learning_rate": 2.88e-06,
      "loss": 4.2784,
      "step": 150
    },
    {
      "epoch": 0.3465346534653465,
      "grad_norm": 71.39818572998047,
      "learning_rate": 3.3800000000000007e-06,
      "loss": 3.8983,
      "step": 175
    },
    {
      "epoch": 0.39603960396039606,
      "grad_norm": 77.7619400024414,
      "learning_rate": 3.88e-06,
      "loss": 3.7045,
      "step": 200
    },
    {
      "epoch": 0.39603960396039606,
      "eval_loss": 0.9162073135375977,
      "eval_runtime": 279.8105,
      "eval_samples_per_second": 1.801,
      "eval_steps_per_second": 0.901,
      "eval_wer": 0.6826695371367061,
      "step": 200
    },
    {
      "epoch": 0.44554455445544555,
      "grad_norm": 65.45413208007812,
      "learning_rate": 4.38e-06,
      "loss": 3.4515,
      "step": 225
    },
    {
      "epoch": 0.49504950495049505,
      "grad_norm": 83.95701599121094,
      "learning_rate": 4.880000000000001e-06,
      "loss": 3.3691,
      "step": 250
    },
    {
      "epoch": 0.5445544554455446,
      "grad_norm": 60.99998092651367,
      "learning_rate": 5.380000000000001e-06,
      "loss": 3.1637,
      "step": 275
    },
    {
      "epoch": 0.594059405940594,
      "grad_norm": 86.96224212646484,
      "learning_rate": 5.8800000000000005e-06,
      "loss": 3.015,
      "step": 300
    },
    {
      "epoch": 0.6435643564356436,
      "grad_norm": 75.70954132080078,
      "learning_rate": 6.380000000000001e-06,
      "loss": 2.9369,
      "step": 325
    },
    {
      "epoch": 0.693069306930693,
      "grad_norm": 68.08271026611328,
      "learning_rate": 6.88e-06,
      "loss": 2.7672,
      "step": 350
    },
    {
      "epoch": 0.7425742574257426,
      "grad_norm": 59.94082260131836,
      "learning_rate": 7.3800000000000005e-06,
      "loss": 2.7812,
      "step": 375
    },
    {
      "epoch": 0.7920792079207921,
      "grad_norm": 63.15815353393555,
      "learning_rate": 7.88e-06,
      "loss": 2.687,
      "step": 400
    },
    {
      "epoch": 0.7920792079207921,
      "eval_loss": 0.6817564368247986,
      "eval_runtime": 278.5541,
      "eval_samples_per_second": 1.809,
      "eval_steps_per_second": 0.905,
      "eval_wer": 0.5351991388589882,
      "step": 400
    },
    {
      "epoch": 0.8415841584158416,
      "grad_norm": 66.49015808105469,
      "learning_rate": 8.380000000000001e-06,
      "loss": 2.6355,
      "step": 425
    },
    {
      "epoch": 0.8910891089108911,
      "grad_norm": 69.2128677368164,
      "learning_rate": 8.880000000000001e-06,
      "loss": 2.5594,
      "step": 450
    },
    {
      "epoch": 0.9405940594059405,
      "grad_norm": 82.29959106445312,
      "learning_rate": 9.38e-06,
      "loss": 2.6779,
      "step": 475
    },
    {
      "epoch": 0.9900990099009901,
      "grad_norm": 75.67899322509766,
      "learning_rate": 9.88e-06,
      "loss": 2.6395,
      "step": 500
    },
    {
      "epoch": 1.0396039603960396,
      "grad_norm": 74.15483093261719,
      "learning_rate": 9.957777777777779e-06,
      "loss": 1.9931,
      "step": 525
    },
    {
      "epoch": 1.0891089108910892,
      "grad_norm": 49.705169677734375,
      "learning_rate": 9.902222222222223e-06,
      "loss": 1.7258,
      "step": 550
    },
    {
      "epoch": 1.1386138613861387,
      "grad_norm": 37.47819519042969,
      "learning_rate": 9.846666666666668e-06,
      "loss": 1.8517,
      "step": 575
    },
    {
      "epoch": 1.188118811881188,
      "grad_norm": 50.20145034790039,
      "learning_rate": 9.791111111111112e-06,
      "loss": 1.7185,
      "step": 600
    },
    {
      "epoch": 1.188118811881188,
      "eval_loss": 0.6266342401504517,
      "eval_runtime": 282.9063,
      "eval_samples_per_second": 1.782,
      "eval_steps_per_second": 0.891,
      "eval_wer": 0.4988159311087191,
      "step": 600
    },
    {
      "epoch": 1.2376237623762376,
      "grad_norm": 77.02053833007812,
      "learning_rate": 9.735555555555556e-06,
      "loss": 1.8482,
      "step": 625
    },
    {
      "epoch": 1.2871287128712872,
      "grad_norm": 73.36985778808594,
      "learning_rate": 9.68e-06,
      "loss": 1.834,
      "step": 650
    },
    {
      "epoch": 1.3366336633663367,
      "grad_norm": 48.02408981323242,
      "learning_rate": 9.624444444444445e-06,
      "loss": 1.6974,
      "step": 675
    },
    {
      "epoch": 1.386138613861386,
      "grad_norm": 54.08694076538086,
      "learning_rate": 9.56888888888889e-06,
      "loss": 1.7489,
      "step": 700
    },
    {
      "epoch": 1.4356435643564356,
      "grad_norm": 39.281005859375,
      "learning_rate": 9.513333333333334e-06,
      "loss": 1.8013,
      "step": 725
    },
    {
      "epoch": 1.4851485148514851,
      "grad_norm": 43.782196044921875,
      "learning_rate": 9.457777777777778e-06,
      "loss": 1.7419,
      "step": 750
    },
    {
      "epoch": 1.5346534653465347,
      "grad_norm": 40.640323638916016,
      "learning_rate": 9.402222222222222e-06,
      "loss": 1.7863,
      "step": 775
    },
    {
      "epoch": 1.5841584158415842,
      "grad_norm": 43.944881439208984,
      "learning_rate": 9.346666666666666e-06,
      "loss": 1.7232,
      "step": 800
    },
    {
      "epoch": 1.5841584158415842,
      "eval_loss": 0.5673945546150208,
      "eval_runtime": 279.2797,
      "eval_samples_per_second": 1.805,
      "eval_steps_per_second": 0.902,
      "eval_wer": 0.4592034445640474,
      "step": 800
    },
    {
      "epoch": 1.6336633663366338,
      "grad_norm": 53.22232437133789,
      "learning_rate": 9.291111111111112e-06,
      "loss": 1.6903,
      "step": 825
    },
    {
      "epoch": 1.6831683168316833,
      "grad_norm": 41.854610443115234,
      "learning_rate": 9.235555555555556e-06,
      "loss": 1.656,
      "step": 850
    },
    {
      "epoch": 1.7326732673267327,
      "grad_norm": 38.988529205322266,
      "learning_rate": 9.180000000000002e-06,
      "loss": 1.5997,
      "step": 875
    },
    {
      "epoch": 1.7821782178217822,
      "grad_norm": 54.63161087036133,
      "learning_rate": 9.124444444444444e-06,
      "loss": 1.6233,
      "step": 900
    },
    {
      "epoch": 1.8316831683168315,
      "grad_norm": 41.60354232788086,
      "learning_rate": 9.06888888888889e-06,
      "loss": 1.4889,
      "step": 925
    },
    {
      "epoch": 1.881188118811881,
      "grad_norm": 42.82672119140625,
      "learning_rate": 9.013333333333334e-06,
      "loss": 1.6465,
      "step": 950
    },
    {
      "epoch": 1.9306930693069306,
      "grad_norm": 50.5968132019043,
      "learning_rate": 8.957777777777778e-06,
      "loss": 1.6738,
      "step": 975
    },
    {
      "epoch": 1.9801980198019802,
      "grad_norm": 46.50210952758789,
      "learning_rate": 8.902222222222224e-06,
      "loss": 1.6083,
      "step": 1000
    },
    {
      "epoch": 1.9801980198019802,
      "eval_loss": 0.5411643981933594,
      "eval_runtime": 277.778,
      "eval_samples_per_second": 1.814,
      "eval_steps_per_second": 0.907,
      "eval_wer": 0.4413347685683531,
      "step": 1000
    },
    {
      "epoch": 2.0297029702970297,
      "grad_norm": 25.421289443969727,
      "learning_rate": 8.846666666666668e-06,
      "loss": 1.1495,
      "step": 1025
    },
    {
      "epoch": 2.0792079207920793,
      "grad_norm": 34.11124038696289,
      "learning_rate": 8.791111111111112e-06,
      "loss": 0.8415,
      "step": 1050
    },
    {
      "epoch": 2.128712871287129,
      "grad_norm": 28.867008209228516,
      "learning_rate": 8.735555555555556e-06,
      "loss": 0.7942,
      "step": 1075
    },
    {
      "epoch": 2.1782178217821784,
      "grad_norm": 28.74831199645996,
      "learning_rate": 8.68e-06,
      "loss": 0.894,
      "step": 1100
    },
    {
      "epoch": 2.227722772277228,
      "grad_norm": 31.9539794921875,
      "learning_rate": 8.624444444444446e-06,
      "loss": 0.9315,
      "step": 1125
    },
    {
      "epoch": 2.2772277227722775,
      "grad_norm": 25.5980224609375,
      "learning_rate": 8.56888888888889e-06,
      "loss": 0.8735,
      "step": 1150
    },
    {
      "epoch": 2.3267326732673266,
      "grad_norm": 30.295211791992188,
      "learning_rate": 8.513333333333335e-06,
      "loss": 0.7829,
      "step": 1175
    },
    {
      "epoch": 2.376237623762376,
      "grad_norm": 38.251556396484375,
      "learning_rate": 8.457777777777778e-06,
      "loss": 0.7643,
      "step": 1200
    },
    {
      "epoch": 2.376237623762376,
      "eval_loss": 0.565162181854248,
      "eval_runtime": 302.1807,
      "eval_samples_per_second": 1.668,
      "eval_steps_per_second": 0.834,
      "eval_wer": 0.4279870828848224,
      "step": 1200
    },
    {
      "epoch": 2.4257425742574257,
      "grad_norm": 22.155136108398438,
      "learning_rate": 8.402222222222223e-06,
      "loss": 0.8875,
      "step": 1225
    },
    {
      "epoch": 2.4752475247524752,
      "grad_norm": 29.48054313659668,
      "learning_rate": 8.346666666666668e-06,
      "loss": 0.8138,
      "step": 1250
    },
    {
      "epoch": 2.5247524752475248,
      "grad_norm": 31.146007537841797,
      "learning_rate": 8.291111111111112e-06,
      "loss": 0.8036,
      "step": 1275
    },
    {
      "epoch": 2.5742574257425743,
      "grad_norm": 39.311256408691406,
      "learning_rate": 8.235555555555557e-06,
      "loss": 0.8389,
      "step": 1300
    },
    {
      "epoch": 2.623762376237624,
      "grad_norm": 21.874267578125,
      "learning_rate": 8.18e-06,
      "loss": 0.8243,
      "step": 1325
    },
    {
      "epoch": 2.6732673267326734,
      "grad_norm": 27.84779167175293,
      "learning_rate": 8.124444444444445e-06,
      "loss": 1.0702,
      "step": 1350
    },
    {
      "epoch": 2.7227722772277225,
      "grad_norm": 41.43334197998047,
      "learning_rate": 8.06888888888889e-06,
      "loss": 0.8892,
      "step": 1375
    },
    {
      "epoch": 2.772277227722772,
      "grad_norm": 36.145965576171875,
      "learning_rate": 8.013333333333333e-06,
      "loss": 0.8362,
      "step": 1400
    },
    {
      "epoch": 2.772277227722772,
      "eval_loss": 0.5454630851745605,
      "eval_runtime": 277.6054,
      "eval_samples_per_second": 1.816,
      "eval_steps_per_second": 0.908,
      "eval_wer": 0.40516684607104414,
      "step": 1400
    },
    {
      "epoch": 2.8217821782178216,
      "grad_norm": 82.11780548095703,
      "learning_rate": 7.957777777777779e-06,
      "loss": 0.7491,
      "step": 1425
    },
    {
      "epoch": 2.871287128712871,
      "grad_norm": 28.929828643798828,
      "learning_rate": 7.902222222222223e-06,
      "loss": 0.9076,
      "step": 1450
    },
    {
      "epoch": 2.9207920792079207,
      "grad_norm": 29.58544921875,
      "learning_rate": 7.846666666666667e-06,
      "loss": 0.8593,
      "step": 1475
    },
    {
      "epoch": 2.9702970297029703,
      "grad_norm": 36.51881790161133,
      "learning_rate": 7.791111111111111e-06,
      "loss": 0.7739,
      "step": 1500
    },
    {
      "epoch": 3.01980198019802,
      "grad_norm": 13.50954532623291,
      "learning_rate": 7.735555555555557e-06,
      "loss": 0.6666,
      "step": 1525
    },
    {
      "epoch": 3.0693069306930694,
      "grad_norm": 15.712291717529297,
      "learning_rate": 7.680000000000001e-06,
      "loss": 0.3365,
      "step": 1550
    },
    {
      "epoch": 3.118811881188119,
      "grad_norm": 22.804317474365234,
      "learning_rate": 7.624444444444445e-06,
      "loss": 0.3306,
      "step": 1575
    },
    {
      "epoch": 3.1683168316831685,
      "grad_norm": 22.823379516601562,
      "learning_rate": 7.56888888888889e-06,
      "loss": 0.422,
      "step": 1600
    },
    {
      "epoch": 3.1683168316831685,
      "eval_loss": 0.577109694480896,
      "eval_runtime": 282.3788,
      "eval_samples_per_second": 1.785,
      "eval_steps_per_second": 0.892,
      "eval_wer": 0.3991388589881593,
      "step": 1600
    },
    {
      "epoch": 3.1683168316831685,
      "step": 1600,
      "total_flos": 1.306068100153344e+19,
      "train_loss": 2.189091945886612,
      "train_runtime": 5496.1435,
      "train_samples_per_second": 7.278,
      "train_steps_per_second": 0.91
    }
  ],
  "logging_steps": 25,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 200,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.306068100153344e+19,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}