{
  "best_metric": 97.89625360230548,
  "best_model_checkpoint": "./whisper-tiny-en/checkpoint-1000",
  "epoch": 24.390243902439025,
  "eval_steps": 1000,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6097560975609756,
      "grad_norm": 76.21949768066406,
      "learning_rate": 4.2000000000000006e-07,
      "loss": 7.5702,
      "step": 25
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 72.27240753173828,
      "learning_rate": 9.200000000000001e-07,
      "loss": 7.1361,
      "step": 50
    },
    {
      "epoch": 1.8292682926829267,
      "grad_norm": 27.645973205566406,
      "learning_rate": 1.42e-06,
      "loss": 6.4445,
      "step": 75
    },
    {
      "epoch": 2.4390243902439024,
      "grad_norm": 21.712514877319336,
      "learning_rate": 1.9200000000000003e-06,
      "loss": 5.8148,
      "step": 100
    },
    {
      "epoch": 3.048780487804878,
      "grad_norm": 19.066011428833008,
      "learning_rate": 2.42e-06,
      "loss": 5.1377,
      "step": 125
    },
    {
      "epoch": 3.658536585365854,
      "grad_norm": 14.726506233215332,
      "learning_rate": 2.92e-06,
      "loss": 4.6,
      "step": 150
    },
    {
      "epoch": 4.2682926829268295,
      "grad_norm": 13.597981452941895,
      "learning_rate": 3.4200000000000007e-06,
      "loss": 4.3595,
      "step": 175
    },
    {
      "epoch": 4.878048780487805,
      "grad_norm": 16.399568557739258,
      "learning_rate": 3.920000000000001e-06,
      "loss": 4.171,
      "step": 200
    },
    {
      "epoch": 5.487804878048781,
      "grad_norm": 15.264267921447754,
      "learning_rate": 4.42e-06,
      "loss": 3.9727,
      "step": 225
    },
    {
      "epoch": 6.097560975609756,
      "grad_norm": 14.83893871307373,
      "learning_rate": 4.92e-06,
      "loss": 3.816,
      "step": 250
    },
    {
      "epoch": 6.7073170731707314,
      "grad_norm": 14.460186004638672,
      "learning_rate": 5.420000000000001e-06,
      "loss": 3.6683,
      "step": 275
    },
    {
      "epoch": 7.317073170731708,
      "grad_norm": 13.663064002990723,
      "learning_rate": 5.92e-06,
      "loss": 3.5392,
      "step": 300
    },
    {
      "epoch": 7.926829268292683,
      "grad_norm": 14.359073638916016,
      "learning_rate": 6.42e-06,
      "loss": 3.4807,
      "step": 325
    },
    {
      "epoch": 8.536585365853659,
      "grad_norm": 12.90373706817627,
      "learning_rate": 6.92e-06,
      "loss": 3.335,
      "step": 350
    },
    {
      "epoch": 9.146341463414634,
      "grad_norm": 15.809270858764648,
      "learning_rate": 7.420000000000001e-06,
      "loss": 3.2196,
      "step": 375
    },
    {
      "epoch": 9.75609756097561,
      "grad_norm": 14.483990669250488,
      "learning_rate": 7.92e-06,
      "loss": 3.1224,
      "step": 400
    },
    {
      "epoch": 10.365853658536585,
      "grad_norm": 15.841504096984863,
      "learning_rate": 8.42e-06,
      "loss": 2.9858,
      "step": 425
    },
    {
      "epoch": 10.975609756097562,
      "grad_norm": 16.420425415039062,
      "learning_rate": 8.920000000000001e-06,
      "loss": 2.9395,
      "step": 450
    },
    {
      "epoch": 11.585365853658537,
      "grad_norm": 14.952699661254883,
      "learning_rate": 9.42e-06,
      "loss": 2.7501,
      "step": 475
    },
    {
      "epoch": 12.195121951219512,
      "grad_norm": 15.765605926513672,
      "learning_rate": 9.920000000000002e-06,
      "loss": 2.7014,
      "step": 500
    },
    {
      "epoch": 12.804878048780488,
      "grad_norm": 17.627931594848633,
      "learning_rate": 9.967692307692308e-06,
      "loss": 2.601,
      "step": 525
    },
    {
      "epoch": 13.414634146341463,
      "grad_norm": 16.36449432373047,
      "learning_rate": 9.92923076923077e-06,
      "loss": 2.4699,
      "step": 550
    },
    {
      "epoch": 14.024390243902438,
      "grad_norm": 15.8140869140625,
      "learning_rate": 9.890769230769231e-06,
      "loss": 2.3859,
      "step": 575
    },
    {
      "epoch": 14.634146341463415,
      "grad_norm": 15.983495712280273,
      "learning_rate": 9.852307692307693e-06,
      "loss": 2.2235,
      "step": 600
    },
    {
      "epoch": 15.24390243902439,
      "grad_norm": 15.25550651550293,
      "learning_rate": 9.813846153846155e-06,
      "loss": 2.1583,
      "step": 625
    },
    {
      "epoch": 15.853658536585366,
      "grad_norm": 18.167879104614258,
      "learning_rate": 9.775384615384616e-06,
      "loss": 2.0671,
      "step": 650
    },
    {
      "epoch": 16.463414634146343,
      "grad_norm": 16.86029624938965,
      "learning_rate": 9.736923076923078e-06,
      "loss": 1.9547,
      "step": 675
    },
    {
      "epoch": 17.073170731707318,
      "grad_norm": 16.195270538330078,
      "learning_rate": 9.698461538461539e-06,
      "loss": 1.8877,
      "step": 700
    },
    {
      "epoch": 17.682926829268293,
      "grad_norm": 17.331113815307617,
      "learning_rate": 9.66e-06,
      "loss": 1.7336,
      "step": 725
    },
    {
      "epoch": 18.29268292682927,
      "grad_norm": 18.00438690185547,
      "learning_rate": 9.621538461538463e-06,
      "loss": 1.6845,
      "step": 750
    },
    {
      "epoch": 18.902439024390244,
      "grad_norm": 17.1490478515625,
      "learning_rate": 9.583076923076923e-06,
      "loss": 1.6027,
      "step": 775
    },
    {
      "epoch": 19.51219512195122,
      "grad_norm": 17.924156188964844,
      "learning_rate": 9.544615384615385e-06,
      "loss": 1.4647,
      "step": 800
    },
    {
      "epoch": 20.121951219512194,
      "grad_norm": 17.920028686523438,
      "learning_rate": 9.506153846153848e-06,
      "loss": 1.4291,
      "step": 825
    },
    {
      "epoch": 20.73170731707317,
      "grad_norm": 17.502954483032227,
      "learning_rate": 9.467692307692308e-06,
      "loss": 1.3113,
      "step": 850
    },
    {
      "epoch": 21.341463414634145,
      "grad_norm": 18.125986099243164,
      "learning_rate": 9.42923076923077e-06,
      "loss": 1.232,
      "step": 875
    },
    {
      "epoch": 21.951219512195124,
      "grad_norm": 16.026338577270508,
      "learning_rate": 9.39076923076923e-06,
      "loss": 1.1903,
      "step": 900
    },
    {
      "epoch": 22.5609756097561,
      "grad_norm": 16.795467376708984,
      "learning_rate": 9.352307692307693e-06,
      "loss": 1.0742,
      "step": 925
    },
    {
      "epoch": 23.170731707317074,
      "grad_norm": 19.86144256591797,
      "learning_rate": 9.313846153846155e-06,
      "loss": 1.0346,
      "step": 950
    },
    {
      "epoch": 23.78048780487805,
      "grad_norm": 17.94236183166504,
      "learning_rate": 9.275384615384616e-06,
      "loss": 0.9589,
      "step": 975
    },
    {
      "epoch": 24.390243902439025,
      "grad_norm": 17.72712516784668,
      "learning_rate": 9.236923076923078e-06,
      "loss": 0.8757,
      "step": 1000
    },
    {
      "epoch": 24.390243902439025,
      "eval_loss": 4.123460292816162,
      "eval_runtime": 64.6053,
      "eval_samples_per_second": 2.492,
      "eval_steps_per_second": 0.325,
      "eval_wer": 97.89625360230548,
      "step": 1000
    }
  ],
  "logging_steps": 25,
  "max_steps": 7000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 171,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.8622106976256e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}