{
  "best_metric": 0.5542770299609205,
  "best_model_checkpoint": "./whisper-large-v3-quantized/checkpoint-200",
  "epoch": 0.36877688998156116,
  "eval_steps": 200,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015365703749231715,
      "grad_norm": 0.47920289635658264,
      "learning_rate": 5.000000000000001e-07,
      "loss": 1.5655,
      "step": 25
    },
    {
      "epoch": 0.03073140749846343,
      "grad_norm": 0.15030227601528168,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.0487,
      "step": 50
    },
    {
      "epoch": 0.046097111247695145,
      "grad_norm": 0.5764341354370117,
      "learning_rate": 1.5e-06,
      "loss": 1.6787,
      "step": 75
    },
    {
      "epoch": 0.06146281499692686,
      "grad_norm": 0.6326006650924683,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.3895,
      "step": 100
    },
    {
      "epoch": 0.07682851874615858,
      "grad_norm": 0.5107858180999756,
      "learning_rate": 2.5e-06,
      "loss": 1.5308,
      "step": 125
    },
    {
      "epoch": 0.09219422249539029,
      "grad_norm": 0.4186854362487793,
      "learning_rate": 3e-06,
      "loss": 1.1231,
      "step": 150
    },
    {
      "epoch": 0.107559926244622,
      "grad_norm": 0.580566942691803,
      "learning_rate": 3.5e-06,
      "loss": 1.8924,
      "step": 175
    },
    {
      "epoch": 0.12292562999385372,
      "grad_norm": 0.3107517957687378,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.3077,
      "step": 200
    },
    {
      "epoch": 0.12292562999385372,
      "eval_loss": 1.5075420141220093,
      "eval_runtime": 620.2783,
      "eval_samples_per_second": 0.656,
      "eval_steps_per_second": 0.082,
      "eval_wer": 0.5542770299609205,
      "step": 200
    },
    {
      "epoch": 0.13829133374308544,
      "grad_norm": 0.6910067796707153,
      "learning_rate": 4.5e-06,
      "loss": 1.8306,
      "step": 225
    },
    {
      "epoch": 0.15365703749231716,
      "grad_norm": 0.04023272544145584,
      "learning_rate": 5e-06,
      "loss": 1.5791,
      "step": 250
    },
    {
      "epoch": 0.16902274124154887,
      "grad_norm": 0.700638473033905,
      "learning_rate": 5.500000000000001e-06,
      "loss": 1.4698,
      "step": 275
    },
    {
      "epoch": 0.18438844499078058,
      "grad_norm": 0.697817325592041,
      "learning_rate": 6e-06,
      "loss": 1.7872,
      "step": 300
    },
    {
      "epoch": 0.1997541487400123,
      "grad_norm": 0.5358330011367798,
      "learning_rate": 6.5000000000000004e-06,
      "loss": 1.1349,
      "step": 325
    },
    {
      "epoch": 0.215119852489244,
      "grad_norm": 0.7219414710998535,
      "learning_rate": 7e-06,
      "loss": 1.4458,
      "step": 350
    },
    {
      "epoch": 0.23048555623847572,
      "grad_norm": 0.5983926653862,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.3574,
      "step": 375
    },
    {
      "epoch": 0.24585125998770743,
      "grad_norm": 1.1959525346755981,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.4669,
      "step": 400
    },
    {
      "epoch": 0.24585125998770743,
      "eval_loss": 1.4504855871200562,
      "eval_runtime": 634.3044,
      "eval_samples_per_second": 0.642,
      "eval_steps_per_second": 0.08,
      "eval_wer": 0.5688232739904473,
      "step": 400
    },
    {
      "epoch": 0.26121696373693915,
      "grad_norm": 0.9689226746559143,
      "learning_rate": 8.5e-06,
      "loss": 1.2762,
      "step": 425
    },
    {
      "epoch": 0.2765826674861709,
      "grad_norm": 0.2560756504535675,
      "learning_rate": 9e-06,
      "loss": 1.2975,
      "step": 450
    },
    {
      "epoch": 0.29194837123540257,
      "grad_norm": 1.208335518836975,
      "learning_rate": 9.5e-06,
      "loss": 1.3111,
      "step": 475
    },
    {
      "epoch": 0.3073140749846343,
      "grad_norm": 0.11639931052923203,
      "learning_rate": 1e-05,
      "loss": 1.2754,
      "step": 500
    },
    {
      "epoch": 0.322679778733866,
      "grad_norm": 3.112171173095703,
      "learning_rate": 9.5e-06,
      "loss": 1.4163,
      "step": 525
    },
    {
      "epoch": 0.33804548248309774,
      "grad_norm": 1.0575857162475586,
      "learning_rate": 9e-06,
      "loss": 1.1581,
      "step": 550
    },
    {
      "epoch": 0.3534111862323294,
      "grad_norm": 1.2147361040115356,
      "learning_rate": 8.5e-06,
      "loss": 1.1211,
      "step": 575
    },
    {
      "epoch": 0.36877688998156116,
      "grad_norm": 3.342254161834717,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.7042,
      "step": 600
    },
    {
      "epoch": 0.36877688998156116,
      "eval_loss": 1.2238162755966187,
      "eval_runtime": 654.1777,
      "eval_samples_per_second": 0.622,
      "eval_steps_per_second": 0.078,
      "eval_wer": 0.5605731654363874,
      "step": 600
    }
  ],
  "logging_steps": 25,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1979496448e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}