{
  "best_metric": 1.084672166739036,
  "best_model_checkpoint": "./whisper-tiny-qlora/checkpoint-200",
  "epoch": 0.24585125998770743,
  "eval_steps": 200,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015365703749231715,
      "grad_norm": 1.955202579498291,
      "learning_rate": 4.800000000000001e-07,
      "loss": 2.4438,
      "step": 25
    },
    {
      "epoch": 0.03073140749846343,
      "grad_norm": 1.465623140335083,
      "learning_rate": 9.800000000000001e-07,
      "loss": 2.0995,
      "step": 50
    },
    {
      "epoch": 0.046097111247695145,
      "grad_norm": 1.489674687385559,
      "learning_rate": 1.48e-06,
      "loss": 2.4302,
      "step": 75
    },
    {
      "epoch": 0.06146281499692686,
      "grad_norm": 1.7222563028335571,
      "learning_rate": 1.98e-06,
      "loss": 2.2989,
      "step": 100
    },
    {
      "epoch": 0.07682851874615858,
      "grad_norm": 2.750751256942749,
      "learning_rate": 2.4800000000000004e-06,
      "loss": 2.2729,
      "step": 125
    },
    {
      "epoch": 0.09219422249539029,
      "grad_norm": 2.440593957901001,
      "learning_rate": 2.9800000000000003e-06,
      "loss": 2.0241,
      "step": 150
    },
    {
      "epoch": 0.107559926244622,
      "grad_norm": 2.425014019012451,
      "learning_rate": 3.48e-06,
      "loss": 2.6992,
      "step": 175
    },
    {
      "epoch": 0.12292562999385372,
      "grad_norm": 1.8003255128860474,
      "learning_rate": 3.980000000000001e-06,
      "loss": 2.0456,
      "step": 200
    },
    {
      "epoch": 0.12292562999385372,
      "eval_loss": 2.245432138442993,
      "eval_runtime": 51.8391,
      "eval_samples_per_second": 7.851,
      "eval_steps_per_second": 0.251,
      "eval_wer": 1.084672166739036,
      "step": 200
    },
    {
      "epoch": 0.13829133374308544,
      "grad_norm": 1.9000691175460815,
      "learning_rate": 4.48e-06,
      "loss": 2.611,
      "step": 225
    },
    {
      "epoch": 0.15365703749231716,
      "grad_norm": 1.4311325550079346,
      "learning_rate": 4.960000000000001e-06,
      "loss": 2.4044,
      "step": 250
    },
    {
      "epoch": 0.16902274124154887,
      "grad_norm": 2.4365923404693604,
      "learning_rate": 5.460000000000001e-06,
      "loss": 2.1992,
      "step": 275
    },
    {
      "epoch": 0.18438844499078058,
      "grad_norm": 2.4958994388580322,
      "learning_rate": 5.9600000000000005e-06,
      "loss": 2.5628,
      "step": 300
    },
    {
      "epoch": 0.1997541487400123,
      "grad_norm": 1.8009036779403687,
      "learning_rate": 6.460000000000001e-06,
      "loss": 1.8897,
      "step": 325
    },
    {
      "epoch": 0.215119852489244,
      "grad_norm": 1.8635932207107544,
      "learning_rate": 6.96e-06,
      "loss": 2.2831,
      "step": 350
    },
    {
      "epoch": 0.23048555623847572,
      "grad_norm": 1.8662347793579102,
      "learning_rate": 7.4600000000000006e-06,
      "loss": 2.0718,
      "step": 375
    },
    {
      "epoch": 0.24585125998770743,
      "grad_norm": 2.4394640922546387,
      "learning_rate": 7.960000000000002e-06,
      "loss": 2.1359,
      "step": 400
    },
    {
      "epoch": 0.24585125998770743,
      "eval_loss": 2.1913249492645264,
      "eval_runtime": 49.901,
      "eval_samples_per_second": 8.156,
      "eval_steps_per_second": 0.261,
      "eval_wer": 1.1096396005210596,
      "step": 400
    }
  ],
  "logging_steps": 25,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2338970624e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}