{
  "best_metric": 82.73972602739727,
  "best_model_checkpoint": "/home/rcj2772/data_sets/Seneca/output_dir/checkpoint-100",
  "epoch": 62.008,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.0,
      "learning_rate": 9.979166666666668e-06,
      "loss": 4.0703,
      "step": 25
    },
    {
      "epoch": 6.0,
      "learning_rate": 9.458333333333334e-06,
      "loss": 0.9051,
      "step": 50
    },
    {
      "epoch": 9.01,
      "learning_rate": 8.9375e-06,
      "loss": 0.0795,
      "step": 75
    },
    {
      "epoch": 12.01,
      "learning_rate": 8.416666666666667e-06,
      "loss": 0.009,
      "step": 100
    },
    {
      "epoch": 12.01,
      "eval_loss": 2.0225086212158203,
      "eval_runtime": 26.0259,
      "eval_samples_per_second": 0.999,
      "eval_steps_per_second": 0.269,
      "eval_wer": 82.73972602739727,
      "step": 100
    },
    {
      "epoch": 15.01,
      "learning_rate": 7.895833333333333e-06,
      "loss": 0.0015,
      "step": 125
    },
    {
      "epoch": 18.01,
      "learning_rate": 7.375000000000001e-06,
      "loss": 0.0007,
      "step": 150
    },
    {
      "epoch": 21.01,
      "learning_rate": 6.854166666666667e-06,
      "loss": 0.0005,
      "step": 175
    },
    {
      "epoch": 24.02,
      "learning_rate": 6.333333333333333e-06,
      "loss": 0.0004,
      "step": 200
    },
    {
      "epoch": 24.02,
      "eval_loss": 2.1611859798431396,
      "eval_runtime": 19.928,
      "eval_samples_per_second": 1.305,
      "eval_steps_per_second": 0.351,
      "eval_wer": 86.02739726027397,
      "step": 200
    },
    {
      "epoch": 28.0,
      "learning_rate": 5.812500000000001e-06,
      "loss": 0.0003,
      "step": 225
    },
    {
      "epoch": 31.0,
      "learning_rate": 5.291666666666667e-06,
      "loss": 0.0003,
      "step": 250
    },
    {
      "epoch": 34.01,
      "learning_rate": 4.770833333333334e-06,
      "loss": 0.0003,
      "step": 275
    },
    {
      "epoch": 37.01,
      "learning_rate": 4.25e-06,
      "loss": 0.0002,
      "step": 300
    },
    {
      "epoch": 37.01,
      "eval_loss": 2.2043375968933105,
      "eval_runtime": 20.0279,
      "eval_samples_per_second": 1.298,
      "eval_steps_per_second": 0.35,
      "eval_wer": 85.2054794520548,
      "step": 300
    },
    {
      "epoch": 40.01,
      "learning_rate": 3.7291666666666672e-06,
      "loss": 0.0002,
      "step": 325
    },
    {
      "epoch": 43.01,
      "learning_rate": 3.2083333333333337e-06,
      "loss": 0.0002,
      "step": 350
    },
    {
      "epoch": 46.01,
      "learning_rate": 2.6875e-06,
      "loss": 0.0002,
      "step": 375
    },
    {
      "epoch": 49.02,
      "learning_rate": 2.166666666666667e-06,
      "loss": 0.0002,
      "step": 400
    },
    {
      "epoch": 49.02,
      "eval_loss": 2.2259132862091064,
      "eval_runtime": 19.9281,
      "eval_samples_per_second": 1.305,
      "eval_steps_per_second": 0.351,
      "eval_wer": 85.47945205479452,
      "step": 400
    },
    {
      "epoch": 53.0,
      "learning_rate": 1.6458333333333334e-06,
      "loss": 0.0002,
      "step": 425
    },
    {
      "epoch": 56.0,
      "learning_rate": 1.125e-06,
      "loss": 0.0002,
      "step": 450
    },
    {
      "epoch": 59.01,
      "learning_rate": 6.041666666666667e-07,
      "loss": 0.0002,
      "step": 475
    },
    {
      "epoch": 62.01,
      "learning_rate": 8.333333333333334e-08,
      "loss": 0.0002,
      "step": 500
    },
    {
      "epoch": 62.01,
      "eval_loss": 2.232715129852295,
      "eval_runtime": 20.22,
      "eval_samples_per_second": 1.286,
      "eval_steps_per_second": 0.346,
      "eval_wer": 85.2054794520548,
      "step": 500
    },
    {
      "epoch": 62.01,
      "step": 500,
      "total_flos": 5.234938970541916e+17,
      "train_loss": 0.2534824407529086,
      "train_runtime": 588.6372,
      "train_samples_per_second": 3.398,
      "train_steps_per_second": 0.849
    }
  ],
  "max_steps": 500,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 5.234938970541916e+17,
  "trial_name": null,
  "trial_params": null
}
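
The state above appears to be the trainer_state.json that the Hugging Face Trainer writes alongside each checkpoint: "best_metric" is the best word error rate ("eval_wer", here reached at checkpoint-100), and "log_history" interleaves training-loss entries with periodic evaluation entries. A minimal sketch for pulling the evaluation rows back out of such a file follows; the file path is illustrative, and the field names are taken directly from the structure shown above.

import json

# Illustrative path: trainer_state.json as saved next to a checkpoint
# (the exact location is an assumption, not taken from the log above).
STATE_PATH = "output_dir/checkpoint-500/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_wer".
eval_logs = [entry for entry in state["log_history"] if "eval_wer" in entry]

print(f"best WER {state['best_metric']:.2f} at {state['best_model_checkpoint']}")
for entry in eval_logs:
    print(f"step {entry['step']:4d}  eval_loss {entry['eval_loss']:.3f}  WER {entry['eval_wer']:.2f}")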