|
{
  "best_metric": 0.5928366184234619,
  "best_model_checkpoint": "/scratch/skscla001/results/mms-zeroshot-300m-natbed-combined-model/checkpoint-2400",
  "epoch": 3.7546933667083855,
  "eval_steps": 200,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2503128911138924,
      "eval_loss": 2.66123104095459,
      "eval_runtime": 85.5481,
      "eval_samples_per_second": 15.874,
      "eval_steps_per_second": 1.987,
      "eval_wer": 1.0,
      "step": 200
    },
    {
      "epoch": 0.5006257822277848,
      "eval_loss": 0.7787109017372131,
      "eval_runtime": 85.3612,
      "eval_samples_per_second": 15.909,
      "eval_steps_per_second": 1.992,
      "eval_wer": 0.6600606010973712,
      "step": 400
    },
    {
      "epoch": 0.6257822277847309,
      "grad_norm": 0.983116626739502,
      "learning_rate": 0.0002950104733975701,
      "loss": 3.3034,
      "step": 500
    },
    {
      "epoch": 0.7509386733416771,
      "eval_loss": 0.7195701599121094,
      "eval_runtime": 85.1972,
      "eval_samples_per_second": 15.939,
      "eval_steps_per_second": 1.995,
      "eval_wer": 0.6193595938088609,
      "step": 600
    },
    {
      "epoch": 1.0012515644555695,
      "eval_loss": 0.6961039900779724,
      "eval_runtime": 85.8666,
      "eval_samples_per_second": 15.815,
      "eval_steps_per_second": 1.98,
      "eval_wer": 0.5965932356072394,
      "step": 800
    },
    {
      "epoch": 1.2515644555694618,
      "grad_norm": 3.753706693649292,
      "learning_rate": 0.0002887264348554671,
      "loss": 0.8261,
      "step": 1000
    },
    {
      "epoch": 1.2515644555694618,
      "eval_loss": 0.6694886684417725,
      "eval_runtime": 86.9509,
      "eval_samples_per_second": 15.618,
      "eval_steps_per_second": 1.955,
      "eval_wer": 0.5762017852755712,
      "step": 1000
    },
    {
      "epoch": 1.5018773466833542,
      "eval_loss": 0.6313649415969849,
      "eval_runtime": 84.9355,
      "eval_samples_per_second": 15.989,
      "eval_steps_per_second": 2.002,
      "eval_wer": 0.5728441569077062,
      "step": 1200
    },
    {
      "epoch": 1.7521902377972465,
      "eval_loss": 0.6477869749069214,
      "eval_runtime": 84.1241,
      "eval_samples_per_second": 16.143,
      "eval_steps_per_second": 2.021,
      "eval_wer": 0.5575300958152486,
      "step": 1400
    },
    {
      "epoch": 1.8773466833541927,
      "grad_norm": 2.5700743198394775,
      "learning_rate": 0.000282442396313364,
      "loss": 0.7513,
      "step": 1500
    },
    {
      "epoch": 2.002503128911139,
      "eval_loss": 0.6373749375343323,
      "eval_runtime": 85.3519,
      "eval_samples_per_second": 15.911,
      "eval_steps_per_second": 1.992,
      "eval_wer": 0.5554008680697732,
      "step": 1600
    },
    {
      "epoch": 2.252816020025031,
      "eval_loss": 0.6032695770263672,
      "eval_runtime": 84.1367,
      "eval_samples_per_second": 16.14,
      "eval_steps_per_second": 2.021,
      "eval_wer": 0.5483580378347391,
      "step": 1800
    },
    {
      "epoch": 2.5031289111389237,
      "grad_norm": 0.43018773198127747,
      "learning_rate": 0.00027615835777126095,
      "loss": 0.7173,
      "step": 2000
    },
    {
      "epoch": 2.5031289111389237,
      "eval_loss": 0.6269567608833313,
      "eval_runtime": 84.4441,
      "eval_samples_per_second": 16.082,
      "eval_steps_per_second": 2.013,
      "eval_wer": 0.541888461223487,
      "step": 2000
    },
    {
      "epoch": 2.7534418022528158,
      "eval_loss": 0.6057294011116028,
      "eval_runtime": 84.7682,
      "eval_samples_per_second": 16.02,
      "eval_steps_per_second": 2.005,
      "eval_wer": 0.5432806485955286,
      "step": 2200
    },
    {
      "epoch": 3.0037546933667083,
      "eval_loss": 0.5928366184234619,
      "eval_runtime": 84.7776,
      "eval_samples_per_second": 16.018,
      "eval_steps_per_second": 2.005,
      "eval_wer": 0.527802800753419,
      "step": 2400
    },
    {
      "epoch": 3.1289111389236544,
      "grad_norm": 1.3301851749420166,
      "learning_rate": 0.00026987431922915794,
      "loss": 0.7092,
      "step": 2500
    },
    {
      "epoch": 3.254067584480601,
      "eval_loss": 0.5980406403541565,
      "eval_runtime": 84.3511,
      "eval_samples_per_second": 16.099,
      "eval_steps_per_second": 2.015,
      "eval_wer": 0.5321431496191958,
      "step": 2600
    },
    {
      "epoch": 3.504380475594493,
      "eval_loss": 0.5975831747055054,
      "eval_runtime": 85.6923,
      "eval_samples_per_second": 15.847,
      "eval_steps_per_second": 1.984,
      "eval_wer": 0.5260830398820735,
      "step": 2800
    },
    {
      "epoch": 3.7546933667083855,
      "grad_norm": 0.4005778431892395,
      "learning_rate": 0.00026359028068705483,
      "loss": 0.696,
      "step": 3000
    },
    {
      "epoch": 3.7546933667083855,
      "eval_loss": 0.6369301676750183,
      "eval_runtime": 85.3682,
      "eval_samples_per_second": 15.908,
      "eval_steps_per_second": 1.991,
      "eval_wer": 0.5247727458848579,
      "step": 3000
    },
    {
      "epoch": 3.7546933667083855,
      "step": 3000,
      "total_flos": 5.095058470999591e+18,
      "train_loss": 1.167226084391276,
      "train_runtime": 3484.8413,
      "train_samples_per_second": 54.984,
      "train_steps_per_second": 6.878
    }
  ],
  "logging_steps": 500,
  "max_steps": 23970,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 200,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.095058470999591e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|