{
  "best_metric": 2.6525354385375977,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0654205607476634,
  "eval_steps": 25,
  "global_step": 41,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07476635514018691,
      "grad_norm": 12.477754592895508,
      "learning_rate": 5e-05,
      "loss": 36.282,
      "step": 1
    },
    {
      "epoch": 0.07476635514018691,
      "eval_loss": 3.060811758041382,
      "eval_runtime": 0.2235,
      "eval_samples_per_second": 402.634,
      "eval_steps_per_second": 53.685,
      "step": 1
    },
    {
      "epoch": 0.14953271028037382,
      "grad_norm": 20.176923751831055,
      "learning_rate": 0.0001,
      "loss": 42.7203,
      "step": 2
    },
    {
      "epoch": 0.22429906542056074,
      "grad_norm": 58.305416107177734,
      "learning_rate": 9.983786540671051e-05,
      "loss": 55.9882,
      "step": 3
    },
    {
      "epoch": 0.29906542056074764,
      "grad_norm": 43.69650650024414,
      "learning_rate": 9.935251313189564e-05,
      "loss": 46.6822,
      "step": 4
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 11.199535369873047,
      "learning_rate": 9.85470908713026e-05,
      "loss": 39.1852,
      "step": 5
    },
    {
      "epoch": 0.4485981308411215,
      "grad_norm": 38.89397430419922,
      "learning_rate": 9.742682209735727e-05,
      "loss": 49.5955,
      "step": 6
    },
    {
      "epoch": 0.5233644859813084,
      "grad_norm": 61.32236862182617,
      "learning_rate": 9.599897218294122e-05,
      "loss": 53.197,
      "step": 7
    },
    {
      "epoch": 0.5981308411214953,
      "grad_norm": 12.756400108337402,
      "learning_rate": 9.42728012826605e-05,
      "loss": 37.8893,
      "step": 8
    },
    {
      "epoch": 0.6728971962616822,
      "grad_norm": 21.473352432250977,
      "learning_rate": 9.225950427718975e-05,
      "loss": 44.1805,
      "step": 9
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 56.127567291259766,
      "learning_rate": 8.997213817017507e-05,
      "loss": 51.3801,
      "step": 10
    },
    {
      "epoch": 0.822429906542056,
      "grad_norm": 14.02192211151123,
      "learning_rate": 8.742553740855506e-05,
      "loss": 37.1142,
      "step": 11
    },
    {
      "epoch": 0.897196261682243,
      "grad_norm": 17.666357040405273,
      "learning_rate": 8.463621767547998e-05,
      "loss": 41.9052,
      "step": 12
    },
    {
      "epoch": 0.9719626168224299,
      "grad_norm": 46.62154769897461,
      "learning_rate": 8.162226877976887e-05,
      "loss": 54.8154,
      "step": 13
    },
    {
      "epoch": 1.0467289719626167,
      "grad_norm": 17.347009658813477,
      "learning_rate": 7.840323733655778e-05,
      "loss": 38.5423,
      "step": 14
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 15.314298629760742,
      "learning_rate": 7.500000000000001e-05,
      "loss": 38.654,
      "step": 15
    },
    {
      "epoch": 1.1962616822429906,
      "grad_norm": 18.701488494873047,
      "learning_rate": 7.143462807015271e-05,
      "loss": 43.01,
      "step": 16
    },
    {
      "epoch": 1.2710280373831775,
      "grad_norm": 38.01932907104492,
      "learning_rate": 6.773024435212678e-05,
      "loss": 47.2544,
      "step": 17
    },
    {
      "epoch": 1.3457943925233644,
      "grad_norm": 12.378190994262695,
      "learning_rate": 6.391087319582264e-05,
      "loss": 37.0581,
      "step": 18
    },
    {
      "epoch": 1.4205607476635513,
      "grad_norm": 15.580528259277344,
      "learning_rate": 6.0001284688802226e-05,
      "loss": 43.4861,
      "step": 19
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 45.35871124267578,
      "learning_rate": 5.602683401276615e-05,
      "loss": 48.2524,
      "step": 20
    },
    {
      "epoch": 1.5700934579439252,
      "grad_norm": 10.084379196166992,
      "learning_rate": 5.201329700547076e-05,
      "loss": 35.6121,
      "step": 21
    },
    {
      "epoch": 1.644859813084112,
      "grad_norm": 14.362281799316406,
      "learning_rate": 4.798670299452926e-05,
      "loss": 40.7834,
      "step": 22
    },
    {
      "epoch": 1.719626168224299,
      "grad_norm": 35.394142150878906,
      "learning_rate": 4.397316598723385e-05,
      "loss": 47.5074,
      "step": 23
    },
    {
      "epoch": 1.794392523364486,
      "grad_norm": 28.203998565673828,
      "learning_rate": 3.9998715311197785e-05,
      "loss": 40.5831,
      "step": 24
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 12.237970352172852,
      "learning_rate": 3.608912680417737e-05,
      "loss": 38.2899,
      "step": 25
    },
    {
      "epoch": 1.8691588785046729,
      "eval_loss": 2.6525354385375977,
      "eval_runtime": 0.2206,
      "eval_samples_per_second": 408.063,
      "eval_steps_per_second": 54.408,
      "step": 25
    },
    {
      "epoch": 1.9439252336448598,
      "grad_norm": 24.12892723083496,
      "learning_rate": 3.226975564787322e-05,
      "loss": 45.5854,
      "step": 26
    },
    {
      "epoch": 2.0186915887850465,
      "grad_norm": 37.78959655761719,
      "learning_rate": 2.8565371929847284e-05,
      "loss": 44.6516,
      "step": 27
    },
    {
      "epoch": 2.0934579439252334,
      "grad_norm": 8.680986404418945,
      "learning_rate": 2.500000000000001e-05,
      "loss": 35.3207,
      "step": 28
    },
    {
      "epoch": 2.1682242990654204,
      "grad_norm": 15.282751083374023,
      "learning_rate": 2.1596762663442218e-05,
      "loss": 40.6217,
      "step": 29
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 46.95691680908203,
      "learning_rate": 1.837773122023114e-05,
      "loss": 48.1315,
      "step": 30
    },
    {
      "epoch": 2.317757009345794,
      "grad_norm": 8.166537284851074,
      "learning_rate": 1.536378232452003e-05,
      "loss": 34.1442,
      "step": 31
    },
    {
      "epoch": 2.392523364485981,
      "grad_norm": 12.032683372497559,
      "learning_rate": 1.257446259144494e-05,
      "loss": 38.6735,
      "step": 32
    },
    {
      "epoch": 2.467289719626168,
      "grad_norm": 26.102218627929688,
      "learning_rate": 1.0027861829824952e-05,
      "loss": 46.4967,
      "step": 33
    },
    {
      "epoch": 2.542056074766355,
      "grad_norm": 23.399124145507812,
      "learning_rate": 7.740495722810271e-06,
      "loss": 39.0796,
      "step": 34
    },
    {
      "epoch": 2.616822429906542,
      "grad_norm": 10.58754825592041,
      "learning_rate": 5.727198717339511e-06,
      "loss": 38.4049,
      "step": 35
    },
    {
      "epoch": 2.691588785046729,
      "grad_norm": 20.302406311035156,
      "learning_rate": 4.001027817058789e-06,
      "loss": 43.2738,
      "step": 36
    },
    {
      "epoch": 2.7663551401869158,
      "grad_norm": 41.23537063598633,
      "learning_rate": 2.573177902642726e-06,
      "loss": 46.7631,
      "step": 37
    },
    {
      "epoch": 2.8411214953271027,
      "grad_norm": 9.360715866088867,
      "learning_rate": 1.4529091286973995e-06,
      "loss": 37.2748,
      "step": 38
    },
    {
      "epoch": 2.9158878504672896,
      "grad_norm": 15.773098945617676,
      "learning_rate": 6.474868681043578e-07,
      "loss": 41.8172,
      "step": 39
    },
    {
      "epoch": 2.9906542056074765,
      "grad_norm": 44.15718078613281,
      "learning_rate": 1.6213459328950352e-07,
      "loss": 47.5584,
      "step": 40
    },
    {
      "epoch": 3.0654205607476634,
      "grad_norm": 11.734289169311523,
      "learning_rate": 0.0,
      "loss": 35.6847,
      "step": 41
    }
  ],
  "logging_steps": 1,
  "max_steps": 41,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5827368409104384.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}