{
  "best_metric": 7.983619689941406,
  "best_model_checkpoint": "./models/magic/checkpoint-2440",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 2440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20491803278688525,
      "grad_norm": 66683.3203125,
      "learning_rate": 1.918032786885246e-05,
      "loss": 8.3446,
      "step": 100
    },
    {
      "epoch": 0.4098360655737705,
      "grad_norm": 88343.609375,
      "learning_rate": 1.836065573770492e-05,
      "loss": 8.344,
      "step": 200
    },
    {
      "epoch": 0.6147540983606558,
      "grad_norm": 112013.890625,
      "learning_rate": 1.7540983606557377e-05,
      "loss": 8.3305,
      "step": 300
    },
    {
      "epoch": 0.819672131147541,
      "grad_norm": 107013.5390625,
      "learning_rate": 1.6721311475409837e-05,
      "loss": 8.3148,
      "step": 400
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.0054761904761904765,
      "eval_loss": 8.26321792602539,
      "eval_runtime": 37.0437,
      "eval_samples_per_second": 113.38,
      "eval_steps_per_second": 3.563,
      "step": 488
    },
    {
      "epoch": 1.0245901639344261,
      "grad_norm": 109507.2734375,
      "learning_rate": 1.5901639344262295e-05,
      "loss": 8.2911,
      "step": 500
    },
    {
      "epoch": 1.2295081967213115,
      "grad_norm": 107816.046875,
      "learning_rate": 1.5081967213114754e-05,
      "loss": 8.2472,
      "step": 600
    },
    {
      "epoch": 1.4344262295081966,
      "grad_norm": 108237.4453125,
      "learning_rate": 1.4262295081967214e-05,
      "loss": 8.2257,
      "step": 700
    },
    {
      "epoch": 1.639344262295082,
      "grad_norm": 111140.1640625,
      "learning_rate": 1.3442622950819673e-05,
      "loss": 8.2074,
      "step": 800
    },
    {
      "epoch": 1.8442622950819674,
      "grad_norm": 107701.734375,
      "learning_rate": 1.2622950819672132e-05,
      "loss": 8.1958,
      "step": 900
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.05976190476190476,
      "eval_loss": 8.151880264282227,
      "eval_runtime": 37.1118,
      "eval_samples_per_second": 113.172,
      "eval_steps_per_second": 3.557,
      "step": 976
    },
    {
      "epoch": 2.0491803278688523,
      "grad_norm": 111871.875,
      "learning_rate": 1.1803278688524591e-05,
      "loss": 8.1707,
      "step": 1000
    },
    {
      "epoch": 2.2540983606557377,
      "grad_norm": 117045.8203125,
      "learning_rate": 1.0983606557377052e-05,
      "loss": 8.1275,
      "step": 1100
    },
    {
      "epoch": 2.459016393442623,
      "grad_norm": 110316.90625,
      "learning_rate": 1.0163934426229509e-05,
      "loss": 8.1193,
      "step": 1200
    },
    {
      "epoch": 2.663934426229508,
      "grad_norm": 114694.4609375,
      "learning_rate": 9.344262295081968e-06,
      "loss": 8.1023,
      "step": 1300
    },
    {
      "epoch": 2.8688524590163933,
      "grad_norm": 113438.8828125,
      "learning_rate": 8.524590163934427e-06,
      "loss": 8.089,
      "step": 1400
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.15666666666666668,
      "eval_loss": 8.05963134765625,
      "eval_runtime": 36.9672,
      "eval_samples_per_second": 113.614,
      "eval_steps_per_second": 3.571,
      "step": 1464
    },
    {
      "epoch": 3.0737704918032787,
      "grad_norm": 130567.7890625,
      "learning_rate": 7.704918032786886e-06,
      "loss": 8.0675,
      "step": 1500
    },
    {
      "epoch": 3.278688524590164,
      "grad_norm": 111654.6484375,
      "learning_rate": 6.885245901639345e-06,
      "loss": 8.049,
      "step": 1600
    },
    {
      "epoch": 3.4836065573770494,
      "grad_norm": 110857.171875,
      "learning_rate": 6.065573770491804e-06,
      "loss": 8.0304,
      "step": 1700
    },
    {
      "epoch": 3.6885245901639343,
      "grad_norm": 120024.4765625,
      "learning_rate": 5.245901639344263e-06,
      "loss": 8.0182,
      "step": 1800
    },
    {
      "epoch": 3.8934426229508197,
      "grad_norm": 113248.8359375,
      "learning_rate": 4.426229508196722e-06,
      "loss": 8.0208,
      "step": 1900
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.2276190476190476,
      "eval_loss": 8.003293991088867,
      "eval_runtime": 36.914,
      "eval_samples_per_second": 113.778,
      "eval_steps_per_second": 3.576,
      "step": 1952
    },
    {
      "epoch": 4.098360655737705,
      "grad_norm": 114861.390625,
      "learning_rate": 3.6065573770491806e-06,
      "loss": 8.0023,
      "step": 2000
    },
    {
      "epoch": 4.30327868852459,
      "grad_norm": 114188.84375,
      "learning_rate": 2.786885245901639e-06,
      "loss": 7.9841,
      "step": 2100
    },
    {
      "epoch": 4.508196721311475,
      "grad_norm": 114966.8828125,
      "learning_rate": 1.9672131147540985e-06,
      "loss": 7.9843,
      "step": 2200
    },
    {
      "epoch": 4.713114754098361,
      "grad_norm": 124759.4375,
      "learning_rate": 1.1475409836065575e-06,
      "loss": 7.9772,
      "step": 2300
    },
    {
      "epoch": 4.918032786885246,
      "grad_norm": 110823.4609375,
      "learning_rate": 3.278688524590164e-07,
      "loss": 7.983,
      "step": 2400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.25452380952380954,
      "eval_loss": 7.983619689941406,
      "eval_runtime": 37.5516,
      "eval_samples_per_second": 111.846,
      "eval_steps_per_second": 3.515,
      "step": 2440
    },
    {
      "epoch": 5.0,
      "step": 2440,
      "total_flos": 6.271796898791424e+18,
      "train_loss": 8.131903288794346,
      "train_runtime": 1755.8039,
      "train_samples_per_second": 44.424,
      "train_steps_per_second": 1.39
    }
  ],
  "logging_steps": 100,
  "max_steps": 2440,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.271796898791424e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}