{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.0,
  "eval_steps": 500,
  "global_step": 4770,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9968553459119497,
      "grad_norm": 0.8087115287780762,
      "learning_rate": 1.916642112888053e-05,
      "loss": 0.6426,
      "step": 317
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.4332258064516129,
      "eval_loss": 0.3768153786659241,
      "eval_runtime": 2.7098,
      "eval_samples_per_second": 1144.007,
      "eval_steps_per_second": 23.987,
      "step": 318
    },
    {
      "epoch": 1.9937106918238994,
      "grad_norm": 0.5955724120140076,
      "learning_rate": 2.817448208700141e-05,
      "loss": 0.2402,
      "step": 634
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8554838709677419,
      "eval_loss": 0.09723836928606033,
      "eval_runtime": 2.6882,
      "eval_samples_per_second": 1153.187,
      "eval_steps_per_second": 24.18,
      "step": 636
    },
    {
      "epoch": 2.990566037735849,
      "grad_norm": 0.3765904903411865,
      "learning_rate": 2.6015074248128236e-05,
      "loss": 0.0949,
      "step": 951
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9080645161290323,
      "eval_loss": 0.04660297930240631,
      "eval_runtime": 2.6607,
      "eval_samples_per_second": 1165.117,
      "eval_steps_per_second": 24.43,
      "step": 954
    },
    {
      "epoch": 3.9874213836477987,
      "grad_norm": 0.26715776324272156,
      "learning_rate": 2.385566640925506e-05,
      "loss": 0.0599,
      "step": 1268
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9283870967741935,
      "eval_loss": 0.03252074867486954,
      "eval_runtime": 2.6662,
      "eval_samples_per_second": 1162.716,
      "eval_steps_per_second": 24.38,
      "step": 1272
    },
    {
      "epoch": 4.984276729559748,
      "grad_norm": 0.20171727240085602,
      "learning_rate": 2.1696258570381886e-05,
      "loss": 0.0462,
      "step": 1585
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9309677419354838,
      "eval_loss": 0.026936793699860573,
      "eval_runtime": 2.6975,
      "eval_samples_per_second": 1149.215,
      "eval_steps_per_second": 24.096,
      "step": 1590
    },
    {
      "epoch": 5.981132075471698,
      "grad_norm": 0.21952152252197266,
      "learning_rate": 1.9536850731508715e-05,
      "loss": 0.0395,
      "step": 1902
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9345161290322581,
      "eval_loss": 0.023858336731791496,
      "eval_runtime": 2.6565,
      "eval_samples_per_second": 1166.971,
      "eval_steps_per_second": 24.469,
      "step": 1908
    },
    {
      "epoch": 6.977987421383648,
      "grad_norm": 0.30627548694610596,
      "learning_rate": 1.737744289263554e-05,
      "loss": 0.0356,
      "step": 2219
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9354838709677419,
      "eval_loss": 0.022071754559874535,
      "eval_runtime": 2.6587,
      "eval_samples_per_second": 1165.985,
      "eval_steps_per_second": 24.448,
      "step": 2226
    },
    {
      "epoch": 7.9748427672955975,
      "grad_norm": 0.16566617786884308,
      "learning_rate": 1.5218035053762365e-05,
      "loss": 0.0328,
      "step": 2536
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9370967741935484,
      "eval_loss": 0.021090181544423103,
      "eval_runtime": 2.6745,
      "eval_samples_per_second": 1159.074,
      "eval_steps_per_second": 24.303,
      "step": 2544
    },
    {
      "epoch": 8.971698113207546,
      "grad_norm": 0.16504672169685364,
      "learning_rate": 1.3058627214889192e-05,
      "loss": 0.0308,
      "step": 2853
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9348387096774193,
      "eval_loss": 0.019647156819701195,
      "eval_runtime": 2.6634,
      "eval_samples_per_second": 1163.916,
      "eval_steps_per_second": 24.405,
      "step": 2862
    },
    {
      "epoch": 9.968553459119496,
      "grad_norm": 0.1753920316696167,
      "learning_rate": 1.0899219376016019e-05,
      "loss": 0.0293,
      "step": 3170
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9367741935483871,
      "eval_loss": 0.019323358312249184,
      "eval_runtime": 2.6766,
      "eval_samples_per_second": 1158.172,
      "eval_steps_per_second": 24.284,
      "step": 3180
    },
    {
      "epoch": 10.965408805031446,
      "grad_norm": 0.16472382843494415,
      "learning_rate": 8.739811537142844e-06,
      "loss": 0.028,
      "step": 3487
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9338709677419355,
      "eval_loss": 0.019101083278656006,
      "eval_runtime": 2.6594,
      "eval_samples_per_second": 1165.659,
      "eval_steps_per_second": 24.441,
      "step": 3498
    },
    {
      "epoch": 11.962264150943396,
      "grad_norm": 0.1276603639125824,
      "learning_rate": 6.580403698269671e-06,
      "loss": 0.0271,
      "step": 3804
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9341935483870968,
      "eval_loss": 0.018606653437018394,
      "eval_runtime": 2.6418,
      "eval_samples_per_second": 1173.436,
      "eval_steps_per_second": 24.604,
      "step": 3816
    },
    {
      "epoch": 12.959119496855346,
      "grad_norm": 0.13854487240314484,
      "learning_rate": 4.420995859396498e-06,
      "loss": 0.0264,
      "step": 4121
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9358064516129032,
      "eval_loss": 0.018243877217173576,
      "eval_runtime": 2.6544,
      "eval_samples_per_second": 1167.882,
      "eval_steps_per_second": 24.488,
      "step": 4134
    },
    {
      "epoch": 13.955974842767295,
      "grad_norm": 0.1205814927816391,
      "learning_rate": 2.2615880205233243e-06,
      "loss": 0.0259,
      "step": 4438
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9361290322580645,
      "eval_loss": 0.01793498359620571,
      "eval_runtime": 2.6531,
      "eval_samples_per_second": 1168.457,
      "eval_steps_per_second": 24.5,
      "step": 4452
    },
    {
      "epoch": 14.952830188679245,
      "grad_norm": 0.13082493841648102,
      "learning_rate": 1.0218018165015018e-07,
      "loss": 0.0256,
      "step": 4755
    }
  ],
  "logging_steps": 317,
  "max_steps": 4770,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 1000000000.0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1259981299661700.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.41037073052052975,
    "fp16": false,
    "learning_rate": 2.920309591561292e-05,
    "lr_scheduler": "cosine",
    "num_train_epochs": 15,
    "temperature": 4,
    "warmup_steps": 483,
    "weight_decay": 0.1243517366819557
  }
}