{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.98848810437452,
  "eval_steps": 500,
  "global_step": 26000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "learning_rate": 1.9616270145817347e-05,
      "loss": 0.2232,
      "step": 500
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.9232540291634693e-05,
      "loss": 0.1483,
      "step": 1000
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.8848810437452035e-05,
      "loss": 0.1446,
      "step": 1500
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.846508058326938e-05,
      "loss": 0.1343,
      "step": 2000
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.8081350729086726e-05,
      "loss": 0.1195,
      "step": 2500
    },
    {
      "epoch": 0.58,
      "learning_rate": 1.7697620874904068e-05,
      "loss": 0.1209,
      "step": 3000
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.7313891020721413e-05,
      "loss": 0.1186,
      "step": 3500
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.693016116653876e-05,
      "loss": 0.0884,
      "step": 4000
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.65464313123561e-05,
      "loss": 0.1025,
      "step": 4500
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.6162701458173446e-05,
      "loss": 0.1051,
      "step": 5000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.11643879860639572,
      "eval_runtime": 429.8512,
      "eval_samples_per_second": 12.125,
      "eval_steps_per_second": 3.031,
      "step": 5212
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.5778971603990792e-05,
      "loss": 0.0896,
      "step": 5500
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.5395241749808137e-05,
      "loss": 0.0864,
      "step": 6000
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.5011511895625481e-05,
      "loss": 0.0913,
      "step": 6500
    },
    {
      "epoch": 1.34,
      "learning_rate": 1.4627782041442827e-05,
      "loss": 0.0857,
      "step": 7000
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.4244052187260169e-05,
      "loss": 0.0853,
      "step": 7500
    },
    {
      "epoch": 1.53,
      "learning_rate": 1.3860322333077514e-05,
      "loss": 0.082,
      "step": 8000
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.3476592478894858e-05,
      "loss": 0.0887,
      "step": 8500
    },
    {
      "epoch": 1.73,
      "learning_rate": 1.3092862624712203e-05,
      "loss": 0.0797,
      "step": 9000
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.2709132770529549e-05,
      "loss": 0.0805,
      "step": 9500
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.2325402916346893e-05,
      "loss": 0.0717,
      "step": 10000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.08999563753604889,
      "eval_runtime": 429.7074,
      "eval_samples_per_second": 12.129,
      "eval_steps_per_second": 3.032,
      "step": 10424
    },
    {
      "epoch": 2.01,
      "learning_rate": 1.1941673062164237e-05,
      "loss": 0.0714,
      "step": 10500
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.155794320798158e-05,
      "loss": 0.0674,
      "step": 11000
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.1174213353798926e-05,
      "loss": 0.0626,
      "step": 11500
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.0790483499616271e-05,
      "loss": 0.0705,
      "step": 12000
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.0406753645433617e-05,
      "loss": 0.0702,
      "step": 12500
    },
    {
      "epoch": 2.49,
      "learning_rate": 1.002302379125096e-05,
      "loss": 0.064,
      "step": 13000
    },
    {
      "epoch": 2.59,
      "learning_rate": 9.639293937068304e-06,
      "loss": 0.066,
      "step": 13500
    },
    {
      "epoch": 2.69,
      "learning_rate": 9.25556408288565e-06,
      "loss": 0.0726,
      "step": 14000
    },
    {
      "epoch": 2.78,
      "learning_rate": 8.871834228702994e-06,
      "loss": 0.0668,
      "step": 14500
    },
    {
      "epoch": 2.88,
      "learning_rate": 8.48810437452034e-06,
      "loss": 0.0728,
      "step": 15000
    },
    {
      "epoch": 2.97,
      "learning_rate": 8.104374520337683e-06,
      "loss": 0.0619,
      "step": 15500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.08260077238082886,
      "eval_runtime": 429.8218,
      "eval_samples_per_second": 12.126,
      "eval_steps_per_second": 3.031,
      "step": 15636
    },
    {
      "epoch": 3.07,
      "learning_rate": 7.720644666155027e-06,
      "loss": 0.0697,
      "step": 16000
    },
    {
      "epoch": 3.17,
      "learning_rate": 7.336914811972372e-06,
      "loss": 0.0552,
      "step": 16500
    },
    {
      "epoch": 3.26,
      "learning_rate": 6.953184957789717e-06,
      "loss": 0.0568,
      "step": 17000
    },
    {
      "epoch": 3.36,
      "learning_rate": 6.569455103607061e-06,
      "loss": 0.0596,
      "step": 17500
    },
    {
      "epoch": 3.45,
      "learning_rate": 6.185725249424405e-06,
      "loss": 0.0551,
      "step": 18000
    },
    {
      "epoch": 3.55,
      "learning_rate": 5.801995395241751e-06,
      "loss": 0.0608,
      "step": 18500
    },
    {
      "epoch": 3.65,
      "learning_rate": 5.418265541059095e-06,
      "loss": 0.0572,
      "step": 19000
    },
    {
      "epoch": 3.74,
      "learning_rate": 5.034535686876439e-06,
      "loss": 0.0488,
      "step": 19500
    },
    {
      "epoch": 3.84,
      "learning_rate": 4.650805832693784e-06,
      "loss": 0.0626,
      "step": 20000
    },
    {
      "epoch": 3.93,
      "learning_rate": 4.2670759785111286e-06,
      "loss": 0.0528,
      "step": 20500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.08744284510612488,
      "eval_runtime": 429.0419,
      "eval_samples_per_second": 12.148,
      "eval_steps_per_second": 3.037,
      "step": 20848
    },
    {
      "epoch": 4.03,
      "learning_rate": 3.883346124328473e-06,
      "loss": 0.059,
      "step": 21000
    },
    {
      "epoch": 4.13,
      "learning_rate": 3.4996162701458174e-06,
      "loss": 0.0516,
      "step": 21500
    },
    {
      "epoch": 4.22,
      "learning_rate": 3.1158864159631625e-06,
      "loss": 0.0455,
      "step": 22000
    },
    {
      "epoch": 4.32,
      "learning_rate": 2.7321565617805067e-06,
      "loss": 0.0515,
      "step": 22500
    },
    {
      "epoch": 4.41,
      "learning_rate": 2.3484267075978514e-06,
      "loss": 0.0505,
      "step": 23000
    },
    {
      "epoch": 4.51,
      "learning_rate": 1.964696853415196e-06,
      "loss": 0.0468,
      "step": 23500
    },
    {
      "epoch": 4.6,
      "learning_rate": 1.5809669992325404e-06,
      "loss": 0.0464,
      "step": 24000
    },
    {
      "epoch": 4.7,
      "learning_rate": 1.197237145049885e-06,
      "loss": 0.0429,
      "step": 24500
    },
    {
      "epoch": 4.8,
      "learning_rate": 8.135072908672295e-07,
      "loss": 0.0527,
      "step": 25000
    },
    {
      "epoch": 4.89,
      "learning_rate": 4.297774366845741e-07,
      "loss": 0.0485,
      "step": 25500
    },
    {
      "epoch": 4.99,
      "learning_rate": 4.604758250191865e-08,
      "loss": 0.042,
      "step": 26000
    }
  ],
  "logging_steps": 500,
  "max_steps": 26060,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 4.68119978016768e+17,
  "trial_name": null,
  "trial_params": null
}