{
  "best_metric": 1.0736711025238037,
  "best_model_checkpoint": "outputs/checkpoint-741",
  "epoch": 19.865047233468285,
  "eval_steps": 500,
  "global_step": 920,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4318488529014845,
      "grad_norm": 1.7572729587554932,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.9109,
      "step": 20
    },
    {
      "epoch": 0.863697705802969,
      "grad_norm": 1.7804350852966309,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.7779,
      "step": 40
    },
    {
      "epoch": 0.9932523616734144,
      "eval_loss": 2.4225194454193115,
      "eval_runtime": 12.3603,
      "eval_samples_per_second": 30.096,
      "eval_steps_per_second": 3.802,
      "step": 46
    },
    {
      "epoch": 1.2955465587044535,
      "grad_norm": 1.2646853923797607,
      "learning_rate": 1.2e-05,
      "loss": 2.4665,
      "step": 60
    },
    {
      "epoch": 1.7273954116059378,
      "grad_norm": 0.6016279458999634,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.0703,
      "step": 80
    },
    {
      "epoch": 1.9865047233468287,
      "eval_loss": 1.802353858947754,
      "eval_runtime": 12.3146,
      "eval_samples_per_second": 30.208,
      "eval_steps_per_second": 3.817,
      "step": 92
    },
    {
      "epoch": 2.1592442645074224,
      "grad_norm": 0.5685097575187683,
      "learning_rate": 2e-05,
      "loss": 2.0044,
      "step": 100
    },
    {
      "epoch": 2.591093117408907,
      "grad_norm": 0.5021196603775024,
      "learning_rate": 1.9970658011837404e-05,
      "loss": 1.8962,
      "step": 120
    },
    {
      "epoch": 2.979757085020243,
      "eval_loss": 1.635912537574768,
      "eval_runtime": 12.2905,
      "eval_samples_per_second": 30.267,
      "eval_steps_per_second": 3.824,
      "step": 138
    },
    {
      "epoch": 3.0229419703103915,
      "grad_norm": 0.7247435450553894,
      "learning_rate": 1.9882804237803487e-05,
      "loss": 1.8878,
      "step": 140
    },
    {
      "epoch": 3.454790823211876,
      "grad_norm": 0.736103892326355,
      "learning_rate": 1.9736954238777793e-05,
      "loss": 1.7908,
      "step": 160
    },
    {
      "epoch": 3.8866396761133606,
      "grad_norm": 0.7256895899772644,
      "learning_rate": 1.9533963920549307e-05,
      "loss": 1.7571,
      "step": 180
    },
    {
      "epoch": 3.9946018893387314,
      "eval_loss": 1.50630784034729,
      "eval_runtime": 12.3056,
      "eval_samples_per_second": 30.23,
      "eval_steps_per_second": 3.819,
      "step": 185
    },
    {
      "epoch": 4.318488529014845,
      "grad_norm": 0.9457058310508728,
      "learning_rate": 1.927502451102095e-05,
      "loss": 1.6802,
      "step": 200
    },
    {
      "epoch": 4.75033738191633,
      "grad_norm": 1.0999541282653809,
      "learning_rate": 1.8961655569610557e-05,
      "loss": 1.6754,
      "step": 220
    },
    {
      "epoch": 4.987854251012146,
      "eval_loss": 1.4030449390411377,
      "eval_runtime": 12.3178,
      "eval_samples_per_second": 30.2,
      "eval_steps_per_second": 3.816,
      "step": 231
    },
    {
      "epoch": 5.182186234817814,
      "grad_norm": 0.947107195854187,
      "learning_rate": 1.8595696069872013e-05,
      "loss": 1.6317,
      "step": 240
    },
    {
      "epoch": 5.614035087719298,
      "grad_norm": 1.141870141029358,
      "learning_rate": 1.8179293607667177e-05,
      "loss": 1.5657,
      "step": 260
    },
    {
      "epoch": 5.98110661268556,
      "eval_loss": 1.318428874015808,
      "eval_runtime": 12.2941,
      "eval_samples_per_second": 30.258,
      "eval_steps_per_second": 3.823,
      "step": 277
    },
    {
      "epoch": 6.045883940620783,
      "grad_norm": 1.174417495727539,
      "learning_rate": 1.7714891798219432e-05,
      "loss": 1.529,
      "step": 280
    },
    {
      "epoch": 6.477732793522267,
      "grad_norm": 1.2409827709197998,
      "learning_rate": 1.720521593600787e-05,
      "loss": 1.4535,
      "step": 300
    },
    {
      "epoch": 6.909581646423752,
      "grad_norm": 1.337241291999817,
      "learning_rate": 1.6653257001655652e-05,
      "loss": 1.4943,
      "step": 320
    },
    {
      "epoch": 6.995951417004049,
      "eval_loss": 1.251551866531372,
      "eval_runtime": 12.3013,
      "eval_samples_per_second": 30.241,
      "eval_steps_per_second": 3.821,
      "step": 324
    },
    {
      "epoch": 7.341430499325236,
      "grad_norm": 1.5739471912384033,
      "learning_rate": 1.6062254109666383e-05,
      "loss": 1.412,
      "step": 340
    },
    {
      "epoch": 7.77327935222672,
      "grad_norm": 1.3894844055175781,
      "learning_rate": 1.5435675500012212e-05,
      "loss": 1.4226,
      "step": 360
    },
    {
      "epoch": 7.989203778677463,
      "eval_loss": 1.2039802074432373,
      "eval_runtime": 12.2961,
      "eval_samples_per_second": 30.254,
      "eval_steps_per_second": 3.822,
      "step": 370
    },
    {
      "epoch": 8.205128205128204,
      "grad_norm": 1.5219025611877441,
      "learning_rate": 1.477719818512263e-05,
      "loss": 1.3758,
      "step": 380
    },
    {
      "epoch": 8.63697705802969,
      "grad_norm": 1.8435232639312744,
      "learning_rate": 1.4090686371713403e-05,
      "loss": 1.3446,
      "step": 400
    },
    {
      "epoch": 8.982456140350877,
      "eval_loss": 1.1595730781555176,
      "eval_runtime": 12.2686,
      "eval_samples_per_second": 30.321,
      "eval_steps_per_second": 3.831,
      "step": 416
    },
    {
      "epoch": 9.068825910931174,
      "grad_norm": 1.4322152137756348,
      "learning_rate": 1.3380168784085028e-05,
      "loss": 1.3416,
      "step": 420
    },
    {
      "epoch": 9.50067476383266,
      "grad_norm": 1.7712641954421997,
      "learning_rate": 1.264981502196662e-05,
      "loss": 1.2809,
      "step": 440
    },
    {
      "epoch": 9.932523616734143,
      "grad_norm": 1.577054500579834,
      "learning_rate": 1.1903911091646684e-05,
      "loss": 1.3033,
      "step": 460
    },
    {
      "epoch": 9.997300944669366,
      "eval_loss": 1.1354897022247314,
      "eval_runtime": 12.2627,
      "eval_samples_per_second": 30.336,
      "eval_steps_per_second": 3.833,
      "step": 463
    },
    {
      "epoch": 10.364372469635628,
      "grad_norm": 1.8544738292694092,
      "learning_rate": 1.1146834253984008e-05,
      "loss": 1.2552,
      "step": 480
    },
    {
      "epoch": 10.796221322537113,
      "grad_norm": 1.9725706577301025,
      "learning_rate": 1.0383027336900356e-05,
      "loss": 1.2186,
      "step": 500
    },
    {
      "epoch": 10.99055330634278,
      "eval_loss": 1.1109498739242554,
      "eval_runtime": 12.2456,
      "eval_samples_per_second": 30.378,
      "eval_steps_per_second": 3.838,
      "step": 509
    },
    {
      "epoch": 11.228070175438596,
      "grad_norm": 1.9104483127593994,
      "learning_rate": 9.616972663099648e-06,
      "loss": 1.2626,
      "step": 520
    },
    {
      "epoch": 11.65991902834008,
      "grad_norm": 2.256964921951294,
      "learning_rate": 8.853165746015997e-06,
      "loss": 1.224,
      "step": 540
    },
    {
      "epoch": 11.983805668016194,
      "eval_loss": 1.100693702697754,
      "eval_runtime": 12.1688,
      "eval_samples_per_second": 30.57,
      "eval_steps_per_second": 3.862,
      "step": 555
    },
    {
      "epoch": 12.091767881241566,
      "grad_norm": 2.1132662296295166,
      "learning_rate": 8.096088908353316e-06,
      "loss": 1.1934,
      "step": 560
    },
    {
      "epoch": 12.523616734143049,
      "grad_norm": 1.6598352193832397,
      "learning_rate": 7.350184978033386e-06,
      "loss": 1.175,
      "step": 580
    },
    {
      "epoch": 12.955465587044534,
      "grad_norm": 2.5442428588867188,
      "learning_rate": 6.619831215914974e-06,
      "loss": 1.1688,
      "step": 600
    },
    {
      "epoch": 12.998650472334683,
      "eval_loss": 1.08635675907135,
      "eval_runtime": 12.1828,
      "eval_samples_per_second": 30.535,
      "eval_steps_per_second": 3.858,
      "step": 602
    },
    {
      "epoch": 13.387314439946019,
      "grad_norm": 1.9692449569702148,
      "learning_rate": 5.9093136282866014e-06,
      "loss": 1.1366,
      "step": 620
    },
    {
      "epoch": 13.819163292847504,
      "grad_norm": 2.064366340637207,
      "learning_rate": 5.22280181487737e-06,
      "loss": 1.1971,
      "step": 640
    },
    {
      "epoch": 13.991902834008098,
      "eval_loss": 1.0831245183944702,
      "eval_runtime": 12.1763,
      "eval_samples_per_second": 30.551,
      "eval_steps_per_second": 3.86,
      "step": 648
    },
    {
      "epoch": 14.251012145748987,
      "grad_norm": 2.0344982147216797,
      "learning_rate": 4.56432449998779e-06,
      "loss": 1.162,
      "step": 660
    },
    {
      "epoch": 14.682860998650472,
      "grad_norm": 2.1550192832946777,
      "learning_rate": 3.937745890333623e-06,
      "loss": 1.1322,
      "step": 680
    },
    {
      "epoch": 14.98515519568151,
      "eval_loss": 1.0802310705184937,
      "eval_runtime": 12.1751,
      "eval_samples_per_second": 30.554,
      "eval_steps_per_second": 3.86,
      "step": 694
    },
    {
      "epoch": 15.114709851551957,
      "grad_norm": 2.1858975887298584,
      "learning_rate": 3.3467429983443477e-06,
      "loss": 1.1555,
      "step": 700
    },
    {
      "epoch": 15.54655870445344,
      "grad_norm": 2.1912343502044678,
      "learning_rate": 2.7947840639921308e-06,
      "loss": 1.1123,
      "step": 720
    },
    {
      "epoch": 15.978407557354926,
      "grad_norm": 2.083388328552246,
      "learning_rate": 2.2851082017805704e-06,
      "loss": 1.1694,
      "step": 740
    },
    {
      "epoch": 16.0,
      "eval_loss": 1.0736711025238037,
      "eval_runtime": 12.1577,
      "eval_samples_per_second": 30.598,
      "eval_steps_per_second": 3.866,
      "step": 741
    },
    {
      "epoch": 16.41025641025641,
      "grad_norm": 2.217958927154541,
      "learning_rate": 1.820706392332824e-06,
      "loss": 1.0918,
      "step": 760
    },
    {
      "epoch": 16.842105263157894,
      "grad_norm": 2.019780397415161,
      "learning_rate": 1.4043039301279904e-06,
      "loss": 1.1445,
      "step": 780
    },
    {
      "epoch": 16.993252361673413,
      "eval_loss": 1.0765767097473145,
      "eval_runtime": 12.1611,
      "eval_samples_per_second": 30.589,
      "eval_steps_per_second": 3.865,
      "step": 787
    },
    {
      "epoch": 17.27395411605938,
      "grad_norm": 2.233635425567627,
      "learning_rate": 1.0383444303894453e-06,
      "loss": 1.1372,
      "step": 800
    },
    {
      "epoch": 17.705802968960864,
      "grad_norm": 2.201345682144165,
      "learning_rate": 7.249754889790539e-07,
      "loss": 1.069,
      "step": 820
    },
    {
      "epoch": 17.98650472334683,
      "eval_loss": 1.0766650438308716,
      "eval_runtime": 12.1705,
      "eval_samples_per_second": 30.566,
      "eval_steps_per_second": 3.862,
      "step": 833
    },
    {
      "epoch": 18.13765182186235,
      "grad_norm": 2.139754295349121,
      "learning_rate": 4.660360794506946e-07,
      "loss": 1.145,
      "step": 840
    },
    {
      "epoch": 18.569500674763834,
      "grad_norm": 2.235074758529663,
      "learning_rate": 2.6304576122221035e-07,
      "loss": 1.0888,
      "step": 860
    },
    {
      "epoch": 18.979757085020243,
      "eval_loss": 1.076890230178833,
      "eval_runtime": 12.1657,
      "eval_samples_per_second": 30.578,
      "eval_steps_per_second": 3.863,
      "step": 879
    },
    {
      "epoch": 19.00134952766532,
      "grad_norm": 2.2143237590789795,
      "learning_rate": 1.1719576219651585e-07,
      "loss": 1.0904,
      "step": 880
    },
    {
      "epoch": 19.4331983805668,
      "grad_norm": 2.2261524200439453,
      "learning_rate": 2.9341988162595593e-08,
      "loss": 1.087,
      "step": 900
    },
    {
      "epoch": 19.865047233468285,
      "grad_norm": 2.2200422286987305,
      "learning_rate": 0.0,
      "loss": 1.1188,
      "step": 920
    },
    {
      "epoch": 19.865047233468285,
      "eval_loss": 1.0770390033721924,
      "eval_runtime": 12.1693,
      "eval_samples_per_second": 30.569,
      "eval_steps_per_second": 3.862,
      "step": 920
    }
  ],
  "logging_steps": 20,
  "max_steps": 920,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 8.214585670867354e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}