{ "best_metric": 0.9229514598846436, "best_model_checkpoint": "outputs/checkpoint-231", "epoch": 11.983805668016194, "eval_steps": 500, "global_step": 555, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.4318488529014845, "grad_norm": 1.8083888292312622, "learning_rate": 4e-05, "loss": 2.7223, "step": 20 }, { "epoch": 0.863697705802969, "grad_norm": 0.509838342666626, "learning_rate": 8e-05, "loss": 2.063, "step": 40 }, { "epoch": 0.9932523616734144, "eval_loss": 1.6813331842422485, "eval_runtime": 12.0916, "eval_samples_per_second": 30.765, "eval_steps_per_second": 3.887, "step": 46 }, { "epoch": 1.2955465587044535, "grad_norm": 0.6181766390800476, "learning_rate": 0.00012, "loss": 1.8828, "step": 60 }, { "epoch": 1.7273954116059378, "grad_norm": 0.8454991579055786, "learning_rate": 0.00016, "loss": 1.6444, "step": 80 }, { "epoch": 1.9865047233468287, "eval_loss": 1.2503478527069092, "eval_runtime": 12.1403, "eval_samples_per_second": 30.642, "eval_steps_per_second": 3.871, "step": 92 }, { "epoch": 2.1592442645074224, "grad_norm": 1.1776914596557617, "learning_rate": 0.0002, "loss": 1.5242, "step": 100 }, { "epoch": 2.591093117408907, "grad_norm": 1.091369390487671, "learning_rate": 0.00019943348002101371, "loss": 1.3296, "step": 120 }, { "epoch": 2.979757085020243, "eval_loss": 1.028580904006958, "eval_runtime": 12.1164, "eval_samples_per_second": 30.702, "eval_steps_per_second": 3.879, "step": 138 }, { "epoch": 3.0229419703103915, "grad_norm": 1.0799137353897095, "learning_rate": 0.00019774033898178667, "loss": 1.2821, "step": 140 }, { "epoch": 3.454790823211876, "grad_norm": 1.2310456037521362, "learning_rate": 0.00019493976084683813, "loss": 1.0677, "step": 160 }, { "epoch": 3.8866396761133606, "grad_norm": 1.1375850439071655, "learning_rate": 0.00019106347728549135, "loss": 1.0588, "step": 180 }, { "epoch": 3.9946018893387314, "eval_loss": 0.9346915483474731, "eval_runtime": 12.162, "eval_samples_per_second": 30.587, "eval_steps_per_second": 3.864, "step": 185 }, { "epoch": 4.318488529014845, "grad_norm": 1.3716574907302856, "learning_rate": 0.0001861554081393806, "loss": 0.8876, "step": 200 }, { "epoch": 4.75033738191633, "grad_norm": 1.3819169998168945, "learning_rate": 0.00018027116379309638, "loss": 0.9074, "step": 220 }, { "epoch": 4.987854251012146, "eval_loss": 0.9229514598846436, "eval_runtime": 12.1062, "eval_samples_per_second": 30.728, "eval_steps_per_second": 3.882, "step": 231 }, { "epoch": 5.182186234817814, "grad_norm": 1.327073097229004, "learning_rate": 0.00017347741508630672, "loss": 0.8266, "step": 240 }, { "epoch": 5.614035087719298, "grad_norm": 1.140026569366455, "learning_rate": 0.00016585113790650388, "loss": 0.7142, "step": 260 }, { "epoch": 5.98110661268556, "eval_loss": 0.9703238606452942, "eval_runtime": 12.1046, "eval_samples_per_second": 30.732, "eval_steps_per_second": 3.883, "step": 277 }, { "epoch": 6.045883940620783, "grad_norm": 1.0288299322128296, "learning_rate": 0.0001574787410214407, "loss": 0.6626, "step": 280 }, { "epoch": 6.477732793522267, "grad_norm": 1.1120308637619019, "learning_rate": 0.00014845508703326504, "loss": 0.5305, "step": 300 }, { "epoch": 6.909581646423752, "grad_norm": 1.0534147024154663, "learning_rate": 0.00013888241754733208, "loss": 0.5869, "step": 320 }, { "epoch": 6.995951417004049, "eval_loss": 1.0680469274520874, "eval_runtime": 12.1954, "eval_samples_per_second": 30.503, "eval_steps_per_second": 3.854, "step": 324 }, { "epoch": 
7.341430499325236, "grad_norm": 1.0005006790161133, "learning_rate": 0.0001288691947339621, "loss": 0.4619, "step": 340 }, { "epoch": 7.77327935222672, "grad_norm": 1.1516958475112915, "learning_rate": 0.00011852887240871145, "loss": 0.4722, "step": 360 }, { "epoch": 7.989203778677463, "eval_loss": 1.1098763942718506, "eval_runtime": 12.1616, "eval_samples_per_second": 30.588, "eval_steps_per_second": 3.865, "step": 370 }, { "epoch": 8.205128205128204, "grad_norm": 0.9738859534263611, "learning_rate": 0.00010797861055530831, "loss": 0.3932, "step": 380 }, { "epoch": 8.63697705802969, "grad_norm": 1.0977412462234497, "learning_rate": 9.733794785622253e-05, "loss": 0.3471, "step": 400 }, { "epoch": 8.982456140350877, "eval_loss": 1.191792607307434, "eval_runtime": 12.1511, "eval_samples_per_second": 30.614, "eval_steps_per_second": 3.868, "step": 416 }, { "epoch": 9.068825910931174, "grad_norm": 0.8212457895278931, "learning_rate": 8.672744727162781e-05, "loss": 0.3473, "step": 420 }, { "epoch": 9.50067476383266, "grad_norm": 1.007986068725586, "learning_rate": 7.626733001288851e-05, "loss": 0.2672, "step": 440 }, { "epoch": 9.932523616734143, "grad_norm": 1.0054905414581299, "learning_rate": 6.607611338819697e-05, "loss": 0.2752, "step": 460 }, { "epoch": 9.997300944669366, "eval_loss": 1.290297031402588, "eval_runtime": 12.1732, "eval_samples_per_second": 30.559, "eval_steps_per_second": 3.861, "step": 463 }, { "epoch": 10.364372469635628, "grad_norm": 0.9115879535675049, "learning_rate": 5.626926795411447e-05, "loss": 0.2023, "step": 480 }, { "epoch": 10.796221322537113, "grad_norm": 0.9712518453598022, "learning_rate": 4.695790918802576e-05, "loss": 0.2015, "step": 500 }, { "epoch": 10.99055330634278, "eval_loss": 1.3780165910720825, "eval_runtime": 12.1643, "eval_samples_per_second": 30.581, "eval_steps_per_second": 3.864, "step": 509 }, { "epoch": 11.228070175438596, "grad_norm": 0.9184095859527588, "learning_rate": 3.824753850538082e-05, "loss": 0.1934, "step": 520 }, { "epoch": 11.65991902834008, "grad_norm": 0.763605535030365, "learning_rate": 3.0236847886501542e-05, "loss": 0.1716, "step": 540 }, { "epoch": 11.983805668016194, "eval_loss": 1.4491263628005981, "eval_runtime": 12.1321, "eval_samples_per_second": 30.662, "eval_steps_per_second": 3.874, "step": 555 } ], "logging_steps": 20, "max_steps": 690, "num_input_tokens_seen": 0, "num_train_epochs": 15, "save_steps": 500, "total_flos": 4.962667014222643e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }