{ "best_metric": 0.8839941620826721, "best_model_checkpoint": "outputs/checkpoint-185", "epoch": 8.982456140350877, "eval_steps": 500, "global_step": 416, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.4318488529014845, "grad_norm": 0.6420097947120667, "learning_rate": 4e-05, "loss": 1.9916, "step": 20 }, { "epoch": 0.863697705802969, "grad_norm": 0.48513880372047424, "learning_rate": 8e-05, "loss": 1.8266, "step": 40 }, { "epoch": 0.9932523616734144, "eval_loss": 1.5141435861587524, "eval_runtime": 12.7002, "eval_samples_per_second": 29.291, "eval_steps_per_second": 3.701, "step": 46 }, { "epoch": 1.2955465587044535, "grad_norm": 0.711148202419281, "learning_rate": 0.00012, "loss": 1.7047, "step": 60 }, { "epoch": 1.7273954116059378, "grad_norm": 0.805604875087738, "learning_rate": 0.00016, "loss": 1.481, "step": 80 }, { "epoch": 1.9865047233468287, "eval_loss": 1.1201857328414917, "eval_runtime": 12.66, "eval_samples_per_second": 29.384, "eval_steps_per_second": 3.712, "step": 92 }, { "epoch": 2.1592442645074224, "grad_norm": 1.1893383264541626, "learning_rate": 0.0002, "loss": 1.362, "step": 100 }, { "epoch": 2.591093117408907, "grad_norm": 1.204134464263916, "learning_rate": 0.00019943348002101371, "loss": 1.1848, "step": 120 }, { "epoch": 2.979757085020243, "eval_loss": 0.9436031579971313, "eval_runtime": 12.6699, "eval_samples_per_second": 29.361, "eval_steps_per_second": 3.71, "step": 138 }, { "epoch": 3.0229419703103915, "grad_norm": 1.0650242567062378, "learning_rate": 0.00019774033898178667, "loss": 1.154, "step": 140 }, { "epoch": 3.454790823211876, "grad_norm": 1.2103493213653564, "learning_rate": 0.00019493976084683813, "loss": 0.9441, "step": 160 }, { "epoch": 3.8866396761133606, "grad_norm": 1.035901427268982, "learning_rate": 0.00019106347728549135, "loss": 0.9405, "step": 180 }, { "epoch": 3.9946018893387314, "eval_loss": 0.8839941620826721, "eval_runtime": 12.6739, "eval_samples_per_second": 29.352, "eval_steps_per_second": 3.708, "step": 185 }, { "epoch": 4.318488529014845, "grad_norm": 1.2742565870285034, "learning_rate": 0.0001861554081393806, "loss": 0.7772, "step": 200 }, { "epoch": 4.75033738191633, "grad_norm": 1.2412225008010864, "learning_rate": 0.00018027116379309638, "loss": 0.8008, "step": 220 }, { "epoch": 4.987854251012146, "eval_loss": 0.9072163701057434, "eval_runtime": 12.6529, "eval_samples_per_second": 29.4, "eval_steps_per_second": 3.715, "step": 231 }, { "epoch": 5.182186234817814, "grad_norm": 1.1546653509140015, "learning_rate": 0.00017347741508630672, "loss": 0.7324, "step": 240 }, { "epoch": 5.614035087719298, "grad_norm": 1.1490098237991333, "learning_rate": 0.00016585113790650388, "loss": 0.6233, "step": 260 }, { "epoch": 5.98110661268556, "eval_loss": 0.9705129861831665, "eval_runtime": 12.6584, "eval_samples_per_second": 29.388, "eval_steps_per_second": 3.713, "step": 277 }, { "epoch": 6.045883940620783, "grad_norm": 0.9665806293487549, "learning_rate": 0.0001574787410214407, "loss": 0.5839, "step": 280 }, { "epoch": 6.477732793522267, "grad_norm": 1.0736712217330933, "learning_rate": 0.00014845508703326504, "loss": 0.4599, "step": 300 }, { "epoch": 6.909581646423752, "grad_norm": 1.0971572399139404, "learning_rate": 0.00013888241754733208, "loss": 0.5172, "step": 320 }, { "epoch": 6.995951417004049, "eval_loss": 1.1059515476226807, "eval_runtime": 12.6618, "eval_samples_per_second": 29.38, "eval_steps_per_second": 3.712, "step": 324 }, { "epoch": 7.341430499325236, 
"grad_norm": 1.080107569694519, "learning_rate": 0.0001288691947339621, "loss": 0.3924, "step": 340 }, { "epoch": 7.77327935222672, "grad_norm": 1.2625104188919067, "learning_rate": 0.00011852887240871145, "loss": 0.4118, "step": 360 }, { "epoch": 7.989203778677463, "eval_loss": 1.1868247985839844, "eval_runtime": 12.6599, "eval_samples_per_second": 29.384, "eval_steps_per_second": 3.713, "step": 370 }, { "epoch": 8.205128205128204, "grad_norm": 1.0898717641830444, "learning_rate": 0.00010797861055530831, "loss": 0.3353, "step": 380 }, { "epoch": 8.63697705802969, "grad_norm": 1.3591790199279785, "learning_rate": 9.733794785622253e-05, "loss": 0.2891, "step": 400 }, { "epoch": 8.982456140350877, "eval_loss": 1.282555341720581, "eval_runtime": 12.677, "eval_samples_per_second": 29.344, "eval_steps_per_second": 3.707, "step": 416 } ], "logging_steps": 20, "max_steps": 690, "num_input_tokens_seen": 0, "num_train_epochs": 15, "save_steps": 500, "total_flos": 3.20770960155648e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }