{ "best_metric": 0.9241104125976562, "best_model_checkpoint": "outputs/checkpoint-231", "epoch": 11.983805668016194, "eval_steps": 500, "global_step": 555, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.4318488529014845, "grad_norm": 1.787990689277649, "learning_rate": 4e-05, "loss": 2.7228, "step": 20 }, { "epoch": 0.863697705802969, "grad_norm": 0.5038349032402039, "learning_rate": 8e-05, "loss": 2.062, "step": 40 }, { "epoch": 0.9932523616734144, "eval_loss": 1.6799557209014893, "eval_runtime": 11.9387, "eval_samples_per_second": 31.159, "eval_steps_per_second": 3.937, "step": 46 }, { "epoch": 1.2955465587044535, "grad_norm": 0.6116560697555542, "learning_rate": 0.00012, "loss": 1.8813, "step": 60 }, { "epoch": 1.7273954116059378, "grad_norm": 0.8396438360214233, "learning_rate": 0.00016, "loss": 1.6421, "step": 80 }, { "epoch": 1.9865047233468287, "eval_loss": 1.2483179569244385, "eval_runtime": 11.921, "eval_samples_per_second": 31.205, "eval_steps_per_second": 3.943, "step": 92 }, { "epoch": 2.1592442645074224, "grad_norm": 1.1572123765945435, "learning_rate": 0.0002, "loss": 1.5211, "step": 100 }, { "epoch": 2.591093117408907, "grad_norm": 1.1013579368591309, "learning_rate": 0.00019943348002101371, "loss": 1.327, "step": 120 }, { "epoch": 2.979757085020243, "eval_loss": 1.0307273864746094, "eval_runtime": 11.9077, "eval_samples_per_second": 31.24, "eval_steps_per_second": 3.947, "step": 138 }, { "epoch": 3.0229419703103915, "grad_norm": 1.080468773841858, "learning_rate": 0.00019774033898178667, "loss": 1.2809, "step": 140 }, { "epoch": 3.454790823211876, "grad_norm": 1.2337204217910767, "learning_rate": 0.00019493976084683813, "loss": 1.0656, "step": 160 }, { "epoch": 3.8866396761133606, "grad_norm": 1.1269233226776123, "learning_rate": 0.00019106347728549135, "loss": 1.0563, "step": 180 }, { "epoch": 3.9946018893387314, "eval_loss": 0.9378284215927124, "eval_runtime": 11.9305, "eval_samples_per_second": 31.181, "eval_steps_per_second": 3.939, "step": 185 }, { "epoch": 4.318488529014845, "grad_norm": 1.3905510902404785, "learning_rate": 0.0001861554081393806, "loss": 0.8855, "step": 200 }, { "epoch": 4.75033738191633, "grad_norm": 1.3569377660751343, "learning_rate": 0.00018027116379309638, "loss": 0.9061, "step": 220 }, { "epoch": 4.987854251012146, "eval_loss": 0.9241104125976562, "eval_runtime": 11.9189, "eval_samples_per_second": 31.211, "eval_steps_per_second": 3.943, "step": 231 }, { "epoch": 5.182186234817814, "grad_norm": 1.3102238178253174, "learning_rate": 0.00017347741508630672, "loss": 0.8228, "step": 240 }, { "epoch": 5.614035087719298, "grad_norm": 1.1156977415084839, "learning_rate": 0.00016585113790650388, "loss": 0.711, "step": 260 }, { "epoch": 5.98110661268556, "eval_loss": 0.9796295762062073, "eval_runtime": 11.9256, "eval_samples_per_second": 31.194, "eval_steps_per_second": 3.941, "step": 277 }, { "epoch": 6.045883940620783, "grad_norm": 0.9861016869544983, "learning_rate": 0.0001574787410214407, "loss": 0.6591, "step": 280 }, { "epoch": 6.477732793522267, "grad_norm": 1.1385196447372437, "learning_rate": 0.00014845508703326504, "loss": 0.5255, "step": 300 }, { "epoch": 6.909581646423752, "grad_norm": 1.0708714723587036, "learning_rate": 0.00013888241754733208, "loss": 0.5846, "step": 320 }, { "epoch": 6.995951417004049, "eval_loss": 1.0800774097442627, "eval_runtime": 11.9183, "eval_samples_per_second": 31.212, "eval_steps_per_second": 3.944, "step": 324 }, { "epoch": 
7.341430499325236, "grad_norm": 1.0299906730651855, "learning_rate": 0.0001288691947339621, "loss": 0.4571, "step": 340 }, { "epoch": 7.77327935222672, "grad_norm": 1.1635417938232422, "learning_rate": 0.00011852887240871145, "loss": 0.465, "step": 360 }, { "epoch": 7.989203778677463, "eval_loss": 1.1291037797927856, "eval_runtime": 11.9151, "eval_samples_per_second": 31.221, "eval_steps_per_second": 3.945, "step": 370 }, { "epoch": 8.205128205128204, "grad_norm": 0.9930064082145691, "learning_rate": 0.00010797861055530831, "loss": 0.3898, "step": 380 }, { "epoch": 8.63697705802969, "grad_norm": 1.086180329322815, "learning_rate": 9.733794785622253e-05, "loss": 0.341, "step": 400 }, { "epoch": 8.982456140350877, "eval_loss": 1.2221601009368896, "eval_runtime": 11.9463, "eval_samples_per_second": 31.139, "eval_steps_per_second": 3.934, "step": 416 }, { "epoch": 9.068825910931174, "grad_norm": 0.881294310092926, "learning_rate": 8.672744727162781e-05, "loss": 0.3388, "step": 420 }, { "epoch": 9.50067476383266, "grad_norm": 0.9569498896598816, "learning_rate": 7.626733001288851e-05, "loss": 0.2579, "step": 440 }, { "epoch": 9.932523616734143, "grad_norm": 0.9368858337402344, "learning_rate": 6.607611338819697e-05, "loss": 0.2657, "step": 460 }, { "epoch": 9.997300944669366, "eval_loss": 1.310530424118042, "eval_runtime": 11.9264, "eval_samples_per_second": 31.191, "eval_steps_per_second": 3.941, "step": 463 }, { "epoch": 10.364372469635628, "grad_norm": 0.8425939679145813, "learning_rate": 5.626926795411447e-05, "loss": 0.1949, "step": 480 }, { "epoch": 10.796221322537113, "grad_norm": 0.9611827731132507, "learning_rate": 4.695790918802576e-05, "loss": 0.1918, "step": 500 }, { "epoch": 10.99055330634278, "eval_loss": 1.393843412399292, "eval_runtime": 11.9319, "eval_samples_per_second": 31.177, "eval_steps_per_second": 3.939, "step": 509 }, { "epoch": 11.228070175438596, "grad_norm": 0.75007563829422, "learning_rate": 3.824753850538082e-05, "loss": 0.1854, "step": 520 }, { "epoch": 11.65991902834008, "grad_norm": 0.710945725440979, "learning_rate": 3.0236847886501542e-05, "loss": 0.162, "step": 540 }, { "epoch": 11.983805668016194, "eval_loss": 1.4677594900131226, "eval_runtime": 11.9217, "eval_samples_per_second": 31.204, "eval_steps_per_second": 3.942, "step": 555 } ], "logging_steps": 20, "max_steps": 690, "num_input_tokens_seen": 0, "num_train_epochs": 15, "save_steps": 500, "total_flos": 4.962667014222643e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }
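
The file above is the trainer_state.json that the Hugging Face transformers Trainer writes alongside each checkpoint. As a minimal sketch of how to inspect it (STATE_PATH below is an assumption; point it at wherever this run's checkpoint directory actually lives), the following Python snippet loads the state and splits log_history into training-loss and eval-loss entries, which makes the rising eval loss after the best checkpoint (outputs/checkpoint-231, eval_loss 0.9241) easy to see:

import json

# Assumed path; trainer_state.json sits inside each saved checkpoint directory.
STATE_PATH = "outputs/checkpoint-555/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss" instead.
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for step, loss in eval_log:
    print(f"step {step:4d}  eval_loss {loss:.4f}")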