{ "best_metric": 1.2785857915878296, "best_model_checkpoint": "outputs/checkpoint-460", "epoch": 9.932523616734143, "eval_steps": 500, "global_step": 460, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.4318488529014845, "grad_norm": 1.2942979335784912, "learning_rate": 6e-06, "loss": 2.9144, "step": 20 }, { "epoch": 0.863697705802969, "grad_norm": 1.3759782314300537, "learning_rate": 1.2e-05, "loss": 2.8035, "step": 40 }, { "epoch": 0.9932523616734144, "eval_loss": 2.4753777980804443, "eval_runtime": 12.0523, "eval_samples_per_second": 30.865, "eval_steps_per_second": 3.9, "step": 46 }, { "epoch": 1.2955465587044535, "grad_norm": 1.218517541885376, "learning_rate": 1.8e-05, "loss": 2.5059, "step": 60 }, { "epoch": 1.7273954116059378, "grad_norm": 0.5991122722625732, "learning_rate": 2.4e-05, "loss": 2.0867, "step": 80 }, { "epoch": 1.9865047233468287, "eval_loss": 1.8320975303649902, "eval_runtime": 12.0828, "eval_samples_per_second": 30.787, "eval_steps_per_second": 3.89, "step": 92 }, { "epoch": 2.1592442645074224, "grad_norm": 0.5117250680923462, "learning_rate": 3e-05, "loss": 2.0309, "step": 100 }, { "epoch": 2.591093117408907, "grad_norm": 0.4918386936187744, "learning_rate": 2.977211629518312e-05, "loss": 1.9261, "step": 120 }, { "epoch": 2.979757085020243, "eval_loss": 1.6659022569656372, "eval_runtime": 12.0832, "eval_samples_per_second": 30.787, "eval_steps_per_second": 3.89, "step": 138 }, { "epoch": 3.0229419703103915, "grad_norm": 0.7239399552345276, "learning_rate": 2.9095389311788626e-05, "loss": 1.917, "step": 140 }, { "epoch": 3.454790823211876, "grad_norm": 0.745520830154419, "learning_rate": 2.7990381056766583e-05, "loss": 1.8193, "step": 160 }, { "epoch": 3.8866396761133606, "grad_norm": 0.7848306894302368, "learning_rate": 2.649066664678467e-05, "loss": 1.7836, "step": 180 }, { "epoch": 3.9946018893387314, "eval_loss": 1.5325894355773926, "eval_runtime": 12.0426, "eval_samples_per_second": 30.89, "eval_steps_per_second": 3.903, "step": 185 }, { "epoch": 4.318488529014845, "grad_norm": 1.0365893840789795, "learning_rate": 2.464181414529809e-05, "loss": 1.708, "step": 200 }, { "epoch": 4.75033738191633, "grad_norm": 1.2468961477279663, "learning_rate": 2.25e-05, "loss": 1.7037, "step": 220 }, { "epoch": 4.987854251012146, "eval_loss": 1.4344478845596313, "eval_runtime": 12.0914, "eval_samples_per_second": 30.766, "eval_steps_per_second": 3.887, "step": 231 }, { "epoch": 5.182186234817814, "grad_norm": 1.0612260103225708, "learning_rate": 2.0130302149885033e-05, "loss": 1.6629, "step": 240 }, { "epoch": 5.614035087719298, "grad_norm": 1.2603241205215454, "learning_rate": 1.760472266500396e-05, "loss": 1.6026, "step": 260 }, { "epoch": 5.98110661268556, "eval_loss": 1.3610283136367798, "eval_runtime": 12.085, "eval_samples_per_second": 30.782, "eval_steps_per_second": 3.889, "step": 277 }, { "epoch": 6.045883940620783, "grad_norm": 1.312119722366333, "learning_rate": 1.5e-05, "loss": 1.5725, "step": 280 }, { "epoch": 6.477732793522267, "grad_norm": 1.3641616106033325, "learning_rate": 1.2395277334996045e-05, "loss": 1.5075, "step": 300 }, { "epoch": 6.909581646423752, "grad_norm": 1.4151686429977417, "learning_rate": 9.86969785011497e-06, "loss": 1.5518, "step": 320 }, { "epoch": 6.995951417004049, "eval_loss": 1.3124128580093384, "eval_runtime": 12.0879, "eval_samples_per_second": 30.775, "eval_steps_per_second": 3.888, "step": 324 }, { "epoch": 7.341430499325236, "grad_norm": 1.6003135442733765, 
"learning_rate": 7.500000000000004e-06, "loss": 1.4876, "step": 340 }, { "epoch": 7.77327935222672, "grad_norm": 1.3969717025756836, "learning_rate": 5.3581858547019095e-06, "loss": 1.5052, "step": 360 }, { "epoch": 7.989203778677463, "eval_loss": 1.288710594177246, "eval_runtime": 12.0435, "eval_samples_per_second": 30.888, "eval_steps_per_second": 3.903, "step": 370 }, { "epoch": 8.205128205128204, "grad_norm": 1.4700652360916138, "learning_rate": 3.5093333532153316e-06, "loss": 1.48, "step": 380 }, { "epoch": 8.63697705802969, "grad_norm": 1.7552611827850342, "learning_rate": 2.0096189432334194e-06, "loss": 1.4669, "step": 400 }, { "epoch": 8.982456140350877, "eval_loss": 1.2798781394958496, "eval_runtime": 12.0997, "eval_samples_per_second": 30.744, "eval_steps_per_second": 3.884, "step": 416 }, { "epoch": 9.068825910931174, "grad_norm": 1.3221427202224731, "learning_rate": 9.046106882113753e-07, "loss": 1.4745, "step": 420 }, { "epoch": 9.50067476383266, "grad_norm": 1.6423956155776978, "learning_rate": 2.278837048168797e-07, "loss": 1.4449, "step": 440 }, { "epoch": 9.932523616734143, "grad_norm": 1.3725236654281616, "learning_rate": 0.0, "loss": 1.4768, "step": 460 }, { "epoch": 9.932523616734143, "eval_loss": 1.2785857915878296, "eval_runtime": 12.0719, "eval_samples_per_second": 30.815, "eval_steps_per_second": 3.893, "step": 460 } ], "logging_steps": 20, "max_steps": 460, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "total_flos": 4.088498313476506e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }