{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.05468215994531784, "eval_steps": 13, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0010936431989063567, "grad_norm": 28.85504913330078, "learning_rate": 1e-05, "loss": 61.9596, "step": 1 }, { "epoch": 0.0010936431989063567, "eval_loss": NaN, "eval_runtime": 55.2379, "eval_samples_per_second": 27.898, "eval_steps_per_second": 13.958, "step": 1 }, { "epoch": 0.0021872863978127134, "grad_norm": 27.06040382385254, "learning_rate": 2e-05, "loss": 55.4659, "step": 2 }, { "epoch": 0.0032809295967190706, "grad_norm": 28.58708953857422, "learning_rate": 3e-05, "loss": 59.2286, "step": 3 }, { "epoch": 0.004374572795625427, "grad_norm": 33.11997604370117, "learning_rate": 4e-05, "loss": 56.7791, "step": 4 }, { "epoch": 0.005468215994531784, "grad_norm": 31.91828155517578, "learning_rate": 5e-05, "loss": 62.7061, "step": 5 }, { "epoch": 0.006561859193438141, "grad_norm": 35.19359588623047, "learning_rate": 6e-05, "loss": 59.0522, "step": 6 }, { "epoch": 0.007655502392344498, "grad_norm": 31.660709381103516, "learning_rate": 7e-05, "loss": 58.0115, "step": 7 }, { "epoch": 0.008749145591250854, "grad_norm": 42.28495788574219, "learning_rate": 8e-05, "loss": 62.949, "step": 8 }, { "epoch": 0.00984278879015721, "grad_norm": 50.0612907409668, "learning_rate": 9e-05, "loss": 59.3017, "step": 9 }, { "epoch": 0.010936431989063569, "grad_norm": 47.29265594482422, "learning_rate": 0.0001, "loss": 56.7495, "step": 10 }, { "epoch": 0.012030075187969926, "grad_norm": 55.60464096069336, "learning_rate": 9.98458666866564e-05, "loss": 54.7309, "step": 11 }, { "epoch": 0.013123718386876282, "grad_norm": 52.25802993774414, "learning_rate": 9.938441702975689e-05, "loss": 46.0491, "step": 12 }, { "epoch": 0.014217361585782639, "grad_norm": 53.76634979248047, "learning_rate": 9.861849601988383e-05, "loss": 44.4741, "step": 13 }, { "epoch": 0.014217361585782639, "eval_loss": NaN, "eval_runtime": 29.7353, "eval_samples_per_second": 51.824, "eval_steps_per_second": 25.929, "step": 13 }, { "epoch": 0.015311004784688996, "grad_norm": 46.58119201660156, "learning_rate": 9.755282581475769e-05, "loss": 35.13, "step": 14 }, { "epoch": 0.016404647983595352, "grad_norm": 37.509063720703125, "learning_rate": 9.619397662556435e-05, "loss": 31.4067, "step": 15 }, { "epoch": 0.017498291182501707, "grad_norm": 26.47342300415039, "learning_rate": 9.45503262094184e-05, "loss": 28.0434, "step": 16 }, { "epoch": 0.018591934381408066, "grad_norm": 33.73512649536133, "learning_rate": 9.263200821770461e-05, "loss": 28.5491, "step": 17 }, { "epoch": 0.01968557758031442, "grad_norm": 30.662729263305664, "learning_rate": 9.045084971874738e-05, "loss": 28.1228, "step": 18 }, { "epoch": 0.02077922077922078, "grad_norm": 29.663869857788086, "learning_rate": 8.802029828000156e-05, "loss": 26.2821, "step": 19 }, { "epoch": 0.021872863978127138, "grad_norm": 25.14940643310547, "learning_rate": 8.535533905932738e-05, "loss": 26.3426, "step": 20 }, { "epoch": 0.022966507177033493, "grad_norm": 25.627132415771484, "learning_rate": 8.247240241650918e-05, "loss": 23.1896, "step": 21 }, { "epoch": 0.02406015037593985, "grad_norm": 24.37764549255371, "learning_rate": 7.938926261462366e-05, "loss": 20.1499, "step": 22 }, { "epoch": 0.025153793574846206, "grad_norm": 21.951433181762695, "learning_rate": 7.612492823579745e-05, "loss": 18.7962, "step": 23 }, { "epoch": 0.026247436773752564, "grad_norm": 
21.490671157836914, "learning_rate": 7.269952498697734e-05, "loss": 17.1975, "step": 24 }, { "epoch": 0.02734107997265892, "grad_norm": 27.6579532623291, "learning_rate": 6.91341716182545e-05, "loss": 19.7459, "step": 25 }, { "epoch": 0.028434723171565278, "grad_norm": 24.457176208496094, "learning_rate": 6.545084971874738e-05, "loss": 17.8861, "step": 26 }, { "epoch": 0.028434723171565278, "eval_loss": NaN, "eval_runtime": 29.8157, "eval_samples_per_second": 51.684, "eval_steps_per_second": 25.859, "step": 26 }, { "epoch": 0.029528366370471633, "grad_norm": 20.9813289642334, "learning_rate": 6.167226819279528e-05, "loss": 16.0026, "step": 27 }, { "epoch": 0.03062200956937799, "grad_norm": 25.871700286865234, "learning_rate": 5.782172325201155e-05, "loss": 13.681, "step": 28 }, { "epoch": 0.03171565276828435, "grad_norm": 22.013635635375977, "learning_rate": 5.392295478639225e-05, "loss": 12.6155, "step": 29 }, { "epoch": 0.032809295967190705, "grad_norm": 25.675172805786133, "learning_rate": 5e-05, "loss": 12.0468, "step": 30 }, { "epoch": 0.03390293916609706, "grad_norm": 26.090190887451172, "learning_rate": 4.607704521360776e-05, "loss": 10.7349, "step": 31 }, { "epoch": 0.034996582365003415, "grad_norm": 32.42118453979492, "learning_rate": 4.2178276747988446e-05, "loss": 12.2017, "step": 32 }, { "epoch": 0.03609022556390978, "grad_norm": 22.70758819580078, "learning_rate": 3.832773180720475e-05, "loss": 10.3959, "step": 33 }, { "epoch": 0.03718386876281613, "grad_norm": 22.697429656982422, "learning_rate": 3.4549150281252636e-05, "loss": 11.4618, "step": 34 }, { "epoch": 0.03827751196172249, "grad_norm": 22.032344818115234, "learning_rate": 3.086582838174551e-05, "loss": 12.4779, "step": 35 }, { "epoch": 0.03937115516062884, "grad_norm": 17.23045539855957, "learning_rate": 2.7300475013022663e-05, "loss": 8.9037, "step": 36 }, { "epoch": 0.0404647983595352, "grad_norm": 18.696378707885742, "learning_rate": 2.3875071764202563e-05, "loss": 10.774, "step": 37 }, { "epoch": 0.04155844155844156, "grad_norm": 19.836345672607422, "learning_rate": 2.061073738537635e-05, "loss": 9.4798, "step": 38 }, { "epoch": 0.04265208475734791, "grad_norm": 25.298463821411133, "learning_rate": 1.7527597583490822e-05, "loss": 11.0674, "step": 39 }, { "epoch": 0.04265208475734791, "eval_loss": NaN, "eval_runtime": 29.7293, "eval_samples_per_second": 51.834, "eval_steps_per_second": 25.934, "step": 39 }, { "epoch": 0.043745727956254275, "grad_norm": 22.722843170166016, "learning_rate": 1.4644660940672627e-05, "loss": 8.415, "step": 40 }, { "epoch": 0.04483937115516063, "grad_norm": 20.1871395111084, "learning_rate": 1.1979701719998453e-05, "loss": 8.9222, "step": 41 }, { "epoch": 0.045933014354066985, "grad_norm": 19.610918045043945, "learning_rate": 9.549150281252633e-06, "loss": 7.8748, "step": 42 }, { "epoch": 0.04702665755297334, "grad_norm": 19.86541175842285, "learning_rate": 7.367991782295391e-06, "loss": 9.8181, "step": 43 }, { "epoch": 0.0481203007518797, "grad_norm": 15.583364486694336, "learning_rate": 5.449673790581611e-06, "loss": 8.7584, "step": 44 }, { "epoch": 0.04921394395078606, "grad_norm": 19.862598419189453, "learning_rate": 3.8060233744356633e-06, "loss": 7.7709, "step": 45 }, { "epoch": 0.05030758714969241, "grad_norm": 22.887357711791992, "learning_rate": 2.4471741852423237e-06, "loss": 9.1935, "step": 46 }, { "epoch": 0.05140123034859877, "grad_norm": 20.946556091308594, "learning_rate": 1.3815039801161721e-06, "loss": 6.5477, "step": 47 }, { "epoch": 0.05249487354750513, "grad_norm": 
19.912525177001953, "learning_rate": 6.15582970243117e-07, "loss": 7.5449, "step": 48 }, { "epoch": 0.053588516746411484, "grad_norm": 17.380966186523438, "learning_rate": 1.5413331334360182e-07, "loss": 7.0048, "step": 49 }, { "epoch": 0.05468215994531784, "grad_norm": 26.645334243774414, "learning_rate": 0.0, "loss": 13.126, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 13, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2.40962761654272e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }