{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.9981515711645101, "eval_steps": 500, "global_step": 270, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.018484288354898338, "grad_norm": 190464.0, "learning_rate": 3.7037037037037037e-06, "loss": 10605.0938, "step": 5 }, { "epoch": 0.036968576709796676, "grad_norm": 89088.0, "learning_rate": 7.4074074074074075e-06, "loss": 9817.9641, "step": 10 }, { "epoch": 0.05545286506469501, "grad_norm": 65280.0, "learning_rate": 1.1111111111111113e-05, "loss": 8481.5859, "step": 15 }, { "epoch": 0.07393715341959335, "grad_norm": 35072.0, "learning_rate": 1.4814814814814815e-05, "loss": 7176.0742, "step": 20 }, { "epoch": 0.09242144177449169, "grad_norm": 32640.0, "learning_rate": 1.851851851851852e-05, "loss": 5997.027, "step": 25 }, { "epoch": 0.11090573012939002, "grad_norm": 46336.0, "learning_rate": 1.9992479525042305e-05, "loss": 5191.1363, "step": 30 }, { "epoch": 0.12939001848428835, "grad_norm": 34048.0, "learning_rate": 1.9946562024066018e-05, "loss": 4804.0578, "step": 35 }, { "epoch": 0.1478743068391867, "grad_norm": 9280.0, "learning_rate": 1.9859096633447965e-05, "loss": 4586.0977, "step": 40 }, { "epoch": 0.16635859519408502, "grad_norm": 43520.0, "learning_rate": 1.973044870579824e-05, "loss": 4305.1379, "step": 45 }, { "epoch": 0.18484288354898337, "grad_norm": 19072.0, "learning_rate": 1.95611556177388e-05, "loss": 4172.2984, "step": 50 }, { "epoch": 0.2033271719038817, "grad_norm": 23808.0, "learning_rate": 1.93519245252219e-05, "loss": 3893.0977, "step": 55 }, { "epoch": 0.22181146025878004, "grad_norm": 6752.0, "learning_rate": 1.9103629409661468e-05, "loss": 3707.5781, "step": 60 }, { "epoch": 0.24029574861367836, "grad_norm": 35584.0, "learning_rate": 1.881730742721608e-05, "loss": 3645.1086, "step": 65 }, { "epoch": 0.2587800369685767, "grad_norm": 66560.0, "learning_rate": 1.8494154576472976e-05, "loss": 3538.7551, "step": 70 }, { "epoch": 0.27726432532347506, "grad_norm": 35328.0, "learning_rate": 1.8135520702629677e-05, "loss": 3496.9391, "step": 75 }, { "epoch": 0.2957486136783734, "grad_norm": 33536.0, "learning_rate": 1.7742903859041324e-05, "loss": 3434.5762, "step": 80 }, { "epoch": 0.3142329020332717, "grad_norm": 22272.0, "learning_rate": 1.7317944049686125e-05, "loss": 3348.5082, "step": 85 }, { "epoch": 0.33271719038817005, "grad_norm": 9408.0, "learning_rate": 1.686241637868734e-05, "loss": 3297.6758, "step": 90 }, { "epoch": 0.3512014787430684, "grad_norm": 12864.0, "learning_rate": 1.637822363550706e-05, "loss": 3260.8111, "step": 95 }, { "epoch": 0.36968576709796674, "grad_norm": 7936.0, "learning_rate": 1.586738834678418e-05, "loss": 3206.3494, "step": 100 }, { "epoch": 0.38817005545286504, "grad_norm": 19200.0, "learning_rate": 1.5332044328016916e-05, "loss": 3263.5463, "step": 105 }, { "epoch": 0.4066543438077634, "grad_norm": 25984.0, "learning_rate": 1.4774427770379492e-05, "loss": 3248.7541, "step": 110 }, { "epoch": 0.42513863216266173, "grad_norm": 33536.0, "learning_rate": 1.4196867899904292e-05, "loss": 3195.2939, "step": 115 }, { "epoch": 0.4436229205175601, "grad_norm": 34560.0, "learning_rate": 1.3601777248047105e-05, "loss": 3080.999, "step": 120 }, { "epoch": 0.46210720887245843, "grad_norm": 24320.0, "learning_rate": 1.2991641574276419e-05, "loss": 3109.1754, "step": 125 }, { "epoch": 0.4805914972273567, "grad_norm": 15488.0, "learning_rate": 1.2369009482781191e-05, "loss": 3104.5951, "step": 130 }, { 
"epoch": 0.49907578558225507, "grad_norm": 23168.0, "learning_rate": 1.1736481776669307e-05, "loss": 3047.1744, "step": 135 }, { "epoch": 0.5175600739371534, "grad_norm": 41728.0, "learning_rate": 1.1096700594125318e-05, "loss": 3073.7598, "step": 140 }, { "epoch": 0.5360443622920518, "grad_norm": 19200.0, "learning_rate": 1.0452338371907065e-05, "loss": 2990.3996, "step": 145 }, { "epoch": 0.5545286506469501, "grad_norm": 35584.0, "learning_rate": 9.806086682281759e-06, "loss": 3001.843, "step": 150 }, { "epoch": 0.5730129390018485, "grad_norm": 19072.0, "learning_rate": 9.160644990030932e-06, "loss": 3061.5664, "step": 155 }, { "epoch": 0.5914972273567468, "grad_norm": 32000.0, "learning_rate": 8.518709376487515e-06, "loss": 3027.4182, "step": 160 }, { "epoch": 0.609981515711645, "grad_norm": 16640.0, "learning_rate": 7.882961277705897e-06, "loss": 3005.9816, "step": 165 }, { "epoch": 0.6284658040665434, "grad_norm": 24448.0, "learning_rate": 7.256056283806987e-06, "loss": 3036.1979, "step": 170 }, { "epoch": 0.6469500924214417, "grad_norm": 23424.0, "learning_rate": 6.640613046284581e-06, "loss": 3033.5047, "step": 175 }, { "epoch": 0.6654343807763401, "grad_norm": 12288.0, "learning_rate": 6.039202339608432e-06, "loss": 2887.5133, "step": 180 }, { "epoch": 0.6839186691312384, "grad_norm": 14400.0, "learning_rate": 5.454336322814995e-06, "loss": 2917.3609, "step": 185 }, { "epoch": 0.7024029574861368, "grad_norm": 19968.0, "learning_rate": 4.888458045941269e-06, "loss": 2940.7945, "step": 190 }, { "epoch": 0.7208872458410351, "grad_norm": 7488.0, "learning_rate": 4.343931245134616e-06, "loss": 2938.1154, "step": 195 }, { "epoch": 0.7393715341959335, "grad_norm": 5664.0, "learning_rate": 3.823030469065431e-06, "loss": 2921.0854, "step": 200 }, { "epoch": 0.7578558225508318, "grad_norm": 15104.0, "learning_rate": 3.3279315778858034e-06, "loss": 3014.4, "step": 205 }, { "epoch": 0.7763401109057301, "grad_norm": 24832.0, "learning_rate": 2.8607026544210115e-06, "loss": 2952.8635, "step": 210 }, { "epoch": 0.7948243992606284, "grad_norm": 26624.0, "learning_rate": 2.423295365558821e-06, "loss": 2933.4004, "step": 215 }, { "epoch": 0.8133086876155268, "grad_norm": 9792.0, "learning_rate": 2.01753680992107e-06, "loss": 3003.6742, "step": 220 }, { "epoch": 0.8317929759704251, "grad_norm": 12032.0, "learning_rate": 1.6451218858706374e-06, "loss": 2939.7191, "step": 225 }, { "epoch": 0.8502772643253235, "grad_norm": 26752.0, "learning_rate": 1.307606211733522e-06, "loss": 2912.9416, "step": 230 }, { "epoch": 0.8687615526802218, "grad_norm": 14272.0, "learning_rate": 1.0063996278090704e-06, "loss": 2914.6553, "step": 235 }, { "epoch": 0.8872458410351202, "grad_norm": 8704.0, "learning_rate": 7.427603073110967e-07, "loss": 3010.8492, "step": 240 }, { "epoch": 0.9057301293900185, "grad_norm": 4960.0, "learning_rate": 5.177895008392353e-07, "loss": 2934.9787, "step": 245 }, { "epoch": 0.9242144177449169, "grad_norm": 31616.0, "learning_rate": 3.3242693633337986e-07, "loss": 3034.4797, "step": 250 }, { "epoch": 0.9426987060998152, "grad_norm": 6656.0, "learning_rate": 1.874468937261531e-07, "loss": 2923.3105, "step": 255 }, { "epoch": 0.9611829944547134, "grad_norm": 9344.0, "learning_rate": 8.345497068998897e-08, "loss": 2966.9092, "step": 260 }, { "epoch": 0.9796672828096118, "grad_norm": 6240.0, "learning_rate": 2.088555298867978e-08, "loss": 2945.0246, "step": 265 }, { "epoch": 0.9981515711645101, "grad_norm": 19456.0, "learning_rate": 0.0, "loss": 2978.2934, "step": 270 }, { "epoch": 
0.9981515711645101, "eval_loss": 2957.430908203125, "eval_runtime": 375.38, "eval_samples_per_second": 2.587, "eval_steps_per_second": 0.325, "step": 270 }, { "epoch": 0.9981515711645101, "step": 270, "total_flos": 5.260333472022528e+16, "train_loss": 3746.6009367766205, "train_runtime": 2500.0335, "train_samples_per_second": 3.461, "train_steps_per_second": 0.108 } ], "logging_steps": 5, "max_steps": 270, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": false, "should_training_stop": false }, "attributes": {} } }, "total_flos": 5.260333472022528e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }