{ "best_metric": 0.7763074636459351, "best_model_checkpoint": "./t5_finetuned/checkpoint-24000", "epoch": 1.0, "eval_steps": 2000, "global_step": 25622, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.019514479743970026, "grad_norm": 50375.1171875, "learning_rate": 3.9219420810241204e-05, "loss": 1.6162, "step": 500 }, { "epoch": 0.03902895948794005, "grad_norm": 40185.09765625, "learning_rate": 3.84388416204824e-05, "loss": 1.0952, "step": 1000 }, { "epoch": 0.05854343923191008, "grad_norm": 26678.357421875, "learning_rate": 3.76582624307236e-05, "loss": 1.0438, "step": 1500 }, { "epoch": 0.0780579189758801, "grad_norm": 75600.265625, "learning_rate": 3.68776832409648e-05, "loss": 1.0177, "step": 2000 }, { "epoch": 0.0780579189758801, "eval_loss": 0.9050301909446716, "eval_runtime": 247.5245, "eval_samples_per_second": 87.171, "eval_steps_per_second": 5.45, "step": 2000 }, { "epoch": 0.09757239871985013, "grad_norm": 48858.64453125, "learning_rate": 3.6097104051205996e-05, "loss": 0.994, "step": 2500 }, { "epoch": 0.11708687846382015, "grad_norm": 38273.890625, "learning_rate": 3.53165248614472e-05, "loss": 0.9774, "step": 3000 }, { "epoch": 0.13660135820779018, "grad_norm": 33092.80078125, "learning_rate": 3.45359456716884e-05, "loss": 0.9659, "step": 3500 }, { "epoch": 0.1561158379517602, "grad_norm": 42197.96484375, "learning_rate": 3.375536648192959e-05, "loss": 0.9396, "step": 4000 }, { "epoch": 0.1561158379517602, "eval_loss": 0.8554931879043579, "eval_runtime": 246.9587, "eval_samples_per_second": 87.371, "eval_steps_per_second": 5.462, "step": 4000 }, { "epoch": 0.17563031769573023, "grad_norm": 31069.32421875, "learning_rate": 3.2974787292170794e-05, "loss": 0.9339, "step": 4500 }, { "epoch": 0.19514479743970026, "grad_norm": 40504.2109375, "learning_rate": 3.2194208102411995e-05, "loss": 0.9271, "step": 5000 }, { "epoch": 0.21465927718367028, "grad_norm": 53872.625, "learning_rate": 3.141362891265319e-05, "loss": 0.9036, "step": 5500 }, { "epoch": 0.2341737569276403, "grad_norm": 40223.39453125, "learning_rate": 3.063304972289439e-05, "loss": 0.8948, "step": 6000 }, { "epoch": 0.2341737569276403, "eval_loss": 0.8311675190925598, "eval_runtime": 247.1162, "eval_samples_per_second": 87.315, "eval_steps_per_second": 5.459, "step": 6000 }, { "epoch": 0.25368823667161033, "grad_norm": 37229.52734375, "learning_rate": 2.985247053313559e-05, "loss": 0.9039, "step": 6500 }, { "epoch": 0.27320271641558036, "grad_norm": 42615.79296875, "learning_rate": 2.9071891343376786e-05, "loss": 0.9099, "step": 7000 }, { "epoch": 0.2927171961595504, "grad_norm": 45535.15234375, "learning_rate": 2.8291312153617988e-05, "loss": 0.9024, "step": 7500 }, { "epoch": 0.3122316759035204, "grad_norm": 44434.55078125, "learning_rate": 2.7510732963859185e-05, "loss": 0.9221, "step": 8000 }, { "epoch": 0.3122316759035204, "eval_loss": 0.8146435618400574, "eval_runtime": 247.4985, "eval_samples_per_second": 87.18, "eval_steps_per_second": 5.451, "step": 8000 }, { "epoch": 0.33174615564749044, "grad_norm": 106098.890625, "learning_rate": 2.6730153774100383e-05, "loss": 0.8931, "step": 8500 }, { "epoch": 0.35126063539146046, "grad_norm": 29800.005859375, "learning_rate": 2.5949574584341584e-05, "loss": 0.8991, "step": 9000 }, { "epoch": 0.3707751151354305, "grad_norm": 41034.53125, "learning_rate": 2.5168995394582782e-05, "loss": 0.8879, "step": 9500 }, { "epoch": 0.3902895948794005, "grad_norm": 36309.5625, "learning_rate": 
2.438841620482398e-05, "loss": 0.873, "step": 10000 }, { "epoch": 0.3902895948794005, "eval_loss": 0.8043464422225952, "eval_runtime": 247.5255, "eval_samples_per_second": 87.171, "eval_steps_per_second": 5.45, "step": 10000 }, { "epoch": 0.40980407462337054, "grad_norm": 28724.736328125, "learning_rate": 2.360783701506518e-05, "loss": 0.8964, "step": 10500 }, { "epoch": 0.42931855436734057, "grad_norm": 32160.0625, "learning_rate": 2.282725782530638e-05, "loss": 0.8837, "step": 11000 }, { "epoch": 0.4488330341113106, "grad_norm": 33788.734375, "learning_rate": 2.2046678635547577e-05, "loss": 0.8498, "step": 11500 }, { "epoch": 0.4683475138552806, "grad_norm": 57106.34375, "learning_rate": 2.1266099445788778e-05, "loss": 0.8723, "step": 12000 }, { "epoch": 0.4683475138552806, "eval_loss": 0.7961086630821228, "eval_runtime": 246.9977, "eval_samples_per_second": 87.357, "eval_steps_per_second": 5.462, "step": 12000 }, { "epoch": 0.48786199359925064, "grad_norm": 42744.36328125, "learning_rate": 2.0485520256029976e-05, "loss": 0.8664, "step": 12500 }, { "epoch": 0.5073764733432207, "grad_norm": 59340.5859375, "learning_rate": 1.9704941066271174e-05, "loss": 0.8719, "step": 13000 }, { "epoch": 0.5268909530871907, "grad_norm": 76933.4453125, "learning_rate": 1.8924361876512375e-05, "loss": 0.8896, "step": 13500 }, { "epoch": 0.5464054328311607, "grad_norm": 38514.16015625, "learning_rate": 1.8143782686753573e-05, "loss": 0.848, "step": 14000 }, { "epoch": 0.5464054328311607, "eval_loss": 0.7900422811508179, "eval_runtime": 247.446, "eval_samples_per_second": 87.199, "eval_steps_per_second": 5.452, "step": 14000 }, { "epoch": 0.5659199125751307, "grad_norm": 37615.3515625, "learning_rate": 1.736320349699477e-05, "loss": 0.8632, "step": 14500 }, { "epoch": 0.5854343923191008, "grad_norm": 81223.0625, "learning_rate": 1.6582624307235972e-05, "loss": 0.8622, "step": 15000 }, { "epoch": 0.6049488720630708, "grad_norm": 54736.93359375, "learning_rate": 1.580204511747717e-05, "loss": 0.8655, "step": 15500 }, { "epoch": 0.6244633518070408, "grad_norm": 45588.96484375, "learning_rate": 1.5021465927718368e-05, "loss": 0.877, "step": 16000 }, { "epoch": 0.6244633518070408, "eval_loss": 0.7854538559913635, "eval_runtime": 247.3837, "eval_samples_per_second": 87.221, "eval_steps_per_second": 5.453, "step": 16000 }, { "epoch": 0.6439778315510108, "grad_norm": 43015.6015625, "learning_rate": 1.4240886737959567e-05, "loss": 0.8687, "step": 16500 }, { "epoch": 0.6634923112949809, "grad_norm": 35532.26171875, "learning_rate": 1.3460307548200767e-05, "loss": 0.8749, "step": 17000 }, { "epoch": 0.6830067910389509, "grad_norm": 32677.763671875, "learning_rate": 1.2679728358441965e-05, "loss": 0.8727, "step": 17500 }, { "epoch": 0.7025212707829209, "grad_norm": 42200.640625, "learning_rate": 1.1899149168683164e-05, "loss": 0.862, "step": 18000 }, { "epoch": 0.7025212707829209, "eval_loss": 0.7817208170890808, "eval_runtime": 247.398, "eval_samples_per_second": 87.216, "eval_steps_per_second": 5.453, "step": 18000 }, { "epoch": 0.722035750526891, "grad_norm": 30234.58203125, "learning_rate": 1.1118569978924364e-05, "loss": 0.8432, "step": 18500 }, { "epoch": 0.741550230270861, "grad_norm": 43296.27734375, "learning_rate": 1.0337990789165561e-05, "loss": 0.8711, "step": 19000 }, { "epoch": 0.761064710014831, "grad_norm": 20832.39453125, "learning_rate": 9.557411599406761e-06, "loss": 0.8716, "step": 19500 }, { "epoch": 0.780579189758801, "grad_norm": 33938.4140625, "learning_rate": 8.77683240964796e-06, "loss": 
0.8584, "step": 20000 }, { "epoch": 0.780579189758801, "eval_loss": 0.7791029214859009, "eval_runtime": 247.0845, "eval_samples_per_second": 87.326, "eval_steps_per_second": 5.46, "step": 20000 }, { "epoch": 0.800093669502771, "grad_norm": 56116.62109375, "learning_rate": 7.996253219889158e-06, "loss": 0.8776, "step": 20500 }, { "epoch": 0.8196081492467411, "grad_norm": 33855.76953125, "learning_rate": 7.215674030130358e-06, "loss": 0.8571, "step": 21000 }, { "epoch": 0.8391226289907111, "grad_norm": 83128.3359375, "learning_rate": 6.4350948403715564e-06, "loss": 0.8606, "step": 21500 }, { "epoch": 0.8586371087346811, "grad_norm": 32873.78515625, "learning_rate": 5.654515650612755e-06, "loss": 0.8406, "step": 22000 }, { "epoch": 0.8586371087346811, "eval_loss": 0.777363121509552, "eval_runtime": 247.4058, "eval_samples_per_second": 87.213, "eval_steps_per_second": 5.453, "step": 22000 }, { "epoch": 0.8781515884786512, "grad_norm": 31377.396484375, "learning_rate": 4.873936460853954e-06, "loss": 0.8368, "step": 22500 }, { "epoch": 0.8976660682226212, "grad_norm": 22825.560546875, "learning_rate": 4.093357271095153e-06, "loss": 0.8419, "step": 23000 }, { "epoch": 0.9171805479665912, "grad_norm": 39448.79296875, "learning_rate": 3.312778081336352e-06, "loss": 0.8731, "step": 23500 }, { "epoch": 0.9366950277105612, "grad_norm": 67876.9296875, "learning_rate": 2.5321988915775507e-06, "loss": 0.8458, "step": 24000 }, { "epoch": 0.9366950277105612, "eval_loss": 0.7763074636459351, "eval_runtime": 247.0633, "eval_samples_per_second": 87.334, "eval_steps_per_second": 5.46, "step": 24000 }, { "epoch": 0.9562095074545313, "grad_norm": 32097.791015625, "learning_rate": 1.7516197018187498e-06, "loss": 0.8461, "step": 24500 }, { "epoch": 0.9757239871985013, "grad_norm": 34695.05859375, "learning_rate": 9.710405120599484e-07, "loss": 0.8384, "step": 25000 }, { "epoch": 0.9952384669424713, "grad_norm": 41189.7265625, "learning_rate": 1.9046132230114745e-07, "loss": 0.8632, "step": 25500 } ], "logging_steps": 500, "max_steps": 25622, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 2000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.548269480443904e+16, "train_batch_size": 16, "trial_name": null, "trial_params": null }