|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.978165938864628,
  "eval_steps": 500,
  "global_step": 570,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008733624454148471,
      "grad_norm": 648.0,
      "learning_rate": 3.5087719298245615e-06,
      "loss": 56.9969,
      "step": 1
    },
    {
      "epoch": 0.043668122270742356,
      "grad_norm": 584.0,
      "learning_rate": 1.7543859649122806e-05,
      "loss": 59.6974,
      "step": 5
    },
    {
      "epoch": 0.08733624454148471,
      "grad_norm": 356.0,
      "learning_rate": 3.508771929824561e-05,
      "loss": 42.0524,
      "step": 10
    },
    {
      "epoch": 0.13100436681222707,
      "grad_norm": 39.75,
      "learning_rate": 5.2631578947368424e-05,
      "loss": 23.223,
      "step": 15
    },
    {
      "epoch": 0.17467248908296942,
      "grad_norm": 26.625,
      "learning_rate": 7.017543859649122e-05,
      "loss": 20.5089,
      "step": 20
    },
    {
      "epoch": 0.2183406113537118,
      "grad_norm": 7.15625,
      "learning_rate": 8.771929824561403e-05,
      "loss": 17.4609,
      "step": 25
    },
    {
      "epoch": 0.26200873362445415,
      "grad_norm": 10.25,
      "learning_rate": 0.00010526315789473685,
      "loss": 16.9699,
      "step": 30
    },
    {
      "epoch": 0.3056768558951965,
      "grad_norm": 36.0,
      "learning_rate": 0.00012280701754385965,
      "loss": 14.7118,
      "step": 35
    },
    {
      "epoch": 0.34934497816593885,
      "grad_norm": 54.75,
      "learning_rate": 0.00014035087719298245,
      "loss": 9.0151,
      "step": 40
    },
    {
      "epoch": 0.3930131004366812,
      "grad_norm": 4.09375,
      "learning_rate": 0.00015789473684210527,
      "loss": 2.2763,
      "step": 45
    },
    {
      "epoch": 0.4366812227074236,
      "grad_norm": 2.5,
      "learning_rate": 0.00017543859649122806,
      "loss": 1.6984,
      "step": 50
    },
    {
      "epoch": 0.48034934497816595,
      "grad_norm": 2.5,
      "learning_rate": 0.00019298245614035088,
      "loss": 1.4194,
      "step": 55
    },
    {
      "epoch": 0.5240174672489083,
      "grad_norm": 1.9765625,
      "learning_rate": 0.00019998312416333227,
      "loss": 1.2842,
      "step": 60
    },
    {
      "epoch": 0.5676855895196506,
      "grad_norm": 13.0625,
      "learning_rate": 0.0001998800146766861,
      "loss": 1.2936,
      "step": 65
    },
    {
      "epoch": 0.611353711790393,
      "grad_norm": 3.15625,
      "learning_rate": 0.00019968326771610797,
      "loss": 1.1866,
      "step": 70
    },
    {
      "epoch": 0.6550218340611353,
      "grad_norm": 3.765625,
      "learning_rate": 0.00019939306773179497,
      "loss": 1.0911,
      "step": 75
    },
    {
      "epoch": 0.6986899563318777,
      "grad_norm": 5.34375,
      "learning_rate": 0.00019900968678611666,
      "loss": 1.0412,
      "step": 80
    },
    {
      "epoch": 0.74235807860262,
      "grad_norm": 2.625,
      "learning_rate": 0.00019853348429855672,
      "loss": 0.9954,
      "step": 85
    },
    {
      "epoch": 0.7860262008733624,
      "grad_norm": 2.6875,
      "learning_rate": 0.0001979649067087574,
      "loss": 0.9395,
      "step": 90
    },
    {
      "epoch": 0.8296943231441049,
      "grad_norm": 4.375,
      "learning_rate": 0.00019730448705798239,
      "loss": 0.9498,
      "step": 95
    },
    {
      "epoch": 0.8733624454148472,
      "grad_norm": 4.875,
      "learning_rate": 0.00019655284448939094,
      "loss": 0.8991,
      "step": 100
    },
    {
      "epoch": 0.9170305676855895,
      "grad_norm": 1.75,
      "learning_rate": 0.00019571068366759143,
      "loss": 0.8827,
      "step": 105
    },
    {
      "epoch": 0.9606986899563319,
      "grad_norm": 3.1875,
      "learning_rate": 0.00019477879411801844,
      "loss": 0.8665,
      "step": 110
    },
    {
      "epoch": 0.9956331877729258,
      "eval_loss": 2.268367052078247,
      "eval_runtime": 1.0131,
      "eval_samples_per_second": 4.935,
      "eval_steps_per_second": 1.974,
      "step": 114
    },
    {
      "epoch": 1.0043668122270741,
      "grad_norm": 1.6015625,
      "learning_rate": 0.00019375804948675306,
      "loss": 0.8773,
      "step": 115
    },
    {
      "epoch": 1.0480349344978166,
      "grad_norm": 9.1875,
      "learning_rate": 0.00019264940672148018,
      "loss": 0.8211,
      "step": 120
    },
    {
      "epoch": 1.091703056768559,
      "grad_norm": 1.25,
      "learning_rate": 0.00019145390517435012,
      "loss": 0.8465,
      "step": 125
    },
    {
      "epoch": 1.1353711790393013,
      "grad_norm": 3.09375,
      "learning_rate": 0.00019017266562758659,
      "loss": 0.8369,
      "step": 130
    },
    {
      "epoch": 1.1790393013100438,
      "grad_norm": 1.015625,
      "learning_rate": 0.00018880688924275378,
      "loss": 0.7924,
      "step": 135
    },
    {
      "epoch": 1.222707423580786,
      "grad_norm": 1.9140625,
      "learning_rate": 0.00018735785643466784,
      "loss": 0.778,
      "step": 140
    },
    {
      "epoch": 1.2663755458515285,
      "grad_norm": 0.98828125,
      "learning_rate": 0.00018582692567100867,
      "loss": 0.7681,
      "step": 145
    },
    {
      "epoch": 1.3100436681222707,
      "grad_norm": 1.3984375,
      "learning_rate": 0.00018421553219875658,
      "loss": 0.8007,
      "step": 150
    },
    {
      "epoch": 1.3537117903930131,
      "grad_norm": 80.5,
      "learning_rate": 0.00018252518669864936,
      "loss": 0.7683,
      "step": 155
    },
    {
      "epoch": 1.3973799126637554,
      "grad_norm": 0.73828125,
      "learning_rate": 0.0001807574738689193,
      "loss": 0.7713,
      "step": 160
    },
    {
      "epoch": 1.4410480349344978,
      "grad_norm": 2.875,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.7784,
      "step": 165
    },
    {
      "epoch": 1.48471615720524,
      "grad_norm": 0.54296875,
      "learning_rate": 0.00017699664611907072,
      "loss": 0.7742,
      "step": 170
    },
    {
      "epoch": 1.5283842794759825,
      "grad_norm": 2.046875,
      "learning_rate": 0.0001750070569734681,
      "loss": 0.7356,
      "step": 175
    },
    {
      "epoch": 1.572052401746725,
      "grad_norm": 1.375,
      "learning_rate": 0.0001729471487418621,
      "loss": 0.7613,
      "step": 180
    },
    {
      "epoch": 1.6157205240174672,
      "grad_norm": 1.0234375,
      "learning_rate": 0.00017081885258739846,
      "loss": 0.7373,
      "step": 185
    },
    {
      "epoch": 1.6593886462882095,
      "grad_norm": 1.109375,
      "learning_rate": 0.0001686241637868734,
      "loss": 0.7395,
      "step": 190
    },
    {
      "epoch": 1.703056768558952,
      "grad_norm": 0.77734375,
      "learning_rate": 0.00016636513986016213,
      "loss": 0.7456,
      "step": 195
    },
    {
      "epoch": 1.7467248908296944,
      "grad_norm": 1.5,
      "learning_rate": 0.00016404389864129533,
      "loss": 0.7535,
      "step": 200
    },
    {
      "epoch": 1.7903930131004366,
      "grad_norm": 1.0703125,
      "learning_rate": 0.00016166261629298995,
      "loss": 0.7334,
      "step": 205
    },
    {
      "epoch": 1.8340611353711789,
      "grad_norm": 0.99609375,
      "learning_rate": 0.00015922352526649803,
      "loss": 0.7087,
      "step": 210
    },
    {
      "epoch": 1.8777292576419216,
      "grad_norm": 5.03125,
      "learning_rate": 0.00015672891220868432,
      "loss": 0.7264,
      "step": 215
    },
    {
      "epoch": 1.9213973799126638,
      "grad_norm": 0.60546875,
      "learning_rate": 0.00015418111581829574,
      "loss": 0.7336,
      "step": 220
    },
    {
      "epoch": 1.965065502183406,
      "grad_norm": 0.79296875,
      "learning_rate": 0.00015158252465343242,
      "loss": 0.7577,
      "step": 225
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.1721692085266113,
      "eval_runtime": 1.0003,
      "eval_samples_per_second": 4.999,
      "eval_steps_per_second": 1.999,
      "step": 229
    },
    {
      "epoch": 2.0087336244541483,
      "grad_norm": 0.5078125,
      "learning_rate": 0.00014893557489227517,
      "loss": 0.7045,
      "step": 230
    },
    {
      "epoch": 2.052401746724891,
      "grad_norm": 0.80859375,
      "learning_rate": 0.00014624274804916958,
      "loss": 0.6618,
      "step": 235
    },
    {
      "epoch": 2.096069868995633,
      "grad_norm": 0.62109375,
      "learning_rate": 0.00014350656864820733,
      "loss": 0.6572,
      "step": 240
    },
    {
      "epoch": 2.1397379912663754,
      "grad_norm": 0.515625,
      "learning_rate": 0.00014072960185648577,
      "loss": 0.6606,
      "step": 245
    },
    {
      "epoch": 2.183406113537118,
      "grad_norm": 0.75,
      "learning_rate": 0.00013791445107926478,
      "loss": 0.6832,
      "step": 250
    },
    {
      "epoch": 2.2270742358078603,
      "grad_norm": 0.86328125,
      "learning_rate": 0.00013506375551927547,
      "loss": 0.6344,
      "step": 255
    },
    {
      "epoch": 2.2707423580786026,
      "grad_norm": 1.0859375,
      "learning_rate": 0.00013218018770246858,
      "loss": 0.6513,
      "step": 260
    },
    {
      "epoch": 2.314410480349345,
      "grad_norm": 0.83203125,
      "learning_rate": 0.0001292664509725226,
      "loss": 0.6569,
      "step": 265
    },
    {
      "epoch": 2.3580786026200875,
      "grad_norm": 1.1484375,
      "learning_rate": 0.00012632527695645993,
      "loss": 0.6706,
      "step": 270
    },
    {
      "epoch": 2.4017467248908297,
      "grad_norm": 1.265625,
      "learning_rate": 0.00012335942300374788,
      "loss": 0.6641,
      "step": 275
    },
    {
      "epoch": 2.445414847161572,
      "grad_norm": 1.59375,
      "learning_rate": 0.00012037166960128443,
      "loss": 0.6636,
      "step": 280
    },
    {
      "epoch": 2.489082969432314,
      "grad_norm": 0.765625,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.6687,
      "step": 285
    },
    {
      "epoch": 2.532751091703057,
      "grad_norm": 0.73046875,
      "learning_rate": 0.00011434168642236964,
      "loss": 0.6601,
      "step": 290
    },
    {
      "epoch": 2.576419213973799,
      "grad_norm": 0.5703125,
      "learning_rate": 0.00011130510975274409,
      "loss": 0.6467,
      "step": 295
    },
    {
      "epoch": 2.6200873362445414,
      "grad_norm": 0.53125,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.6567,
      "step": 300
    },
    {
      "epoch": 2.6637554585152836,
      "grad_norm": 0.875,
      "learning_rate": 0.00010520301753137724,
      "loss": 0.6625,
      "step": 305
    },
    {
      "epoch": 2.7074235807860263,
      "grad_norm": 0.59375,
      "learning_rate": 0.00010214322268866032,
      "loss": 0.6614,
      "step": 310
    },
    {
      "epoch": 2.7510917030567685,
      "grad_norm": 0.5703125,
      "learning_rate": 9.908141857552737e-05,
      "loss": 0.6382,
      "step": 315
    },
    {
      "epoch": 2.7947598253275108,
      "grad_norm": 0.470703125,
      "learning_rate": 9.602047563211359e-05,
      "loss": 0.6527,
      "step": 320
    },
    {
      "epoch": 2.8384279475982535,
      "grad_norm": 0.89453125,
      "learning_rate": 9.296326349120785e-05,
      "loss": 0.682,
      "step": 325
    },
    {
      "epoch": 2.8820960698689957,
      "grad_norm": 0.49609375,
      "learning_rate": 8.991264828797319e-05,
      "loss": 0.651,
      "step": 330
    },
    {
      "epoch": 2.925764192139738,
      "grad_norm": 0.51953125,
      "learning_rate": 8.687148997294621e-05,
      "loss": 0.667,
      "step": 335
    },
    {
      "epoch": 2.96943231441048,
      "grad_norm": 0.427734375,
      "learning_rate": 8.384263963083453e-05,
      "loss": 0.6255,
      "step": 340
    },
    {
      "epoch": 2.995633187772926,
      "eval_loss": 2.174067974090576,
      "eval_runtime": 1.051,
      "eval_samples_per_second": 4.757,
      "eval_steps_per_second": 1.903,
      "step": 343
    },
    {
      "epoch": 3.013100436681223,
      "grad_norm": 0.62890625,
      "learning_rate": 8.082893680762619e-05,
      "loss": 0.6202,
      "step": 345
    },
    {
      "epoch": 3.056768558951965,
      "grad_norm": 0.53125,
      "learning_rate": 7.783320684851614e-05,
      "loss": 0.6176,
      "step": 350
    },
    {
      "epoch": 3.1004366812227073,
      "grad_norm": 0.51171875,
      "learning_rate": 7.485825824914659e-05,
      "loss": 0.5783,
      "step": 355
    },
    {
      "epoch": 3.14410480349345,
      "grad_norm": 0.50390625,
      "learning_rate": 7.190688002264308e-05,
      "loss": 0.5812,
      "step": 360
    },
    {
      "epoch": 3.1877729257641922,
      "grad_norm": 0.5546875,
      "learning_rate": 6.898183908491617e-05,
      "loss": 0.5981,
      "step": 365
    },
    {
      "epoch": 3.2314410480349345,
      "grad_norm": 0.53125,
      "learning_rate": 6.608587766067852e-05,
      "loss": 0.5853,
      "step": 370
    },
    {
      "epoch": 3.2751091703056767,
      "grad_norm": 0.609375,
      "learning_rate": 6.322171071261071e-05,
      "loss": 0.5908,
      "step": 375
    },
    {
      "epoch": 3.3187772925764194,
      "grad_norm": 1.125,
      "learning_rate": 6.039202339608432e-05,
      "loss": 0.5644,
      "step": 380
    },
    {
      "epoch": 3.3624454148471616,
      "grad_norm": 0.63671875,
      "learning_rate": 5.7599468541830356e-05,
      "loss": 0.5746,
      "step": 385
    },
    {
      "epoch": 3.406113537117904,
      "grad_norm": 0.8828125,
      "learning_rate": 5.484666416891109e-05,
      "loss": 0.5929,
      "step": 390
    },
    {
      "epoch": 3.449781659388646,
      "grad_norm": 0.5859375,
      "learning_rate": 5.2136191030328455e-05,
      "loss": 0.5704,
      "step": 395
    },
    {
      "epoch": 3.493449781659389,
      "grad_norm": 0.671875,
      "learning_rate": 4.9470590193569044e-05,
      "loss": 0.5971,
      "step": 400
    },
    {
      "epoch": 3.537117903930131,
      "grad_norm": 0.51953125,
      "learning_rate": 4.685236065835443e-05,
      "loss": 0.6042,
      "step": 405
    },
    {
      "epoch": 3.5807860262008733,
      "grad_norm": 0.61328125,
      "learning_rate": 4.4283957013829846e-05,
      "loss": 0.5822,
      "step": 410
    },
    {
      "epoch": 3.6244541484716155,
      "grad_norm": 0.64453125,
      "learning_rate": 4.176778713738787e-05,
      "loss": 0.5709,
      "step": 415
    },
    {
      "epoch": 3.668122270742358,
      "grad_norm": 0.49609375,
      "learning_rate": 3.9306209937284346e-05,
      "loss": 0.5907,
      "step": 420
    },
    {
      "epoch": 3.7117903930131004,
      "grad_norm": 0.5625,
      "learning_rate": 3.69015331411628e-05,
      "loss": 0.589,
      "step": 425
    },
    {
      "epoch": 3.7554585152838427,
      "grad_norm": 0.65234375,
      "learning_rate": 3.455601113256073e-05,
      "loss": 0.6115,
      "step": 430
    },
    {
      "epoch": 3.7991266375545854,
      "grad_norm": 0.50390625,
      "learning_rate": 3.227184283742591e-05,
      "loss": 0.5629,
      "step": 435
    },
    {
      "epoch": 3.8427947598253276,
      "grad_norm": 0.458984375,
      "learning_rate": 3.0051169662624225e-05,
      "loss": 0.5943,
      "step": 440
    },
    {
      "epoch": 3.88646288209607,
      "grad_norm": 0.4765625,
      "learning_rate": 2.789607348837153e-05,
      "loss": 0.5944,
      "step": 445
    },
    {
      "epoch": 3.930131004366812,
      "grad_norm": 0.4765625,
      "learning_rate": 2.5808574716471856e-05,
      "loss": 0.6155,
      "step": 450
    },
    {
      "epoch": 3.9737991266375547,
      "grad_norm": 0.48828125,
      "learning_rate": 2.379063037619146e-05,
      "loss": 0.5966,
      "step": 455
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.214209794998169,
      "eval_runtime": 1.0004,
      "eval_samples_per_second": 4.998,
      "eval_steps_per_second": 1.999,
      "step": 458
    },
    {
      "epoch": 4.0174672489082965,
      "grad_norm": 0.458984375,
      "learning_rate": 2.184413228954468e-05,
      "loss": 0.5676,
      "step": 460
    },
    {
      "epoch": 4.06113537117904,
      "grad_norm": 0.48828125,
      "learning_rate": 1.9970905297711606e-05,
      "loss": 0.5632,
      "step": 465
    },
    {
      "epoch": 4.104803493449782,
      "grad_norm": 0.5078125,
      "learning_rate": 1.8172705550250092e-05,
      "loss": 0.5515,
      "step": 470
    },
    {
      "epoch": 4.148471615720524,
      "grad_norm": 0.47265625,
      "learning_rate": 1.6451218858706374e-05,
      "loss": 0.5725,
      "step": 475
    },
    {
      "epoch": 4.192139737991266,
      "grad_norm": 0.50390625,
      "learning_rate": 1.4808059116167305e-05,
      "loss": 0.5362,
      "step": 480
    },
    {
      "epoch": 4.235807860262009,
      "grad_norm": 0.478515625,
      "learning_rate": 1.3244766784236307e-05,
      "loss": 0.5569,
      "step": 485
    },
    {
      "epoch": 4.279475982532751,
      "grad_norm": 0.46484375,
      "learning_rate": 1.176280744885121e-05,
      "loss": 0.573,
      "step": 490
    },
    {
      "epoch": 4.323144104803493,
      "grad_norm": 0.478515625,
      "learning_rate": 1.0363570446297999e-05,
      "loss": 0.5559,
      "step": 495
    },
    {
      "epoch": 4.366812227074236,
      "grad_norm": 0.53125,
      "learning_rate": 9.048367560708604e-06,
      "loss": 0.548,
      "step": 500
    },
    {
      "epoch": 4.4104803493449785,
      "grad_norm": 0.515625,
      "learning_rate": 7.818431794263836e-06,
      "loss": 0.5755,
      "step": 505
    },
    {
      "epoch": 4.454148471615721,
      "grad_norm": 0.470703125,
      "learning_rate": 6.674916211254289e-06,
      "loss": 0.5559,
      "step": 510
    },
    {
      "epoch": 4.497816593886463,
      "grad_norm": 0.50390625,
      "learning_rate": 5.618892857083069e-06,
      "loss": 0.5491,
      "step": 515
    },
    {
      "epoch": 4.541484716157205,
      "grad_norm": 0.4765625,
      "learning_rate": 4.65135175322361e-06,
      "loss": 0.5586,
      "step": 520
    },
    {
      "epoch": 4.585152838427947,
      "grad_norm": 0.458984375,
      "learning_rate": 3.7731999690749585e-06,
      "loss": 0.5569,
      "step": 525
    },
    {
      "epoch": 4.62882096069869,
      "grad_norm": 0.47265625,
      "learning_rate": 2.9852607715846193e-06,
      "loss": 0.5458,
      "step": 530
    },
    {
      "epoch": 4.672489082969433,
      "grad_norm": 0.470703125,
      "learning_rate": 2.288272853436013e-06,
      "loss": 0.5253,
      "step": 535
    },
    {
      "epoch": 4.716157205240175,
      "grad_norm": 0.47265625,
      "learning_rate": 1.6828896405244988e-06,
      "loss": 0.5595,
      "step": 540
    },
    {
      "epoch": 4.759825327510917,
      "grad_norm": 0.486328125,
      "learning_rate": 1.1696786793707781e-06,
      "loss": 0.5626,
      "step": 545
    },
    {
      "epoch": 4.8034934497816595,
      "grad_norm": 0.5,
      "learning_rate": 7.491211050462798e-07,
      "loss": 0.5407,
      "step": 550
    },
    {
      "epoch": 4.847161572052402,
      "grad_norm": 0.47265625,
      "learning_rate": 4.216111901092501e-07,
      "loss": 0.5542,
      "step": 555
    },
    {
      "epoch": 4.890829694323144,
      "grad_norm": 0.5234375,
      "learning_rate": 1.8745597497433765e-07,
      "loss": 0.5542,
      "step": 560
    },
    {
      "epoch": 4.934497816593886,
      "grad_norm": 0.458984375,
      "learning_rate": 4.687498006236135e-08,
      "loss": 0.5682,
      "step": 565
    },
    {
      "epoch": 4.978165938864628,
      "grad_norm": 0.49609375,
      "learning_rate": 0.0,
      "loss": 0.5399,
      "step": 570
    },
    {
      "epoch": 4.978165938864628,
      "eval_loss": 2.236276865005493,
      "eval_runtime": 0.9998,
      "eval_samples_per_second": 5.001,
      "eval_steps_per_second": 2.0,
      "step": 570
    },
    {
      "epoch": 4.978165938864628,
      "step": 570,
      "total_flos": 8.714577082329334e+17,
      "train_loss": 2.449154797771521,
      "train_runtime": 4532.0541,
      "train_samples_per_second": 2.02,
      "train_steps_per_second": 0.126
    }
  ],
  "logging_steps": 5,
  "max_steps": 570,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.714577082329334e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|