{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9996249062265566,
  "eval_steps": 80,
  "global_step": 1066,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0046886721680420105,
      "grad_norm": 131.48858057089643,
      "learning_rate": 5.5e-07,
      "loss": 1.2775,
      "step": 5
    },
    {
      "epoch": 0.009377344336084021,
      "grad_norm": 33.55585411699123,
      "learning_rate": 1.1e-06,
      "loss": 1.3472,
      "step": 10
    },
    {
      "epoch": 0.014066016504126031,
      "grad_norm": 10.392565895406074,
      "learning_rate": 1.6499999999999999e-06,
      "loss": 1.1186,
      "step": 15
    },
    {
      "epoch": 0.018754688672168042,
      "grad_norm": 13.30940048870882,
      "learning_rate": 2.2e-06,
      "loss": 1.1381,
      "step": 20
    },
    {
      "epoch": 0.023443360840210054,
      "grad_norm": 25.46432441054564,
      "learning_rate": 2.75e-06,
      "loss": 1.1455,
      "step": 25
    },
    {
      "epoch": 0.028132033008252063,
      "grad_norm": 26.00538399730873,
      "learning_rate": 3.2999999999999997e-06,
      "loss": 1.049,
      "step": 30
    },
    {
      "epoch": 0.032820705176294075,
      "grad_norm": 9.35384218444162,
      "learning_rate": 3.8499999999999996e-06,
      "loss": 0.8965,
      "step": 35
    },
    {
      "epoch": 0.037509377344336084,
      "grad_norm": 2.9429262000270113,
      "learning_rate": 4.4e-06,
      "loss": 1.1271,
      "step": 40
    },
    {
      "epoch": 0.04219804951237809,
      "grad_norm": 3.2422986577839716,
      "learning_rate": 4.95e-06,
      "loss": 1.0061,
      "step": 45
    },
    {
      "epoch": 0.04688672168042011,
      "grad_norm": 6.503070007858331,
      "learning_rate": 5.5e-06,
      "loss": 0.9871,
      "step": 50
    },
    {
      "epoch": 0.05157539384846212,
      "grad_norm": 5.434874712126211,
      "learning_rate": 5.414406436166232e-06,
      "loss": 1.0584,
      "step": 55
    },
    {
      "epoch": 0.056264066016504126,
      "grad_norm": 2.394040425022367,
      "learning_rate": 5.32986463435603e-06,
      "loss": 1.0672,
      "step": 60
    },
    {
      "epoch": 0.060952738184546135,
      "grad_norm": 2.7812207561170776,
      "learning_rate": 5.246366801851234e-06,
      "loss": 0.9798,
      "step": 65
    },
    {
      "epoch": 0.06564141035258815,
      "grad_norm": 3.4321299409489927,
      "learning_rate": 5.163905165275343e-06,
      "loss": 1.0684,
      "step": 70
    },
    {
      "epoch": 0.07033008252063015,
      "grad_norm": 2.4331799242183005,
      "learning_rate": 5.082471970641763e-06,
      "loss": 1.1214,
      "step": 75
    },
    {
      "epoch": 0.07501875468867217,
      "grad_norm": 2.66350051316822,
      "learning_rate": 5.002059483402411e-06,
      "loss": 1.0422,
      "step": 80
    },
    {
      "epoch": 0.07501875468867217,
      "eval_loss": 1.0044387578964233,
      "eval_runtime": 21.7707,
      "eval_samples_per_second": 9.187,
      "eval_steps_per_second": 2.297,
      "step": 80
    },
    {
      "epoch": 0.07970742685671418,
      "grad_norm": 2.457085516495112,
      "learning_rate": 4.922659988496696e-06,
      "loss": 1.004,
      "step": 85
    },
    {
      "epoch": 0.08439609902475619,
      "grad_norm": 2.673304521252877,
      "learning_rate": 4.844265790400869e-06,
      "loss": 1.1774,
      "step": 90
    },
    {
      "epoch": 0.0890847711927982,
      "grad_norm": 2.9697389911450105,
      "learning_rate": 4.766869213177739e-06,
      "loss": 1.029,
      "step": 95
    },
    {
      "epoch": 0.09377344336084022,
      "grad_norm": 2.7485846803270175,
      "learning_rate": 4.690462600526791e-06,
      "loss": 1.0735,
      "step": 100
    },
    {
      "epoch": 0.09846211552888222,
      "grad_norm": 2.3597539988620047,
      "learning_rate": 4.615038315834675e-06,
      "loss": 1.0426,
      "step": 105
    },
    {
      "epoch": 0.10315078769692423,
      "grad_norm": 10.287138782877946,
      "learning_rate": 4.5405887422260886e-06,
      "loss": 1.1257,
      "step": 110
    },
    {
      "epoch": 0.10783945986496624,
      "grad_norm": 6.22099162659968,
      "learning_rate": 4.467106282615065e-06,
      "loss": 1.0203,
      "step": 115
    },
    {
      "epoch": 0.11252813203300825,
      "grad_norm": 6.918830274001748,
      "learning_rate": 4.394583359756651e-06,
      "loss": 1.1145,
      "step": 120
    },
    {
      "epoch": 0.11721680420105027,
      "grad_norm": 5.8742101902111,
      "learning_rate": 4.323012416298999e-06,
      "loss": 1.0943,
      "step": 125
    },
    {
      "epoch": 0.12190547636909227,
      "grad_norm": 2.6642490667255525,
      "learning_rate": 4.252385914835873e-06,
      "loss": 1.087,
      "step": 130
    },
    {
      "epoch": 0.12659414853713427,
      "grad_norm": 2.6965562685918023,
      "learning_rate": 4.182696337959566e-06,
      "loss": 1.0873,
      "step": 135
    },
    {
      "epoch": 0.1312828207051763,
      "grad_norm": 2.951044122767734,
      "learning_rate": 4.113936188314245e-06,
      "loss": 0.8704,
      "step": 140
    },
    {
      "epoch": 0.1359714928732183,
      "grad_norm": 2.101235354697959,
      "learning_rate": 4.046097988649726e-06,
      "loss": 1.1235,
      "step": 145
    },
    {
      "epoch": 0.1406601650412603,
      "grad_norm": 3.2330279427425617,
      "learning_rate": 3.979174281875685e-06,
      "loss": 0.997,
      "step": 150
    },
    {
      "epoch": 0.14534883720930233,
      "grad_norm": 3.3889833141888728,
      "learning_rate": 3.9131576311163e-06,
      "loss": 1.0179,
      "step": 155
    },
    {
      "epoch": 0.15003750937734434,
      "grad_norm": 4.973298096388189,
      "learning_rate": 3.848040619765356e-06,
      "loss": 0.9127,
      "step": 160
    },
    {
      "epoch": 0.15003750937734434,
      "eval_loss": 0.9606707692146301,
      "eval_runtime": 20.2418,
      "eval_samples_per_second": 9.881,
      "eval_steps_per_second": 2.47,
      "step": 160
    },
    {
      "epoch": 0.15472618154538634,
      "grad_norm": 8.29768606535662,
      "learning_rate": 3.7838158515417857e-06,
      "loss": 1.0887,
      "step": 165
    },
    {
      "epoch": 0.15941485371342837,
      "grad_norm": 3.7810993101840147,
      "learning_rate": 3.7204759505456866e-06,
      "loss": 0.8966,
      "step": 170
    },
    {
      "epoch": 0.16410352588147037,
      "grad_norm": 2.644220736384783,
      "learning_rate": 3.65801356131479e-06,
      "loss": 1.0543,
      "step": 175
    },
    {
      "epoch": 0.16879219804951237,
      "grad_norm": 3.614698732279852,
      "learning_rate": 3.596421348881407e-06,
      "loss": 0.9387,
      "step": 180
    },
    {
      "epoch": 0.1734808702175544,
      "grad_norm": 2.8015834886932787,
      "learning_rate": 3.535691998829856e-06,
      "loss": 1.0746,
      "step": 185
    },
    {
      "epoch": 0.1781695423855964,
      "grad_norm": 2.564293653383082,
      "learning_rate": 3.4758182173543725e-06,
      "loss": 0.8235,
      "step": 190
    },
    {
      "epoch": 0.1828582145536384,
      "grad_norm": 3.118218626122133,
      "learning_rate": 3.4167927313175065e-06,
      "loss": 0.9392,
      "step": 195
    },
    {
      "epoch": 0.18754688672168043,
      "grad_norm": 3.1320015313206078,
      "learning_rate": 3.358608288309036e-06,
      "loss": 0.9951,
      "step": 200
    },
    {
      "epoch": 0.19223555888972244,
      "grad_norm": 2.8693840620672026,
      "learning_rate": 3.3012576567053635e-06,
      "loss": 1.0835,
      "step": 205
    },
    {
      "epoch": 0.19692423105776444,
      "grad_norm": 3.961730546242666,
      "learning_rate": 3.2447336257294427e-06,
      "loss": 0.8606,
      "step": 210
    },
    {
      "epoch": 0.20161290322580644,
      "grad_norm": 3.380982676943891,
      "learning_rate": 3.189029005511225e-06,
      "loss": 1.0039,
      "step": 215
    },
    {
      "epoch": 0.20630157539384847,
      "grad_norm": 3.072769902074738,
      "learning_rate": 3.134136627148626e-06,
      "loss": 0.9235,
      "step": 220
    },
    {
      "epoch": 0.21099024756189047,
      "grad_norm": 2.245002916223262,
      "learning_rate": 3.080049342769041e-06,
      "loss": 1.06,
      "step": 225
    },
    {
      "epoch": 0.21567891972993247,
      "grad_norm": 2.541443132900714,
      "learning_rate": 3.026760025591393e-06,
      "loss": 0.9432,
      "step": 230
    },
    {
      "epoch": 0.2203675918979745,
      "grad_norm": 3.123240987262189,
      "learning_rate": 2.97426156998874e-06,
      "loss": 1.0341,
      "step": 235
    },
    {
      "epoch": 0.2250562640660165,
      "grad_norm": 2.404173594460732,
      "learning_rate": 2.9225468915514425e-06,
      "loss": 0.8605,
      "step": 240
    },
    {
      "epoch": 0.2250562640660165,
      "eval_loss": 0.944010317325592,
      "eval_runtime": 20.3476,
      "eval_samples_per_second": 9.829,
      "eval_steps_per_second": 2.457,
      "step": 240
    },
    {
      "epoch": 0.2297449362340585,
      "grad_norm": 2.839083125403713,
      "learning_rate": 2.8716089271509e-06,
      "loss": 0.986,
      "step": 245
    },
    {
      "epoch": 0.23443360840210054,
      "grad_norm": 3.109434158155652,
      "learning_rate": 2.8214406350038632e-06,
      "loss": 0.8936,
      "step": 250
    },
    {
      "epoch": 0.23912228057014254,
      "grad_norm": 2.5603812627943325,
      "learning_rate": 2.772034994737337e-06,
      "loss": 1.0787,
      "step": 255
    },
    {
      "epoch": 0.24381095273818454,
      "grad_norm": 2.843918945845048,
      "learning_rate": 2.7233850074540736e-06,
      "loss": 0.8681,
      "step": 260
    },
    {
      "epoch": 0.24849962490622657,
      "grad_norm": 3.295967379586164,
      "learning_rate": 2.6754836957986757e-06,
      "loss": 1.063,
      "step": 265
    },
    {
      "epoch": 0.25318829707426854,
      "grad_norm": 2.6450503587985867,
      "learning_rate": 2.6283241040243133e-06,
      "loss": 0.9345,
      "step": 270
    },
    {
      "epoch": 0.25787696924231057,
      "grad_norm": 2.6669681551281936,
      "learning_rate": 2.5818992980600576e-06,
      "loss": 0.9366,
      "step": 275
    },
    {
      "epoch": 0.2625656414103526,
      "grad_norm": 2.365910075512645,
      "learning_rate": 2.5362023655788563e-06,
      "loss": 0.9222,
      "step": 280
    },
    {
      "epoch": 0.2672543135783946,
      "grad_norm": 2.4773250316490336,
      "learning_rate": 2.491226416066151e-06,
      "loss": 0.9816,
      "step": 285
    },
    {
      "epoch": 0.2719429857464366,
      "grad_norm": 2.051844651359742,
      "learning_rate": 2.4469645808891426e-06,
      "loss": 1.0592,
      "step": 290
    },
    {
      "epoch": 0.27663165791447863,
      "grad_norm": 2.7110963822173875,
      "learning_rate": 2.40341001336673e-06,
      "loss": 0.9458,
      "step": 295
    },
    {
      "epoch": 0.2813203300825206,
      "grad_norm": 2.1769151172554047,
      "learning_rate": 2.3605558888401135e-06,
      "loss": 0.8555,
      "step": 300
    },
    {
      "epoch": 0.28600900225056264,
      "grad_norm": 2.626727731539155,
      "learning_rate": 2.318395404744094e-06,
      "loss": 1.1516,
      "step": 305
    },
    {
      "epoch": 0.29069767441860467,
      "grad_norm": 2.306595833971501,
      "learning_rate": 2.276921780679061e-06,
      "loss": 0.8737,
      "step": 310
    },
    {
      "epoch": 0.29538634658664664,
      "grad_norm": 2.569147248252582,
      "learning_rate": 2.2361282584836925e-06,
      "loss": 1.0032,
      "step": 315
    },
    {
      "epoch": 0.30007501875468867,
      "grad_norm": 1.9670956149750123,
      "learning_rate": 2.1960081023083778e-06,
      "loss": 0.9068,
      "step": 320
    },
    {
      "epoch": 0.30007501875468867,
      "eval_loss": 0.9342896938323975,
      "eval_runtime": 20.3147,
      "eval_samples_per_second": 9.845,
      "eval_steps_per_second": 2.461,
      "step": 320
    },
    {
      "epoch": 0.3047636909227307,
      "grad_norm": 2.0859945243647395,
      "learning_rate": 2.156554598689365e-06,
      "loss": 1.0191,
      "step": 325
    },
    {
      "epoch": 0.3094523630907727,
      "grad_norm": 1.9655466100226573,
      "learning_rate": 2.117761056623659e-06,
      "loss": 0.9328,
      "step": 330
    },
    {
      "epoch": 0.3141410352588147,
      "grad_norm": 2.581123721405676,
      "learning_rate": 2.0796208076446752e-06,
      "loss": 1.0101,
      "step": 335
    },
    {
      "epoch": 0.31882970742685673,
      "grad_norm": 2.443562264938451,
      "learning_rate": 2.0421272058986607e-06,
      "loss": 0.9154,
      "step": 340
    },
    {
      "epoch": 0.3235183795948987,
      "grad_norm": 2.3271716313484987,
      "learning_rate": 2.0052736282219008e-06,
      "loss": 1.044,
      "step": 345
    },
    {
      "epoch": 0.32820705176294074,
      "grad_norm": 2.4236414480748834,
      "learning_rate": 1.9690534742187182e-06,
      "loss": 1.0251,
      "step": 350
    },
    {
      "epoch": 0.33289572393098277,
      "grad_norm": 2.5311887907377253,
      "learning_rate": 1.9334601663402865e-06,
      "loss": 0.9126,
      "step": 355
    },
    {
      "epoch": 0.33758439609902474,
      "grad_norm": 2.19719594303758,
      "learning_rate": 1.898487149964267e-06,
      "loss": 0.9877,
      "step": 360
    },
    {
      "epoch": 0.34227306826706677,
      "grad_norm": 3.0147002131335805,
      "learning_rate": 1.8641278934752799e-06,
      "loss": 0.9057,
      "step": 365
    },
    {
      "epoch": 0.3469617404351088,
      "grad_norm": 2.134323364449907,
      "learning_rate": 1.8303758883462328e-06,
      "loss": 1.0681,
      "step": 370
    },
    {
      "epoch": 0.3516504126031508,
      "grad_norm": 2.3509776870701424,
      "learning_rate": 1.7972246492205194e-06,
      "loss": 0.9568,
      "step": 375
    },
    {
      "epoch": 0.3563390847711928,
      "grad_norm": 2.1090286048701814,
      "learning_rate": 1.7646677139950976e-06,
      "loss": 1.032,
      "step": 380
    },
    {
      "epoch": 0.36102775693923483,
      "grad_norm": 2.36149114890146,
      "learning_rate": 1.7326986439044696e-06,
      "loss": 0.9867,
      "step": 385
    },
    {
      "epoch": 0.3657164291072768,
      "grad_norm": 2.255024349073518,
      "learning_rate": 1.701311023605583e-06,
      "loss": 0.8747,
      "step": 390
    },
    {
      "epoch": 0.37040510127531884,
      "grad_norm": 2.6380764656009923,
      "learning_rate": 1.6704984612636572e-06,
      "loss": 0.9224,
      "step": 395
    },
    {
      "epoch": 0.37509377344336087,
      "grad_norm": 2.4870934478859126,
      "learning_rate": 1.6402545886389659e-06,
      "loss": 0.9147,
      "step": 400
    },
    {
      "epoch": 0.37509377344336087,
      "eval_loss": 0.9293374419212341,
      "eval_runtime": 20.325,
      "eval_samples_per_second": 9.84,
      "eval_steps_per_second": 2.46,
      "step": 400
    },
    {
      "epoch": 0.37978244561140284,
      "grad_norm": 2.5774447639043236,
      "learning_rate": 1.610573061174586e-06,
      "loss": 0.9285,
      "step": 405
    },
    {
      "epoch": 0.38447111777944487,
      "grad_norm": 2.2678788716470817,
      "learning_rate": 1.5814475580851346e-06,
      "loss": 0.9994,
      "step": 410
    },
    {
      "epoch": 0.38915978994748684,
      "grad_norm": 2.8722747503884114,
      "learning_rate": 1.5528717824465089e-06,
      "loss": 0.8864,
      "step": 415
    },
    {
      "epoch": 0.3938484621155289,
      "grad_norm": 2.436535560556849,
      "learning_rate": 1.5248394612866496e-06,
      "loss": 1.1302,
      "step": 420
    },
    {
      "epoch": 0.3985371342835709,
      "grad_norm": 1.9720574518399083,
      "learning_rate": 1.4973443456773522e-06,
      "loss": 0.9394,
      "step": 425
    },
    {
      "epoch": 0.4032258064516129,
      "grad_norm": 2.6156706265956258,
      "learning_rate": 1.4703802108271373e-06,
      "loss": 0.7922,
      "step": 430
    },
    {
      "epoch": 0.4079144786196549,
      "grad_norm": 2.295310925977984,
      "learning_rate": 1.4439408561752077e-06,
      "loss": 0.9312,
      "step": 435
    },
    {
      "epoch": 0.41260315078769694,
      "grad_norm": 2.147812597078933,
      "learning_rate": 1.4180201054865116e-06,
      "loss": 0.9859,
      "step": 440
    },
    {
      "epoch": 0.4172918229557389,
      "grad_norm": 2.029009456836816,
      "learning_rate": 1.392611806947934e-06,
      "loss": 1.0307,
      "step": 445
    },
    {
      "epoch": 0.42198049512378094,
      "grad_norm": 2.590837380389044,
      "learning_rate": 1.3677098332656357e-06,
      "loss": 0.92,
      "step": 450
    },
    {
      "epoch": 0.42666916729182297,
      "grad_norm": 2.3763009955883505,
      "learning_rate": 1.3433080817635696e-06,
      "loss": 1.0955,
      "step": 455
    },
    {
      "epoch": 0.43135783945986494,
      "grad_norm": 2.80941534379534,
      "learning_rate": 1.3194004744831898e-06,
      "loss": 0.903,
      "step": 460
    },
    {
      "epoch": 0.436046511627907,
      "grad_norm": 2.14656443429063,
      "learning_rate": 1.2959809582843855e-06,
      "loss": 0.8284,
      "step": 465
    },
    {
      "epoch": 0.440735183795949,
      "grad_norm": 2.7676121711339423,
      "learning_rate": 1.273043504947661e-06,
      "loss": 1.0462,
      "step": 470
    },
    {
      "epoch": 0.445423855963991,
      "grad_norm": 2.44008408182776,
      "learning_rate": 1.2505821112775862e-06,
      "loss": 0.979,
      "step": 475
    },
    {
      "epoch": 0.450112528132033,
      "grad_norm": 2.584889518443889,
      "learning_rate": 1.2285907992075474e-06,
      "loss": 1.0192,
      "step": 480
    },
    {
      "epoch": 0.450112528132033,
      "eval_loss": 0.9250276684761047,
      "eval_runtime": 20.5471,
      "eval_samples_per_second": 9.734,
      "eval_steps_per_second": 2.433,
      "step": 480
    },
    {
      "epoch": 0.45480120030007504,
      "grad_norm": 2.496373694809835,
      "learning_rate": 1.207063615905829e-06,
      "loss": 0.9037,
      "step": 485
    },
    {
      "epoch": 0.459489872468117,
      "grad_norm": 2.371873747415523,
      "learning_rate": 1.1859946338830404e-06,
      "loss": 1.0312,
      "step": 490
    },
    {
      "epoch": 0.46417854463615904,
      "grad_norm": 2.2570285962672894,
      "learning_rate": 1.1653779511009372e-06,
      "loss": 0.9113,
      "step": 495
    },
    {
      "epoch": 0.46886721680420107,
      "grad_norm": 2.6279198777243606,
      "learning_rate": 1.145207691082648e-06,
      "loss": 0.8337,
      "step": 500
    },
    {
      "epoch": 0.47355588897224304,
      "grad_norm": 2.7761777412837767,
      "learning_rate": 1.1254780030243539e-06,
      "loss": 0.9602,
      "step": 505
    },
    {
      "epoch": 0.4782445611402851,
      "grad_norm": 2.5445962149324663,
      "learning_rate": 1.1061830619084358e-06,
      "loss": 0.9804,
      "step": 510
    },
    {
      "epoch": 0.4829332333083271,
      "grad_norm": 2.3633364319931514,
      "learning_rate": 1.087317068618139e-06,
      "loss": 0.977,
      "step": 515
    },
    {
      "epoch": 0.4876219054763691,
      "grad_norm": 1.9986528249443352,
      "learning_rate": 1.0688742500537784e-06,
      "loss": 0.9425,
      "step": 520
    },
    {
      "epoch": 0.4923105776444111,
      "grad_norm": 2.4501412442964803,
      "learning_rate": 1.0508488592505175e-06,
      "loss": 0.9995,
      "step": 525
    },
    {
      "epoch": 0.49699924981245314,
      "grad_norm": 2.4394994412302786,
      "learning_rate": 1.0332351754977698e-06,
      "loss": 0.9329,
      "step": 530
    },
    {
      "epoch": 0.5016879219804952,
      "grad_norm": 2.2074019259444544,
      "learning_rate": 1.016027504460246e-06,
      "loss": 0.9203,
      "step": 535
    },
    {
      "epoch": 0.5063765941485371,
      "grad_norm": 2.610216953517823,
      "learning_rate": 9.992201783006927e-07,
      "loss": 1.0291,
      "step": 540
    },
    {
      "epoch": 0.5110652663165791,
      "grad_norm": 2.718383436345948,
      "learning_rate": 9.828075558043617e-07,
      "loss": 0.9292,
      "step": 545
    },
    {
      "epoch": 0.5157539384846211,
      "grad_norm": 3.6229517535213143,
      "learning_rate": 9.667840225052484e-07,
      "loss": 1.0165,
      "step": 550
    },
    {
      "epoch": 0.5204426106526632,
      "grad_norm": 2.566929441984276,
      "learning_rate": 9.511439908141446e-07,
      "loss": 1.012,
      "step": 555
    },
    {
      "epoch": 0.5251312828207052,
      "grad_norm": 2.2133821736785015,
      "learning_rate": 9.358819001485473e-07,
      "loss": 0.8303,
      "step": 560
    },
    {
      "epoch": 0.5251312828207052,
      "eval_loss": 0.9191934466362,
      "eval_runtime": 20.5275,
      "eval_samples_per_second": 9.743,
      "eval_steps_per_second": 2.436,
      "step": 560
    },
    {
      "epoch": 0.5298199549887472,
      "grad_norm": 2.530396363584249,
      "learning_rate": 9.209922170644708e-07,
      "loss": 1.022,
      "step": 565
    },
    {
      "epoch": 0.5345086271567892,
      "grad_norm": 3.0761740344436017,
      "learning_rate": 9.06469435390206e-07,
      "loss": 0.9437,
      "step": 570
    },
    {
      "epoch": 0.5391972993248312,
      "grad_norm": 2.679955680751534,
      "learning_rate": 8.923080763620794e-07,
      "loss": 0.9776,
      "step": 575
    },
    {
      "epoch": 0.5438859714928732,
      "grad_norm": 2.477813948279551,
      "learning_rate": 8.785026887622588e-07,
      "loss": 0.8858,
      "step": 580
    },
    {
      "epoch": 0.5485746436609152,
      "grad_norm": 3.021641574122816,
      "learning_rate": 8.650478490586582e-07,
      "loss": 0.9392,
      "step": 585
    },
    {
      "epoch": 0.5532633158289573,
      "grad_norm": 3.0622821568316985,
      "learning_rate": 8.519381615469985e-07,
      "loss": 1.0067,
      "step": 590
    },
    {
      "epoch": 0.5579519879969993,
      "grad_norm": 2.0240771327720606,
      "learning_rate": 8.391682584950767e-07,
      "loss": 0.8645,
      "step": 595
    },
    {
      "epoch": 0.5626406601650412,
      "grad_norm": 2.4592599613454453,
      "learning_rate": 8.267328002892997e-07,
      "loss": 0.8116,
      "step": 600
    },
    {
      "epoch": 0.5673293323330832,
      "grad_norm": 2.3279060484215304,
      "learning_rate": 8.146264755835511e-07,
      "loss": 1.0685,
      "step": 605
    },
    {
      "epoch": 0.5720180045011253,
      "grad_norm": 2.1915343114528394,
      "learning_rate": 8.028440014504431e-07,
      "loss": 1.0312,
      "step": 610
    },
    {
      "epoch": 0.5767066766691673,
      "grad_norm": 2.353311242466667,
      "learning_rate": 7.913801235350256e-07,
      "loss": 0.9753,
      "step": 615
    },
    {
      "epoch": 0.5813953488372093,
      "grad_norm": 1.9822254771453949,
      "learning_rate": 7.80229616211014e-07,
      "loss": 1.0626,
      "step": 620
    },
    {
      "epoch": 0.5860840210052514,
      "grad_norm": 2.229570765456582,
      "learning_rate": 7.693872827396111e-07,
      "loss": 0.8915,
      "step": 625
    },
    {
      "epoch": 0.5907726931732933,
      "grad_norm": 2.1333212031989017,
      "learning_rate": 7.58847955430991e-07,
      "loss": 0.9256,
      "step": 630
    },
    {
      "epoch": 0.5954613653413353,
      "grad_norm": 2.3285962864619107,
      "learning_rate": 7.486064958085216e-07,
      "loss": 0.8844,
      "step": 635
    },
    {
      "epoch": 0.6001500375093773,
      "grad_norm": 2.4239895162113654,
      "learning_rate": 7.386577947758049e-07,
      "loss": 1.0284,
      "step": 640
    },
    {
      "epoch": 0.6001500375093773,
      "eval_loss": 0.9292100667953491,
      "eval_runtime": 20.2551,
      "eval_samples_per_second": 9.874,
      "eval_steps_per_second": 2.469,
      "step": 640
    },
    {
      "epoch": 0.6048387096774194,
      "grad_norm": 2.592229243089013,
      "learning_rate": 7.289967727866171e-07,
      "loss": 0.8607,
      "step": 645
    },
    {
      "epoch": 0.6095273818454614,
      "grad_norm": 2.557096114560489,
      "learning_rate": 7.196183800178289e-07,
      "loss": 1.0461,
      "step": 650
    },
    {
      "epoch": 0.6142160540135033,
      "grad_norm": 2.7258364788939278,
      "learning_rate": 7.105175965454019e-07,
      "loss": 0.8923,
      "step": 655
    },
    {
      "epoch": 0.6189047261815454,
      "grad_norm": 2.500897088452433,
      "learning_rate": 7.016894325235454e-07,
      "loss": 0.908,
      "step": 660
    },
    {
      "epoch": 0.6235933983495874,
      "grad_norm": 3.4364862124732873,
      "learning_rate": 6.931289283671353e-07,
      "loss": 0.8488,
      "step": 665
    },
    {
      "epoch": 0.6282820705176294,
      "grad_norm": 2.6544071650230405,
      "learning_rate": 6.84831154937491e-07,
      "loss": 0.815,
      "step": 670
    },
    {
      "epoch": 0.6329707426856714,
      "grad_norm": 2.9017321933643356,
      "learning_rate": 6.767912137316187e-07,
      "loss": 1.0641,
      "step": 675
    },
    {
      "epoch": 0.6376594148537135,
      "grad_norm": 3.2461446427637854,
      "learning_rate": 6.690042370750264e-07,
      "loss": 0.9388,
      "step": 680
    },
    {
      "epoch": 0.6423480870217554,
      "grad_norm": 2.5936370960610398,
      "learning_rate": 6.614653883182271e-07,
      "loss": 0.83,
      "step": 685
    },
    {
      "epoch": 0.6470367591897974,
      "grad_norm": 3.027451608426823,
      "learning_rate": 6.541698620370481e-07,
      "loss": 0.9852,
      "step": 690
    },
    {
      "epoch": 0.6517254313578394,
      "grad_norm": 2.8965181885799933,
      "learning_rate": 6.471128842368711e-07,
      "loss": 0.8914,
      "step": 695
    },
    {
      "epoch": 0.6564141035258815,
      "grad_norm": 2.9030689776107,
      "learning_rate": 6.402897125609332e-07,
      "loss": 0.9833,
      "step": 700
    },
    {
      "epoch": 0.6611027756939235,
      "grad_norm": 2.6265372108049303,
      "learning_rate": 6.336956365028259e-07,
      "loss": 1.0902,
      "step": 705
    },
    {
      "epoch": 0.6657914478619655,
      "grad_norm": 2.186738270502514,
      "learning_rate": 6.273259776233337e-07,
      "loss": 0.8316,
      "step": 710
    },
    {
      "epoch": 0.6704801200300075,
      "grad_norm": 10.89006237530139,
      "learning_rate": 6.211760897717641e-07,
      "loss": 1.0283,
      "step": 715
    },
    {
      "epoch": 0.6751687921980495,
      "grad_norm": 2.7796220586562344,
      "learning_rate": 6.152413593119235e-07,
      "loss": 0.9183,
      "step": 720
    },
    {
      "epoch": 0.6751687921980495,
      "eval_loss": 0.9389083385467529,
      "eval_runtime": 20.3673,
      "eval_samples_per_second": 9.82,
      "eval_steps_per_second": 2.455,
      "step": 720
    },
    {
      "epoch": 0.6798574643660915,
      "grad_norm": 2.906995236208513,
      "learning_rate": 6.095172053529076e-07,
      "loss": 1.034,
      "step": 725
    },
    {
      "epoch": 0.6845461365341335,
      "grad_norm": 2.3367410989611943,
      "learning_rate": 6.039990799848741e-07,
      "loss": 0.898,
      "step": 730
    },
    {
      "epoch": 0.6892348087021756,
      "grad_norm": 2.3266907504669976,
      "learning_rate": 5.986824685199863e-07,
      "loss": 0.7855,
      "step": 735
    },
    {
      "epoch": 0.6939234808702176,
      "grad_norm": 2.1566038379464842,
      "learning_rate": 5.935628897387149e-07,
      "loss": 1.0417,
      "step": 740
    },
    {
      "epoch": 0.6986121530382595,
      "grad_norm": 2.3519944231674685,
      "learning_rate": 5.886358961416999e-07,
      "loss": 0.9567,
      "step": 745
    },
    {
      "epoch": 0.7033008252063015,
      "grad_norm": 2.5749570284624457,
      "learning_rate": 5.838970742073876e-07,
      "loss": 0.7935,
      "step": 750
    },
    {
      "epoch": 0.7079894973743436,
      "grad_norm": 2.0032057277402844,
      "learning_rate": 5.793420446556638e-07,
      "loss": 0.9967,
      "step": 755
    },
    {
      "epoch": 0.7126781695423856,
      "grad_norm": 2.8692030696523703,
      "learning_rate": 5.74966462717722e-07,
      "loss": 1.0786,
      "step": 760
    },
    {
      "epoch": 0.7173668417104276,
      "grad_norm": 2.1378421674086994,
      "learning_rate": 5.707660184124143e-07,
      "loss": 1.115,
      "step": 765
    },
    {
      "epoch": 0.7220555138784697,
      "grad_norm": 2.344543312884903,
      "learning_rate": 5.667364368293497e-07,
      "loss": 0.9502,
      "step": 770
    },
    {
      "epoch": 0.7267441860465116,
      "grad_norm": 1.9323329729740275,
      "learning_rate": 5.6287347841902e-07,
      "loss": 0.9964,
      "step": 775
    },
    {
      "epoch": 0.7314328582145536,
      "grad_norm": 2.948582260278745,
      "learning_rate": 5.591729392902467e-07,
      "loss": 1.0077,
      "step": 780
    },
    {
      "epoch": 0.7361215303825956,
      "grad_norm": 2.1985962635350442,
      "learning_rate": 5.556306515152638e-07,
      "loss": 0.7058,
      "step": 785
    },
    {
      "epoch": 0.7408102025506377,
      "grad_norm": 4.099442910663203,
      "learning_rate": 5.522424834427688e-07,
      "loss": 0.856,
      "step": 790
    },
    {
      "epoch": 0.7454988747186797,
      "grad_norm": 2.3260610179486787,
      "learning_rate": 5.490043400192936e-07,
      "loss": 0.9852,
      "step": 795
    },
    {
      "epoch": 0.7501875468867217,
      "grad_norm": 2.2249724666107693,
      "learning_rate": 5.459121631192727e-07,
      "loss": 0.9897,
      "step": 800
    },
    {
      "epoch": 0.7501875468867217,
      "eval_loss": 0.9337027072906494,
      "eval_runtime": 20.543,
      "eval_samples_per_second": 9.736,
      "eval_steps_per_second": 2.434,
      "step": 800
    },
    {
      "epoch": 0.7548762190547637,
      "grad_norm": 2.3238862226735244,
      "learning_rate": 5.429619318842062e-07,
      "loss": 0.9693,
      "step": 805
    },
    {
      "epoch": 0.7595648912228057,
      "grad_norm": 2.5983979780721005,
      "learning_rate": 5.401496630713439e-07,
      "loss": 0.9363,
      "step": 810
    },
    {
      "epoch": 0.7642535633908477,
      "grad_norm": 3.203392479652258,
      "learning_rate": 5.374714114123462e-07,
      "loss": 0.9007,
      "step": 815
    },
    {
      "epoch": 0.7689422355588897,
      "grad_norm": 3.0116714434819523,
      "learning_rate": 5.34923269982403e-07,
      "loss": 0.8538,
      "step": 820
    },
    {
      "epoch": 0.7736309077269318,
      "grad_norm": 2.735036803752382,
      "learning_rate": 5.325013705803326e-07,
      "loss": 0.9429,
      "step": 825
    },
    {
      "epoch": 0.7783195798949737,
      "grad_norm": 2.7192900196409693,
      "learning_rate": 5.302018841202155e-07,
      "loss": 0.862,
      "step": 830
    },
    {
      "epoch": 0.7830082520630157,
      "grad_norm": 3.310126483594716,
      "learning_rate": 5.28021021035156e-07,
      "loss": 0.8743,
      "step": 835
    },
    {
      "epoch": 0.7876969242310577,
      "grad_norm": 3.5788231445835623,
      "learning_rate": 5.25955031693814e-07,
      "loss": 0.8728,
      "step": 840
    },
    {
      "epoch": 0.7923855963990998,
      "grad_norm": 2.3031948200618784,
      "learning_rate": 5.240002068303935e-07,
      "loss": 0.9804,
      "step": 845
    },
    {
      "epoch": 0.7970742685671418,
      "grad_norm": 2.27441145876717,
      "learning_rate": 5.22152877988829e-07,
      "loss": 0.9592,
      "step": 850
    },
    {
      "epoch": 0.8017629407351838,
      "grad_norm": 2.399760421263353,
      "learning_rate": 5.204094179819663e-07,
      "loss": 1.1113,
      "step": 855
    },
    {
      "epoch": 0.8064516129032258,
      "grad_norm": 2.786650153581553,
      "learning_rate": 5.187662413666055e-07,
      "loss": 0.896,
      "step": 860
    },
    {
      "epoch": 0.8111402850712678,
      "grad_norm": 2.3375248528715886,
      "learning_rate": 5.137741469209312e-07,
      "loss": 0.9484,
      "step": 865
    },
    {
      "epoch": 0.8158289572393098,
      "grad_norm": 2.107239092918484,
      "learning_rate": 5.034812576940423e-07,
      "loss": 0.9107,
      "step": 870
    },
    {
      "epoch": 0.8205176294073518,
      "grad_norm": 3.2120652639502136,
      "learning_rate": 4.931883684671535e-07,
      "loss": 1.0274,
      "step": 875
    },
    {
      "epoch": 0.8252063015753939,
      "grad_norm": 2.1470972788703353,
      "learning_rate": 4.828954792402647e-07,
      "loss": 1.0209,
      "step": 880
    },
    {
      "epoch": 0.8252063015753939,
      "eval_loss": 0.93896484375,
      "eval_runtime": 20.3111,
      "eval_samples_per_second": 9.847,
      "eval_steps_per_second": 2.462,
      "step": 880
    },
    {
      "epoch": 0.8298949737434359,
      "grad_norm": 2.0424483656754724,
      "learning_rate": 4.7260259001337577e-07,
      "loss": 0.9472,
      "step": 885
    },
    {
      "epoch": 0.8345836459114778,
      "grad_norm": 2.5878530403298434,
      "learning_rate": 4.6230970078648696e-07,
      "loss": 0.9458,
      "step": 890
    },
    {
      "epoch": 0.8392723180795199,
      "grad_norm": 2.497909901428293,
      "learning_rate": 4.5201681155959816e-07,
      "loss": 0.7928,
      "step": 895
    },
    {
      "epoch": 0.8439609902475619,
      "grad_norm": 2.1704945555054462,
      "learning_rate": 4.417239223327093e-07,
      "loss": 0.8992,
      "step": 900
    },
    {
      "epoch": 0.8486496624156039,
      "grad_norm": 1.9807380776486385,
      "learning_rate": 4.314310331058205e-07,
      "loss": 1.0681,
      "step": 905
    },
    {
      "epoch": 0.8533383345836459,
      "grad_norm": 2.2057348642236625,
      "learning_rate": 4.2113814387893164e-07,
      "loss": 1.057,
      "step": 910
    },
    {
      "epoch": 0.858027006751688,
      "grad_norm": 2.434197081864654,
      "learning_rate": 4.108452546520428e-07,
      "loss": 0.9609,
      "step": 915
    },
    {
      "epoch": 0.8627156789197299,
      "grad_norm": 1.641371038860168,
      "learning_rate": 4.00552365425154e-07,
      "loss": 0.9775,
      "step": 920
    },
    {
      "epoch": 0.8674043510877719,
      "grad_norm": 2.207714901698735,
      "learning_rate": 3.9025947619826517e-07,
      "loss": 0.7712,
      "step": 925
    },
    {
      "epoch": 0.872093023255814,
      "grad_norm": 2.011772170698376,
      "learning_rate": 3.7996658697137626e-07,
      "loss": 0.9408,
      "step": 930
    },
    {
      "epoch": 0.876781695423856,
      "grad_norm": 2.5252414435017747,
      "learning_rate": 3.6967369774448745e-07,
      "loss": 0.8363,
      "step": 935
    },
    {
      "epoch": 0.881470367591898,
      "grad_norm": 2.3365392350792287,
      "learning_rate": 3.5938080851759865e-07,
      "loss": 0.9977,
      "step": 940
    },
    {
      "epoch": 0.88615903975994,
      "grad_norm": 2.3882731527862346,
      "learning_rate": 3.490879192907099e-07,
      "loss": 0.9326,
      "step": 945
    },
    {
      "epoch": 0.890847711927982,
      "grad_norm": 2.180049227770247,
      "learning_rate": 3.38795030063821e-07,
      "loss": 0.9909,
      "step": 950
    },
    {
      "epoch": 0.895536384096024,
      "grad_norm": 2.4546116544168965,
      "learning_rate": 3.285021408369321e-07,
      "loss": 0.8207,
      "step": 955
    },
    {
      "epoch": 0.900225056264066,
      "grad_norm": 3.062713753398875,
      "learning_rate": 3.1820925161004326e-07,
      "loss": 0.9118,
      "step": 960
    },
    {
      "epoch": 0.900225056264066,
      "eval_loss": 0.9374144077301025,
      "eval_runtime": 20.3928,
      "eval_samples_per_second": 9.807,
      "eval_steps_per_second": 2.452,
      "step": 960
    },
    {
      "epoch": 0.904913728432108,
      "grad_norm": 2.9305276635268633,
      "learning_rate": 3.0791636238315446e-07,
      "loss": 0.8345,
      "step": 965
    },
    {
      "epoch": 0.9096024006001501,
      "grad_norm": 2.9508689565092046,
      "learning_rate": 2.9762347315626565e-07,
      "loss": 0.7999,
      "step": 970
    },
    {
      "epoch": 0.9142910727681921,
      "grad_norm": 3.9095096230897237,
      "learning_rate": 2.873305839293768e-07,
      "loss": 0.8812,
      "step": 975
    },
    {
      "epoch": 0.918979744936234,
      "grad_norm": 2.954610613967905,
      "learning_rate": 2.7703769470248794e-07,
      "loss": 0.9124,
      "step": 980
    },
    {
      "epoch": 0.923668417104276,
      "grad_norm": 2.3909984231204446,
      "learning_rate": 2.6674480547559913e-07,
      "loss": 0.8418,
      "step": 985
    },
    {
      "epoch": 0.9283570892723181,
      "grad_norm": 2.407545423074375,
      "learning_rate": 2.5645191624871027e-07,
      "loss": 0.9194,
      "step": 990
    },
    {
      "epoch": 0.9330457614403601,
      "grad_norm": 2.868767477778071,
      "learning_rate": 2.4615902702182147e-07,
      "loss": 0.9541,
      "step": 995
    },
    {
      "epoch": 0.9377344336084021,
      "grad_norm": 2.1293635803564066,
      "learning_rate": 2.3586613779493258e-07,
      "loss": 0.8087,
      "step": 1000
    },
    {
      "epoch": 0.9424231057764441,
      "grad_norm": 2.0557494272602264,
      "learning_rate": 2.255732485680438e-07,
      "loss": 0.9081,
      "step": 1005
    },
    {
      "epoch": 0.9471117779444861,
      "grad_norm": 2.1864930223853056,
      "learning_rate": 2.1528035934115495e-07,
      "loss": 0.859,
      "step": 1010
    },
    {
      "epoch": 0.9518004501125281,
      "grad_norm": 2.05122621453041,
      "learning_rate": 2.0498747011426614e-07,
      "loss": 0.8725,
      "step": 1015
    },
    {
      "epoch": 0.9564891222805701,
      "grad_norm": 3.2726273841892923,
      "learning_rate": 1.9469458088737728e-07,
      "loss": 0.7904,
      "step": 1020
    },
    {
      "epoch": 0.9611777944486122,
      "grad_norm": 2.259537550017741,
      "learning_rate": 1.8440169166048842e-07,
      "loss": 1.0669,
      "step": 1025
    },
    {
      "epoch": 0.9658664666166542,
      "grad_norm": 2.406310953068651,
      "learning_rate": 1.7410880243359964e-07,
      "loss": 0.8434,
      "step": 1030
    },
    {
      "epoch": 0.9705551387846961,
      "grad_norm": 1.9259292180053644,
      "learning_rate": 1.6381591320671076e-07,
      "loss": 1.0234,
      "step": 1035
    },
    {
      "epoch": 0.9752438109527382,
      "grad_norm": 2.0241322819114504,
      "learning_rate": 1.5352302397982195e-07,
      "loss": 0.9077,
      "step": 1040
    },
    {
      "epoch": 0.9752438109527382,
      "eval_loss": 0.9355975389480591,
      "eval_runtime": 20.3082,
      "eval_samples_per_second": 9.848,
      "eval_steps_per_second": 2.462,
      "step": 1040
    },
    {
      "epoch": 0.9799324831207802,
      "grad_norm": 1.9205593868577922,
      "learning_rate": 1.432301347529331e-07,
      "loss": 0.7646,
      "step": 1045
    },
    {
      "epoch": 0.9846211552888222,
      "grad_norm": 3.2012232274694834,
      "learning_rate": 1.329372455260443e-07,
      "loss": 0.8593,
      "step": 1050
    },
    {
      "epoch": 0.9893098274568642,
      "grad_norm": 2.3484948634638205,
      "learning_rate": 1.2264435629915543e-07,
      "loss": 1.0972,
      "step": 1055
    },
    {
      "epoch": 0.9939984996249063,
      "grad_norm": 2.7160625332606685,
      "learning_rate": 1.1235146707226661e-07,
      "loss": 0.7634,
      "step": 1060
    },
    {
      "epoch": 0.9986871717929482,
      "grad_norm": 2.3964216948732173,
      "learning_rate": 1.0205857784537777e-07,
      "loss": 0.9783,
      "step": 1065
    },
    {
      "epoch": 0.9996249062265566,
      "step": 1066,
      "total_flos": 1.5465201990107136e+17,
      "train_loss": 0.9640034305221815,
      "train_runtime": 13404.8173,
      "train_samples_per_second": 2.386,
      "train_steps_per_second": 0.08
    }
  ],
  "logging_steps": 5,
  "max_steps": 1066,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1066,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5465201990107136e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}