|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 5e-05,
      "loss": 1.8138,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001,
      "loss": 1.7936,
      "step": 2
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.7012,
      "step": 3
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 1.3853,
      "step": 4
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019996629653035126,
      "loss": 1.4042,
      "step": 5
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019986520883988232,
      "loss": 1.4301,
      "step": 6
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019969680506871137,
      "loss": 1.3754,
      "step": 7
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019946119873266613,
      "loss": 1.3265,
      "step": 8
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019915854864676664,
      "loss": 1.2392,
      "step": 9
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019878905881817252,
      "loss": 1.344,
      "step": 10
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019835297830866826,
      "loss": 1.2774,
      "step": 11
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019785060106677818,
      "loss": 1.2003,
      "step": 12
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019728226572962473,
      "loss": 1.1964,
      "step": 13
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001966483553946637,
      "loss": 1.2569,
      "step": 14
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019594929736144976,
      "loss": 1.1401,
      "step": 15
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019518556284360696,
      "loss": 1.1552,
      "step": 16
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001943576666511982,
      "loss": 1.2524,
      "step": 17
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001934661668437073,
      "loss": 1.1609,
      "step": 18
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001925116643538684,
      "loss": 1.2637,
      "step": 19
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019149480258259533,
      "loss": 1.2292,
      "step": 20
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019041626696528503,
      "loss": 1.1428,
      "step": 21
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001892767845097864,
      "loss": 1.304,
      "step": 22
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018807712330634642,
      "loss": 1.3038,
      "step": 23
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001868180920098644,
      "loss": 1.1857,
      "step": 24
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018550053929480202,
      "loss": 1.2526,
      "step": 25
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018412535328311814,
      "loss": 1.1447,
      "step": 26
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001826934609456129,
      "loss": 1.1373,
      "step": 27
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018120582747708502,
      "loss": 1.1123,
      "step": 28
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001796634556457236,
      "loss": 1.1393,
      "step": 29
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001780673851171728,
      "loss": 1.059,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00017641869175372493,
      "loss": 1.1767,
      "step": 31
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00017471848688911464,
      "loss": 1.2007,
      "step": 32
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.000172967916579403,
      "loss": 1.1403,
      "step": 33
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00017116816083045602,
      "loss": 1.1573,
      "step": 34
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001693204328025389,
      "loss": 1.1436,
      "step": 35
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00016742597799256182,
      "loss": 1.1916,
      "step": 36
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00016548607339452853,
      "loss": 1.1979,
      "step": 37
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00016350202663875386,
      "loss": 1.108,
      "step": 38
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001614751751104301,
      "loss": 1.1139,
      "step": 39
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00015940688504813662,
      "loss": 1.1229,
      "step": 40
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00015729855062290022,
      "loss": 1.1243,
      "step": 41
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00015515159299842707,
      "loss": 1.1036,
      "step": 42
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00015296745937313987,
      "loss": 1.1246,
      "step": 43
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00015074762200466556,
      "loss": 1.1044,
      "step": 44
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00014849357721743168,
      "loss": 1.1097,
      "step": 45
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00014620684439403962,
      "loss": 1.0692,
      "step": 46
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0001438889649510956,
      "loss": 1.2537,
      "step": 47
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00014154150130018866,
      "loss": 1.186,
      "step": 48
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00013916603579471705,
      "loss": 1.1326,
      "step": 49
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.000136764169663272,
      "loss": 1.1722,
      "step": 50
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00013433752193029886,
      "loss": 1.1804,
      "step": 51
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00013188772832476188,
      "loss": 1.2119,
      "step": 52
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00012941644017754964,
      "loss": 1.1719,
      "step": 53
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00012692532330836346,
      "loss": 1.1734,
      "step": 54
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00012441605690283915,
      "loss": 1.0837,
      "step": 55
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0001218903323806595,
      "loss": 1.0661,
      "step": 56
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00011934985225541998,
      "loss": 1.1172,
      "step": 57
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00011679632898701649,
      "loss": 1.06,
      "step": 58
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00011423148382732853,
      "loss": 1.0949,
      "step": 59
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00011165704565997593,
      "loss": 1.1851,
      "step": 60
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00010907474983493144,
      "loss": 1.1116,
      "step": 61
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001064863369987743,
      "loss": 1.1421,
      "step": 62
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00010389355192137377,
      "loss": 1.0834,
      "step": 63
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0001012981423197931,
      "loss": 1.0071,
      "step": 64
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.870185768020693e-05,
      "loss": 1.1504,
      "step": 65
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.610644807862625e-05,
      "loss": 1.2267,
      "step": 66
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.35136630012257e-05,
      "loss": 1.0855,
      "step": 67
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.092525016506858e-05,
      "loss": 1.1245,
      "step": 68
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.83429543400241e-05,
      "loss": 1.0357,
      "step": 69
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.57685161726715e-05,
      "loss": 1.1321,
      "step": 70
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.320367101298351e-05,
      "loss": 1.1019,
      "step": 71
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.065014774458003e-05,
      "loss": 1.0587,
      "step": 72
    },
    {
      "epoch": 0.58,
      "learning_rate": 7.810966761934053e-05,
      "loss": 1.0647,
      "step": 73
    },
    {
      "epoch": 0.59,
      "learning_rate": 7.558394309716088e-05,
      "loss": 1.2036,
      "step": 74
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.307467669163655e-05,
      "loss": 1.1131,
      "step": 75
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.058355982245037e-05,
      "loss": 1.1824,
      "step": 76
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.811227167523815e-05,
      "loss": 1.1728,
      "step": 77
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.566247806970119e-05,
      "loss": 1.0121,
      "step": 78
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.323583033672799e-05,
      "loss": 1.0867,
      "step": 79
    },
    {
      "epoch": 0.64,
      "learning_rate": 6.083396420528298e-05,
      "loss": 1.1176,
      "step": 80
    },
    {
      "epoch": 0.65,
      "learning_rate": 5.845849869981137e-05,
      "loss": 1.0947,
      "step": 81
    },
    {
      "epoch": 0.66,
      "learning_rate": 5.611103504890444e-05,
      "loss": 0.9342,
      "step": 82
    },
    {
      "epoch": 0.66,
      "learning_rate": 5.379315560596038e-05,
      "loss": 1.2286,
      "step": 83
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.1506422782568345e-05,
      "loss": 0.9824,
      "step": 84
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.9252377995334444e-05,
      "loss": 1.131,
      "step": 85
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.703254062686017e-05,
      "loss": 1.0423,
      "step": 86
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.484840700157295e-05,
      "loss": 1.0741,
      "step": 87
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.270144937709981e-05,
      "loss": 1.1291,
      "step": 88
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.059311495186338e-05,
      "loss": 1.1319,
      "step": 89
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.852482488956992e-05,
      "loss": 1.1128,
      "step": 90
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.649797336124615e-05,
      "loss": 1.0266,
      "step": 91
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.45139266054715e-05,
      "loss": 1.1304,
      "step": 92
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.257402200743821e-05,
      "loss": 1.1331,
      "step": 93
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.0679567197461134e-05,
      "loss": 1.0754,
      "step": 94
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.8831839169543996e-05,
      "loss": 1.014,
      "step": 95
    },
    {
      "epoch": 0.77,
      "learning_rate": 2.7032083420597e-05,
      "loss": 1.0225,
      "step": 96
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.528151311088537e-05,
      "loss": 1.1003,
      "step": 97
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.3581308246275103e-05,
      "loss": 0.9896,
      "step": 98
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.1932614882827197e-05,
      "loss": 1.1506,
      "step": 99
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.03365443542764e-05,
      "loss": 1.1285,
      "step": 100
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.879417252291502e-05,
      "loss": 1.1775,
      "step": 101
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.730653905438714e-05,
      "loss": 1.1682,
      "step": 102
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.587464671688187e-05,
      "loss": 1.0003,
      "step": 103
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.4499460705197998e-05,
      "loss": 1.186,
      "step": 104
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.3181907990135622e-05,
      "loss": 1.1757,
      "step": 105
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.1922876693653585e-05,
      "loss": 1.0854,
      "step": 106
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.0723215490213634e-05,
      "loss": 1.0466,
      "step": 107
    },
    {
      "epoch": 0.86,
      "learning_rate": 9.583733034714981e-06,
      "loss": 1.0728,
      "step": 108
    },
    {
      "epoch": 0.87,
      "learning_rate": 8.505197417404687e-06,
      "loss": 1.2036,
      "step": 109
    },
    {
      "epoch": 0.88,
      "learning_rate": 7.488335646131628e-06,
      "loss": 1.0486,
      "step": 110
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.533833156292679e-06,
      "loss": 1.0899,
      "step": 111
    },
    {
      "epoch": 0.9,
      "learning_rate": 5.6423333488018095e-06,
      "loss": 1.0798,
      "step": 112
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.8144371563930476e-06,
      "loss": 1.0657,
      "step": 113
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.050702638550275e-06,
      "loss": 1.0662,
      "step": 114
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.3516446053363015e-06,
      "loss": 1.0746,
      "step": 115
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.717734270375272e-06,
      "loss": 1.1993,
      "step": 116
    },
    {
      "epoch": 0.94,
      "learning_rate": 2.1493989332218468e-06,
      "loss": 1.0301,
      "step": 117
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.6470216913317626e-06,
      "loss": 1.0373,
      "step": 118
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.2109411818274852e-06,
      "loss": 0.9994,
      "step": 119
    },
    {
      "epoch": 0.96,
      "learning_rate": 8.41451353233369e-07,
      "loss": 1.044,
      "step": 120
    },
    {
      "epoch": 0.97,
      "learning_rate": 5.388012673338661e-07,
      "loss": 0.9979,
      "step": 121
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.0319493128866396e-07,
      "loss": 1.1068,
      "step": 122
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.3479116011769767e-07,
      "loss": 1.0095,
      "step": 123
    },
    {
      "epoch": 0.99,
      "learning_rate": 3.370346964876036e-08,
      "loss": 1.0024,
      "step": 124
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0,
      "loss": 1.0189,
      "step": 125
    },
    {
      "epoch": 1.0,
      "step": 125,
      "total_flos": 1901956792320.0,
      "train_loss": 1.1506947622299195,
      "train_runtime": 1117.0219,
      "train_samples_per_second": 1.79,
      "train_steps_per_second": 0.112
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50000,
  "total_flos": 1901956792320.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|