|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 2.9864454484852976, |
|
"eval_steps": 11, |
|
"global_step": 315, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.009480779201540626, |
|
"grad_norm": 0.38671875, |
|
"learning_rate": 2e-05, |
|
"loss": 0.4197, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.009480779201540626, |
|
"eval_loss": 0.41279804706573486, |
|
"eval_runtime": 34.6855, |
|
"eval_samples_per_second": 20.498, |
|
"eval_steps_per_second": 20.498, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.018961558403081252, |
|
"grad_norm": 0.359375, |
|
"learning_rate": 4e-05, |
|
"loss": 0.3966, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.02844233760462188, |
|
"grad_norm": 0.388671875, |
|
"learning_rate": 6e-05, |
|
"loss": 0.4257, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.037923116806162505, |
|
"grad_norm": 0.408203125, |
|
"learning_rate": 8e-05, |
|
"loss": 0.4026, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.04740389600770313, |
|
"grad_norm": 0.384765625, |
|
"learning_rate": 0.0001, |
|
"loss": 0.3229, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.05688467520924376, |
|
"grad_norm": 0.251953125, |
|
"learning_rate": 0.00012, |
|
"loss": 0.2408, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.06636545441078438, |
|
"grad_norm": 0.203125, |
|
"learning_rate": 0.00014, |
|
"loss": 0.1709, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.07584623361232501, |
|
"grad_norm": 0.150390625, |
|
"learning_rate": 0.00016, |
|
"loss": 0.1328, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.08532701281386564, |
|
"grad_norm": 0.1201171875, |
|
"learning_rate": 0.00018, |
|
"loss": 0.0993, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.09480779201540626, |
|
"grad_norm": 0.10888671875, |
|
"learning_rate": 0.0002, |
|
"loss": 0.0914, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.10428857121694689, |
|
"grad_norm": 0.119140625, |
|
"learning_rate": 0.00019999469523400122, |
|
"loss": 0.0885, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.10428857121694689, |
|
"eval_loss": 0.07809103280305862, |
|
"eval_runtime": 34.2892, |
|
"eval_samples_per_second": 20.735, |
|
"eval_steps_per_second": 20.735, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.11376935041848751, |
|
"grad_norm": 0.09228515625, |
|
"learning_rate": 0.00019997878149881574, |
|
"loss": 0.0761, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.12325012962002814, |
|
"grad_norm": 0.0703125, |
|
"learning_rate": 0.0001999522604828164, |
|
"loss": 0.0683, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.13273090882156877, |
|
"grad_norm": 0.059326171875, |
|
"learning_rate": 0.00019991513499975882, |
|
"loss": 0.0696, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.1422116880231094, |
|
"grad_norm": 0.044921875, |
|
"learning_rate": 0.00019986740898848306, |
|
"loss": 0.0615, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.15169246722465002, |
|
"grad_norm": 0.054443359375, |
|
"learning_rate": 0.00019980908751249555, |
|
"loss": 0.0625, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.16117324642619066, |
|
"grad_norm": 0.04736328125, |
|
"learning_rate": 0.00019974017675943192, |
|
"loss": 0.0598, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.17065402562773127, |
|
"grad_norm": 0.039306640625, |
|
"learning_rate": 0.0001996606840404006, |
|
"loss": 0.0573, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.1801348048292719, |
|
"grad_norm": 0.036376953125, |
|
"learning_rate": 0.00019957061778920701, |
|
"loss": 0.0482, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.18961558403081252, |
|
"grad_norm": 0.039306640625, |
|
"learning_rate": 0.0001994699875614589, |
|
"loss": 0.0555, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.19909636323235316, |
|
"grad_norm": 0.0380859375, |
|
"learning_rate": 0.00019935880403355253, |
|
"loss": 0.0495, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.20857714243389378, |
|
"grad_norm": 0.038818359375, |
|
"learning_rate": 0.00019923707900153982, |
|
"loss": 0.0482, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.20857714243389378, |
|
"eval_loss": 0.05171125754714012, |
|
"eval_runtime": 34.4113, |
|
"eval_samples_per_second": 20.662, |
|
"eval_steps_per_second": 20.662, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.21805792163543442, |
|
"grad_norm": 0.038818359375, |
|
"learning_rate": 0.00019910482537987702, |
|
"loss": 0.0529, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.22753870083697503, |
|
"grad_norm": 0.031494140625, |
|
"learning_rate": 0.0001989620572000544, |
|
"loss": 0.0495, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.23701948003851567, |
|
"grad_norm": 0.0380859375, |
|
"learning_rate": 0.00019880878960910772, |
|
"loss": 0.059, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.24650025924005628, |
|
"grad_norm": 0.032470703125, |
|
"learning_rate": 0.00019864503886801106, |
|
"loss": 0.0545, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.2559810384415969, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 0.00019847082234995171, |
|
"loss": 0.0417, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.26546181764313753, |
|
"grad_norm": 0.037109375, |
|
"learning_rate": 0.00019828615853848688, |
|
"loss": 0.0459, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.27494259684467814, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 0.00019809106702558277, |
|
"loss": 0.0412, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.2844233760462188, |
|
"grad_norm": 0.032470703125, |
|
"learning_rate": 0.0001978855685095358, |
|
"loss": 0.0403, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.2939041552477594, |
|
"grad_norm": 0.0289306640625, |
|
"learning_rate": 0.00019766968479277683, |
|
"loss": 0.0463, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.30338493444930004, |
|
"grad_norm": 0.0301513671875, |
|
"learning_rate": 0.00019744343877955788, |
|
"loss": 0.041, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.3128657136508407, |
|
"grad_norm": 0.033447265625, |
|
"learning_rate": 0.00019720685447352209, |
|
"loss": 0.045, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.3128657136508407, |
|
"eval_loss": 0.04294149950146675, |
|
"eval_runtime": 34.2719, |
|
"eval_samples_per_second": 20.746, |
|
"eval_steps_per_second": 20.746, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.3223464928523813, |
|
"grad_norm": 0.028076171875, |
|
"learning_rate": 0.0001969599569751571, |
|
"loss": 0.0369, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.33182727205392193, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 0.00019670277247913205, |
|
"loss": 0.0465, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.34130805125546254, |
|
"grad_norm": 0.032958984375, |
|
"learning_rate": 0.0001964353282715183, |
|
"loss": 0.0382, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.3507888304570032, |
|
"grad_norm": 0.0299072265625, |
|
"learning_rate": 0.00019615765272689461, |
|
"loss": 0.0442, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.3602696096585438, |
|
"grad_norm": 0.0322265625, |
|
"learning_rate": 0.00019586977530533677, |
|
"loss": 0.0481, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.36975038886008443, |
|
"grad_norm": 0.028076171875, |
|
"learning_rate": 0.00019557172654929196, |
|
"loss": 0.0423, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.37923116806162505, |
|
"grad_norm": 0.0299072265625, |
|
"learning_rate": 0.00019526353808033825, |
|
"loss": 0.0384, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.3887119472631657, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 0.00019494524259582992, |
|
"loss": 0.0404, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.3981927264647063, |
|
"grad_norm": 0.0286865234375, |
|
"learning_rate": 0.00019461687386542826, |
|
"loss": 0.0394, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.40767350566624694, |
|
"grad_norm": 0.032958984375, |
|
"learning_rate": 0.00019427846672751873, |
|
"loss": 0.0351, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.41715428486778755, |
|
"grad_norm": 0.02783203125, |
|
"learning_rate": 0.00019393005708551498, |
|
"loss": 0.0425, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.41715428486778755, |
|
"eval_loss": 0.03996235132217407, |
|
"eval_runtime": 34.3007, |
|
"eval_samples_per_second": 20.728, |
|
"eval_steps_per_second": 20.728, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.4266350640693282, |
|
"grad_norm": 0.030517578125, |
|
"learning_rate": 0.00019357168190404936, |
|
"loss": 0.0365, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.43611584327086883, |
|
"grad_norm": 0.032958984375, |
|
"learning_rate": 0.00019320337920505153, |
|
"loss": 0.0438, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.44559662247240944, |
|
"grad_norm": 0.0306396484375, |
|
"learning_rate": 0.00019282518806371414, |
|
"loss": 0.044, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.45507740167395005, |
|
"grad_norm": 0.037841796875, |
|
"learning_rate": 0.0001924371486043473, |
|
"loss": 0.0496, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.4645581808754907, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 0.0001920393019961217, |
|
"loss": 0.0369, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.47403896007703133, |
|
"grad_norm": 0.0341796875, |
|
"learning_rate": 0.0001916316904487005, |
|
"loss": 0.0399, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.48351973927857195, |
|
"grad_norm": 0.0255126953125, |
|
"learning_rate": 0.00019121435720776122, |
|
"loss": 0.0363, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.49300051848011256, |
|
"grad_norm": 0.0311279296875, |
|
"learning_rate": 0.0001907873465504076, |
|
"loss": 0.0384, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.5024812976816532, |
|
"grad_norm": 0.03271484375, |
|
"learning_rate": 0.00019035070378047204, |
|
"loss": 0.0371, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.5119620768831938, |
|
"grad_norm": 0.0263671875, |
|
"learning_rate": 0.00018990447522370884, |
|
"loss": 0.0351, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.5214428560847345, |
|
"grad_norm": 0.033203125, |
|
"learning_rate": 0.00018944870822287956, |
|
"loss": 0.0411, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.5214428560847345, |
|
"eval_loss": 0.03788134828209877, |
|
"eval_runtime": 34.6048, |
|
"eval_samples_per_second": 20.546, |
|
"eval_steps_per_second": 20.546, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.5309236352862751, |
|
"grad_norm": 0.0272216796875, |
|
"learning_rate": 0.00018898345113272998, |
|
"loss": 0.0337, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.5404044144878157, |
|
"grad_norm": 0.0245361328125, |
|
"learning_rate": 0.00018850875331485995, |
|
"loss": 0.0309, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.5498851936893563, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 0.00018802466513248632, |
|
"loss": 0.0329, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.559365972890897, |
|
"grad_norm": 0.0267333984375, |
|
"learning_rate": 0.00018753123794509974, |
|
"loss": 0.0365, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.5688467520924376, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 0.00018702852410301554, |
|
"loss": 0.0373, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.5783275312939782, |
|
"grad_norm": 0.025390625, |
|
"learning_rate": 0.0001865165769418196, |
|
"loss": 0.0375, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.5878083104955188, |
|
"grad_norm": 0.02978515625, |
|
"learning_rate": 0.00018599545077670985, |
|
"loss": 0.0376, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.5972890896970595, |
|
"grad_norm": 0.0242919921875, |
|
"learning_rate": 0.0001854652008967335, |
|
"loss": 0.0337, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.6067698688986001, |
|
"grad_norm": 0.02880859375, |
|
"learning_rate": 0.00018492588355892124, |
|
"loss": 0.0379, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.6162506481001407, |
|
"grad_norm": 0.0322265625, |
|
"learning_rate": 0.00018437755598231856, |
|
"loss": 0.0392, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.6257314273016814, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 0.00018382027634191524, |
|
"loss": 0.0348, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.6257314273016814, |
|
"eval_loss": 0.03594927862286568, |
|
"eval_runtime": 34.2756, |
|
"eval_samples_per_second": 20.744, |
|
"eval_steps_per_second": 20.744, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.635212206503222, |
|
"grad_norm": 0.03515625, |
|
"learning_rate": 0.00018325410376247294, |
|
"loss": 0.0379, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.6446929857047626, |
|
"grad_norm": 0.0301513671875, |
|
"learning_rate": 0.0001826790983122527, |
|
"loss": 0.0317, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.6541737649063032, |
|
"grad_norm": 0.0296630859375, |
|
"learning_rate": 0.00018209532099664174, |
|
"loss": 0.0366, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.6636545441078439, |
|
"grad_norm": 0.0272216796875, |
|
"learning_rate": 0.00018150283375168114, |
|
"loss": 0.0345, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.6731353233093845, |
|
"grad_norm": 0.03125, |
|
"learning_rate": 0.00018090169943749476, |
|
"loss": 0.0368, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.6826161025109251, |
|
"grad_norm": 0.032470703125, |
|
"learning_rate": 0.00018029198183161998, |
|
"loss": 0.0455, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.6920968817124658, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 0.00017967374562224132, |
|
"loss": 0.0362, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.7015776609140064, |
|
"grad_norm": 0.0311279296875, |
|
"learning_rate": 0.00017904705640132718, |
|
"loss": 0.0388, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.711058440115547, |
|
"grad_norm": 0.02685546875, |
|
"learning_rate": 0.00017841198065767107, |
|
"loss": 0.0288, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.7205392193170876, |
|
"grad_norm": 0.0302734375, |
|
"learning_rate": 0.00017776858576983712, |
|
"loss": 0.0349, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.7300199985186282, |
|
"grad_norm": 0.026611328125, |
|
"learning_rate": 0.0001771169399990119, |
|
"loss": 0.0288, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.7300199985186282, |
|
"eval_loss": 0.03419337794184685, |
|
"eval_runtime": 34.3669, |
|
"eval_samples_per_second": 20.688, |
|
"eval_steps_per_second": 20.688, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.7395007777201689, |
|
"grad_norm": 0.0240478515625, |
|
"learning_rate": 0.00017645711248176195, |
|
"loss": 0.0285, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.7489815569217095, |
|
"grad_norm": 0.025390625, |
|
"learning_rate": 0.00017578917322269886, |
|
"loss": 0.0338, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.7584623361232501, |
|
"grad_norm": 0.0281982421875, |
|
"learning_rate": 0.00017511319308705198, |
|
"loss": 0.0316, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.7679431153247908, |
|
"grad_norm": 0.025146484375, |
|
"learning_rate": 0.0001744292437931502, |
|
"loss": 0.0306, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.7774238945263314, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 0.00017373739790481262, |
|
"loss": 0.0335, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.786904673727872, |
|
"grad_norm": 0.0242919921875, |
|
"learning_rate": 0.00017303772882365016, |
|
"loss": 0.0271, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.7963854529294127, |
|
"grad_norm": 0.028564453125, |
|
"learning_rate": 0.00017233031078127788, |
|
"loss": 0.0334, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.8058662321309532, |
|
"grad_norm": 0.0299072265625, |
|
"learning_rate": 0.00017161521883143934, |
|
"loss": 0.0403, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.8153470113324939, |
|
"grad_norm": 0.0294189453125, |
|
"learning_rate": 0.00017089252884204377, |
|
"loss": 0.0368, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.8248277905340345, |
|
"grad_norm": 0.03369140625, |
|
"learning_rate": 0.0001701623174871168, |
|
"loss": 0.0335, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.8343085697355751, |
|
"grad_norm": 0.0277099609375, |
|
"learning_rate": 0.0001694246622386658, |
|
"loss": 0.0339, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.8343085697355751, |
|
"eval_loss": 0.033091045916080475, |
|
"eval_runtime": 34.4475, |
|
"eval_samples_per_second": 20.64, |
|
"eval_steps_per_second": 20.64, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.8437893489371158, |
|
"grad_norm": 0.02392578125, |
|
"learning_rate": 0.00016867964135846043, |
|
"loss": 0.0241, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.8532701281386564, |
|
"grad_norm": 0.0361328125, |
|
"learning_rate": 0.00016792733388972932, |
|
"loss": 0.0387, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.862750907340197, |
|
"grad_norm": 0.0260009765625, |
|
"learning_rate": 0.0001671678196487741, |
|
"loss": 0.0346, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.8722316865417377, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 0.00016640117921650117, |
|
"loss": 0.0378, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.8817124657432783, |
|
"grad_norm": 0.0279541015625, |
|
"learning_rate": 0.00016562749392987254, |
|
"loss": 0.0294, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.8911932449448189, |
|
"grad_norm": 0.0272216796875, |
|
"learning_rate": 0.0001648468458732762, |
|
"loss": 0.0308, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.9006740241463596, |
|
"grad_norm": 0.0242919921875, |
|
"learning_rate": 0.00016405931786981755, |
|
"loss": 0.0326, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.9101548033479001, |
|
"grad_norm": 0.0262451171875, |
|
"learning_rate": 0.00016326499347253207, |
|
"loss": 0.032, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.9196355825494408, |
|
"grad_norm": 0.0264892578125, |
|
"learning_rate": 0.00016246395695552085, |
|
"loss": 0.0321, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.9291163617509814, |
|
"grad_norm": 0.0244140625, |
|
"learning_rate": 0.00016165629330500952, |
|
"loss": 0.0301, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.938597140952522, |
|
"grad_norm": 0.029052734375, |
|
"learning_rate": 0.0001608420882103315, |
|
"loss": 0.0297, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.938597140952522, |
|
"eval_loss": 0.03182319179177284, |
|
"eval_runtime": 34.2304, |
|
"eval_samples_per_second": 20.771, |
|
"eval_steps_per_second": 20.771, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.9480779201540627, |
|
"grad_norm": 0.0302734375, |
|
"learning_rate": 0.00016002142805483685, |
|
"loss": 0.0327, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.9575586993556033, |
|
"grad_norm": 0.0260009765625, |
|
"learning_rate": 0.0001591943999067273, |
|
"loss": 0.0285, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.9670394785571439, |
|
"grad_norm": 0.032470703125, |
|
"learning_rate": 0.00015836109150981886, |
|
"loss": 0.0308, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.9765202577586846, |
|
"grad_norm": 0.0283203125, |
|
"learning_rate": 0.00015752159127423263, |
|
"loss": 0.0301, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.9860010369602251, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 0.0001566759882670146, |
|
"loss": 0.0287, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.9954818161617658, |
|
"grad_norm": 0.025634765625, |
|
"learning_rate": 0.00015582437220268647, |
|
"loss": 0.031, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 1.0049625953633063, |
|
"grad_norm": 0.02490234375, |
|
"learning_rate": 0.0001549668334337271, |
|
"loss": 0.0275, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 1.0144433745648471, |
|
"grad_norm": 0.03369140625, |
|
"learning_rate": 0.0001541034629409865, |
|
"loss": 0.0302, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 1.0239241537663877, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 0.00015323435232403337, |
|
"loss": 0.0308, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 1.0334049329679282, |
|
"grad_norm": 0.0240478515625, |
|
"learning_rate": 0.00015235959379143678, |
|
"loss": 0.0247, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 1.042885712169469, |
|
"grad_norm": 0.0255126953125, |
|
"learning_rate": 0.0001514792801509831, |
|
"loss": 0.0281, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.042885712169469, |
|
"eval_loss": 0.03116695210337639, |
|
"eval_runtime": 34.429, |
|
"eval_samples_per_second": 20.651, |
|
"eval_steps_per_second": 20.651, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.0523664913710096, |
|
"grad_norm": 0.0277099609375, |
|
"learning_rate": 0.00015059350479982965, |
|
"loss": 0.0243, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 1.0618472705725501, |
|
"grad_norm": 0.028564453125, |
|
"learning_rate": 0.0001497023617145958, |
|
"loss": 0.0274, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 1.071328049774091, |
|
"grad_norm": 0.0301513671875, |
|
"learning_rate": 0.0001488059454413923, |
|
"loss": 0.0265, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 1.0808088289756315, |
|
"grad_norm": 0.0296630859375, |
|
"learning_rate": 0.00014790435108579048, |
|
"loss": 0.0287, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 1.090289608177172, |
|
"grad_norm": 0.025390625, |
|
"learning_rate": 0.000146997674302732, |
|
"loss": 0.0255, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 1.0997703873787126, |
|
"grad_norm": 0.02685546875, |
|
"learning_rate": 0.00014608601128638027, |
|
"loss": 0.0258, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 1.1092511665802534, |
|
"grad_norm": 0.0277099609375, |
|
"learning_rate": 0.00014516945875991472, |
|
"loss": 0.0326, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 1.118731945781794, |
|
"grad_norm": 0.0260009765625, |
|
"learning_rate": 0.00014424811396526892, |
|
"loss": 0.0241, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 1.1282127249833347, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 0.00014332207465281364, |
|
"loss": 0.0276, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 1.1376935041848752, |
|
"grad_norm": 0.0306396484375, |
|
"learning_rate": 0.0001423914390709861, |
|
"loss": 0.0283, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.1471742833864158, |
|
"grad_norm": 0.0274658203125, |
|
"learning_rate": 0.00014145630595586607, |
|
"loss": 0.027, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 1.1471742833864158, |
|
"eval_loss": 0.030295666307210922, |
|
"eval_runtime": 34.4847, |
|
"eval_samples_per_second": 20.618, |
|
"eval_steps_per_second": 20.618, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 1.1566550625879564, |
|
"grad_norm": 0.0244140625, |
|
"learning_rate": 0.00014051677452070065, |
|
"loss": 0.0248, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 1.1661358417894971, |
|
"grad_norm": 0.023193359375, |
|
"learning_rate": 0.00013957294444537808, |
|
"loss": 0.0226, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 1.1756166209910377, |
|
"grad_norm": 0.02880859375, |
|
"learning_rate": 0.0001386249158658522, |
|
"loss": 0.0332, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 1.1850974001925783, |
|
"grad_norm": 0.028076171875, |
|
"learning_rate": 0.00013767278936351854, |
|
"loss": 0.0292, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.194578179394119, |
|
"grad_norm": 0.0264892578125, |
|
"learning_rate": 0.00013671666595454295, |
|
"loss": 0.0224, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 1.2040589585956596, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 0.00013575664707914448, |
|
"loss": 0.0234, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 1.2135397377972001, |
|
"grad_norm": 0.03125, |
|
"learning_rate": 0.0001347928345908329, |
|
"loss": 0.029, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 1.223020516998741, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 0.00013382533074560255, |
|
"loss": 0.0249, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 1.2325012962002815, |
|
"grad_norm": 0.0291748046875, |
|
"learning_rate": 0.0001328542381910835, |
|
"loss": 0.026, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.241982075401822, |
|
"grad_norm": 0.0291748046875, |
|
"learning_rate": 0.00013187965995565098, |
|
"loss": 0.0252, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 1.2514628546033628, |
|
"grad_norm": 0.0279541015625, |
|
"learning_rate": 0.00013090169943749476, |
|
"loss": 0.023, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 1.2514628546033628, |
|
"eval_loss": 0.029845552518963814, |
|
"eval_runtime": 34.1739, |
|
"eval_samples_per_second": 20.805, |
|
"eval_steps_per_second": 20.805, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 1.2609436338049034, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 0.00012992046039364893, |
|
"loss": 0.0259, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 1.270424413006444, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 0.0001289360469289838, |
|
"loss": 0.0244, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 1.2799051922079845, |
|
"grad_norm": 0.0286865234375, |
|
"learning_rate": 0.00012794856348516095, |
|
"loss": 0.0237, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 1.2893859714095253, |
|
"grad_norm": 0.0289306640625, |
|
"learning_rate": 0.00012695811482955227, |
|
"loss": 0.0248, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 1.2988667506110658, |
|
"grad_norm": 0.028076171875, |
|
"learning_rate": 0.00012596480604412484, |
|
"loss": 0.0283, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 1.3083475298126066, |
|
"grad_norm": 0.0260009765625, |
|
"learning_rate": 0.000124968742514292, |
|
"loss": 0.021, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 1.3178283090141472, |
|
"grad_norm": 0.0279541015625, |
|
"learning_rate": 0.00012397002991773275, |
|
"loss": 0.0249, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 1.3273090882156877, |
|
"grad_norm": 0.026611328125, |
|
"learning_rate": 0.0001229687742131796, |
|
"loss": 0.0231, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.3367898674172283, |
|
"grad_norm": 0.0274658203125, |
|
"learning_rate": 0.00012196508162917677, |
|
"loss": 0.0276, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 1.346270646618769, |
|
"grad_norm": 0.03173828125, |
|
"learning_rate": 0.00012095905865281025, |
|
"loss": 0.0268, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 1.3557514258203096, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 0.00011995081201840956, |
|
"loss": 0.0259, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 1.3557514258203096, |
|
"eval_loss": 0.029736338183283806, |
|
"eval_runtime": 34.4542, |
|
"eval_samples_per_second": 20.636, |
|
"eval_steps_per_second": 20.636, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 1.3652322050218502, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 0.00011894044869622403, |
|
"loss": 0.025, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 1.374712984223391, |
|
"grad_norm": 0.0263671875, |
|
"learning_rate": 0.00011792807588107357, |
|
"loss": 0.0221, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.3841937634249315, |
|
"grad_norm": 0.031982421875, |
|
"learning_rate": 0.00011691380098097597, |
|
"loss": 0.0281, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 1.393674542626472, |
|
"grad_norm": 0.025146484375, |
|
"learning_rate": 0.0001158977316057513, |
|
"loss": 0.021, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 1.4031553218280126, |
|
"grad_norm": 0.032958984375, |
|
"learning_rate": 0.00011487997555560503, |
|
"loss": 0.0256, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 1.4126361010295534, |
|
"grad_norm": 0.0306396484375, |
|
"learning_rate": 0.00011386064080969094, |
|
"loss": 0.0267, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 1.422116880231094, |
|
"grad_norm": 0.0267333984375, |
|
"learning_rate": 0.00011283983551465511, |
|
"loss": 0.0211, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.4315976594326347, |
|
"grad_norm": 0.0291748046875, |
|
"learning_rate": 0.0001118176679731619, |
|
"loss": 0.025, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 1.4410784386341753, |
|
"grad_norm": 0.0296630859375, |
|
"learning_rate": 0.00011079424663240372, |
|
"loss": 0.0273, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 1.4505592178357158, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 0.00010976968007259519, |
|
"loss": 0.0243, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 1.4600399970372564, |
|
"grad_norm": 0.0272216796875, |
|
"learning_rate": 0.00010874407699545328, |
|
"loss": 0.0232, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 1.4600399970372564, |
|
"eval_loss": 0.029951849952340126, |
|
"eval_runtime": 34.3508, |
|
"eval_samples_per_second": 20.698, |
|
"eval_steps_per_second": 20.698, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 1.4695207762387972, |
|
"grad_norm": 0.03271484375, |
|
"learning_rate": 0.00010771754621266466, |
|
"loss": 0.0283, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.4790015554403377, |
|
"grad_norm": 0.024169921875, |
|
"learning_rate": 0.00010669019663434117, |
|
"loss": 0.0218, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 1.4884823346418785, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 0.00010566213725746506, |
|
"loss": 0.0273, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 1.497963113843419, |
|
"grad_norm": 0.0263671875, |
|
"learning_rate": 0.00010463347715432488, |
|
"loss": 0.0229, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 1.5074438930449596, |
|
"grad_norm": 0.02978515625, |
|
"learning_rate": 0.00010360432546094341, |
|
"loss": 0.0242, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 1.5169246722465002, |
|
"grad_norm": 0.031494140625, |
|
"learning_rate": 0.00010257479136549889, |
|
"loss": 0.0252, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.5264054514480407, |
|
"grad_norm": 0.0283203125, |
|
"learning_rate": 0.00010154498409674051, |
|
"loss": 0.0275, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 1.5358862306495815, |
|
"grad_norm": 0.035400390625, |
|
"learning_rate": 0.00010051501291240008, |
|
"loss": 0.0299, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 1.5453670098511223, |
|
"grad_norm": 0.031982421875, |
|
"learning_rate": 9.948498708759993e-05, |
|
"loss": 0.0264, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 1.5548477890526629, |
|
"grad_norm": 0.0302734375, |
|
"learning_rate": 9.845501590325948e-05, |
|
"loss": 0.025, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 1.5643285682542034, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 9.742520863450115e-05, |
|
"loss": 0.0203, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.5643285682542034, |
|
"eval_loss": 0.02908056415617466, |
|
"eval_runtime": 34.2083, |
|
"eval_samples_per_second": 20.784, |
|
"eval_steps_per_second": 20.784, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.573809347455744, |
|
"grad_norm": 0.0322265625, |
|
"learning_rate": 9.639567453905661e-05, |
|
"loss": 0.0262, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 1.5832901266572845, |
|
"grad_norm": 0.0245361328125, |
|
"learning_rate": 9.536652284567513e-05, |
|
"loss": 0.0213, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 1.5927709058588253, |
|
"grad_norm": 0.031005859375, |
|
"learning_rate": 9.433786274253495e-05, |
|
"loss": 0.0272, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 1.6022516850603659, |
|
"grad_norm": 0.0262451171875, |
|
"learning_rate": 9.330980336565887e-05, |
|
"loss": 0.0217, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 1.6117324642619066, |
|
"grad_norm": 0.0296630859375, |
|
"learning_rate": 9.228245378733537e-05, |
|
"loss": 0.0258, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.6212132434634472, |
|
"grad_norm": 0.03466796875, |
|
"learning_rate": 9.125592300454676e-05, |
|
"loss": 0.0278, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 1.6306940226649878, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 9.023031992740488e-05, |
|
"loss": 0.0234, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 1.6401748018665283, |
|
"grad_norm": 0.0301513671875, |
|
"learning_rate": 8.920575336759629e-05, |
|
"loss": 0.025, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 1.6496555810680689, |
|
"grad_norm": 0.03173828125, |
|
"learning_rate": 8.818233202683814e-05, |
|
"loss": 0.0268, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 1.6591363602696096, |
|
"grad_norm": 0.0260009765625, |
|
"learning_rate": 8.71601644853449e-05, |
|
"loss": 0.0241, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.6686171394711504, |
|
"grad_norm": 0.028564453125, |
|
"learning_rate": 8.613935919030907e-05, |
|
"loss": 0.0241, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 1.6686171394711504, |
|
"eval_loss": 0.02843022532761097, |
|
"eval_runtime": 34.2598, |
|
"eval_samples_per_second": 20.753, |
|
"eval_steps_per_second": 20.753, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 1.678097918672691, |
|
"grad_norm": 0.029052734375, |
|
"learning_rate": 8.512002444439502e-05, |
|
"loss": 0.0228, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 1.6875786978742315, |
|
"grad_norm": 0.02880859375, |
|
"learning_rate": 8.410226839424871e-05, |
|
"loss": 0.0272, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 1.697059477075772, |
|
"grad_norm": 0.0238037109375, |
|
"learning_rate": 8.308619901902406e-05, |
|
"loss": 0.0196, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 1.7065402562773127, |
|
"grad_norm": 0.02783203125, |
|
"learning_rate": 8.207192411892646e-05, |
|
"loss": 0.0218, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.7160210354788534, |
|
"grad_norm": 0.0277099609375, |
|
"learning_rate": 8.1059551303776e-05, |
|
"loss": 0.022, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 1.7255018146803942, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 8.004918798159045e-05, |
|
"loss": 0.0219, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 1.7349825938819348, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 7.904094134718976e-05, |
|
"loss": 0.0244, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 1.7444633730834753, |
|
"grad_norm": 0.036376953125, |
|
"learning_rate": 7.803491837082324e-05, |
|
"loss": 0.0301, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 1.7539441522850159, |
|
"grad_norm": 0.03125, |
|
"learning_rate": 7.703122578682046e-05, |
|
"loss": 0.0259, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.7634249314865564, |
|
"grad_norm": 0.025634765625, |
|
"learning_rate": 7.602997008226726e-05, |
|
"loss": 0.0216, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 1.7729057106880972, |
|
"grad_norm": 0.02783203125, |
|
"learning_rate": 7.5031257485708e-05, |
|
"loss": 0.0245, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 1.7729057106880972, |
|
"eval_loss": 0.028160251677036285, |
|
"eval_runtime": 34.3215, |
|
"eval_samples_per_second": 20.716, |
|
"eval_steps_per_second": 20.716, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 1.7823864898896378, |
|
"grad_norm": 0.024658203125, |
|
"learning_rate": 7.403519395587521e-05, |
|
"loss": 0.0221, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 1.7918672690911785, |
|
"grad_norm": 0.0263671875, |
|
"learning_rate": 7.304188517044774e-05, |
|
"loss": 0.0228, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 1.801348048292719, |
|
"grad_norm": 0.02587890625, |
|
"learning_rate": 7.205143651483906e-05, |
|
"loss": 0.0226, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.8108288274942597, |
|
"grad_norm": 0.028076171875, |
|
"learning_rate": 7.106395307101621e-05, |
|
"loss": 0.0213, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 1.8203096066958002, |
|
"grad_norm": 0.0299072265625, |
|
"learning_rate": 7.007953960635109e-05, |
|
"loss": 0.0247, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 1.8297903858973408, |
|
"grad_norm": 0.0341796875, |
|
"learning_rate": 6.909830056250527e-05, |
|
"loss": 0.0276, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 1.8392711650988816, |
|
"grad_norm": 0.031494140625, |
|
"learning_rate": 6.812034004434903e-05, |
|
"loss": 0.0233, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 1.8487519443004223, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 6.714576180891654e-05, |
|
"loss": 0.0213, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.858232723501963, |
|
"grad_norm": 0.0296630859375, |
|
"learning_rate": 6.617466925439746e-05, |
|
"loss": 0.0213, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 1.8677135027035034, |
|
"grad_norm": 0.02685546875, |
|
"learning_rate": 6.520716540916709e-05, |
|
"loss": 0.0227, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 1.877194281905044, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 6.424335292085553e-05, |
|
"loss": 0.0222, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 1.877194281905044, |
|
"eval_loss": 0.027654768899083138, |
|
"eval_runtime": 34.2048, |
|
"eval_samples_per_second": 20.787, |
|
"eval_steps_per_second": 20.787, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 1.8866750611065846, |
|
"grad_norm": 0.0255126953125, |
|
"learning_rate": 6.32833340454571e-05, |
|
"loss": 0.021, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 1.8961558403081253, |
|
"grad_norm": 0.02978515625, |
|
"learning_rate": 6.232721063648148e-05, |
|
"loss": 0.0249, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.905636619509666, |
|
"grad_norm": 0.029052734375, |
|
"learning_rate": 6.137508413414784e-05, |
|
"loss": 0.0238, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 1.9151173987112067, |
|
"grad_norm": 0.0294189453125, |
|
"learning_rate": 6.0427055554621913e-05, |
|
"loss": 0.0246, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 1.9245981779127472, |
|
"grad_norm": 0.0272216796875, |
|
"learning_rate": 5.948322547929939e-05, |
|
"loss": 0.0202, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 1.9340789571142878, |
|
"grad_norm": 0.024658203125, |
|
"learning_rate": 5.854369404413398e-05, |
|
"loss": 0.0165, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 1.9435597363158283, |
|
"grad_norm": 0.026611328125, |
|
"learning_rate": 5.7608560929013946e-05, |
|
"loss": 0.0226, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.9530405155173691, |
|
"grad_norm": 0.033203125, |
|
"learning_rate": 5.667792534718639e-05, |
|
"loss": 0.0265, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 1.9625212947189097, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 5.5751886034731115e-05, |
|
"loss": 0.0233, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 1.9720020739204505, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 5.483054124008528e-05, |
|
"loss": 0.0209, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 1.981482853121991, |
|
"grad_norm": 0.0286865234375, |
|
"learning_rate": 5.391398871361972e-05, |
|
"loss": 0.0231, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 1.981482853121991, |
|
"eval_loss": 0.027809714898467064, |
|
"eval_runtime": 34.3727, |
|
"eval_samples_per_second": 20.685, |
|
"eval_steps_per_second": 20.685, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 1.9909636323235316, |
|
"grad_norm": 0.03271484375, |
|
"learning_rate": 5.300232569726804e-05, |
|
"loss": 0.0246, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 2.000444411525072, |
|
"grad_norm": 0.032470703125, |
|
"learning_rate": 5.2095648914209525e-05, |
|
"loss": 0.0285, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 2.0099251907266127, |
|
"grad_norm": 0.026123046875, |
|
"learning_rate": 5.119405455860772e-05, |
|
"loss": 0.0201, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 2.0194059699281537, |
|
"grad_norm": 0.02490234375, |
|
"learning_rate": 5.029763828540419e-05, |
|
"loss": 0.0179, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 2.0288867491296942, |
|
"grad_norm": 0.0255126953125, |
|
"learning_rate": 4.940649520017035e-05, |
|
"loss": 0.0207, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 2.038367528331235, |
|
"grad_norm": 0.02587890625, |
|
"learning_rate": 4.852071984901696e-05, |
|
"loss": 0.0195, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 2.0478483075327754, |
|
"grad_norm": 0.02490234375, |
|
"learning_rate": 4.7640406208563224e-05, |
|
"loss": 0.0195, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 2.057329086734316, |
|
"grad_norm": 0.02490234375, |
|
"learning_rate": 4.676564767596663e-05, |
|
"loss": 0.0171, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 2.0668098659358565, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 4.5896537059013536e-05, |
|
"loss": 0.0174, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 2.076290645137397, |
|
"grad_norm": 0.02392578125, |
|
"learning_rate": 4.503316656627294e-05, |
|
"loss": 0.0193, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 2.085771424338938, |
|
"grad_norm": 0.02490234375, |
|
"learning_rate": 4.417562779731355e-05, |
|
"loss": 0.0175, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 2.085771424338938, |
|
"eval_loss": 0.027570126578211784, |
|
"eval_runtime": 34.4181, |
|
"eval_samples_per_second": 20.658, |
|
"eval_steps_per_second": 20.658, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 2.0952522035404786, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 4.3324011732985433e-05, |
|
"loss": 0.0202, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 2.104732982742019, |
|
"grad_norm": 0.0283203125, |
|
"learning_rate": 4.247840872576739e-05, |
|
"loss": 0.0189, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 2.1142137619435597, |
|
"grad_norm": 0.0252685546875, |
|
"learning_rate": 4.163890849018114e-05, |
|
"loss": 0.0191, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 2.1236945411451003, |
|
"grad_norm": 0.0264892578125, |
|
"learning_rate": 4.0805600093272735e-05, |
|
"loss": 0.015, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 2.133175320346641, |
|
"grad_norm": 0.0264892578125, |
|
"learning_rate": 3.997857194516319e-05, |
|
"loss": 0.0191, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 2.142656099548182, |
|
"grad_norm": 0.0244140625, |
|
"learning_rate": 3.9157911789668525e-05, |
|
"loss": 0.0173, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 2.1521368787497224, |
|
"grad_norm": 0.0250244140625, |
|
"learning_rate": 3.8343706694990465e-05, |
|
"loss": 0.0168, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 2.161617657951263, |
|
"grad_norm": 0.0296630859375, |
|
"learning_rate": 3.753604304447915e-05, |
|
"loss": 0.0166, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 2.1710984371528035, |
|
"grad_norm": 0.0281982421875, |
|
"learning_rate": 3.6735006527467965e-05, |
|
"loss": 0.0167, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 2.180579216354344, |
|
"grad_norm": 0.0263671875, |
|
"learning_rate": 3.594068213018249e-05, |
|
"loss": 0.0167, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 2.1900599955558846, |
|
"grad_norm": 0.0262451171875, |
|
"learning_rate": 3.515315412672384e-05, |
|
"loss": 0.0165, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 2.1900599955558846, |
|
"eval_loss": 0.028142958879470825, |
|
"eval_runtime": 34.4859, |
|
"eval_samples_per_second": 20.617, |
|
"eval_steps_per_second": 20.617, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 2.199540774757425, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 3.437250607012748e-05, |
|
"loss": 0.0172, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 2.209021553958966, |
|
"grad_norm": 0.0299072265625, |
|
"learning_rate": 3.359882078349883e-05, |
|
"loss": 0.0187, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 2.2185023331605067, |
|
"grad_norm": 0.0277099609375, |
|
"learning_rate": 3.283218035122592e-05, |
|
"loss": 0.0182, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 2.2279831123620473, |
|
"grad_norm": 0.0311279296875, |
|
"learning_rate": 3.207266611027069e-05, |
|
"loss": 0.0169, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 2.237463891563588, |
|
"grad_norm": 0.030029296875, |
|
"learning_rate": 3.132035864153958e-05, |
|
"loss": 0.0192, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 2.2469446707651284, |
|
"grad_norm": 0.0303955078125, |
|
"learning_rate": 3.057533776133421e-05, |
|
"loss": 0.0157, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 2.2564254499666694, |
|
"grad_norm": 0.0262451171875, |
|
"learning_rate": 2.98376825128832e-05, |
|
"loss": 0.0149, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 2.26590622916821, |
|
"grad_norm": 0.031005859375, |
|
"learning_rate": 2.910747115795628e-05, |
|
"loss": 0.0177, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 2.2753870083697505, |
|
"grad_norm": 0.03173828125, |
|
"learning_rate": 2.8384781168560693e-05, |
|
"loss": 0.0193, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 2.284867787571291, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 2.766968921872213e-05, |
|
"loss": 0.0165, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 2.2943485667728316, |
|
"grad_norm": 0.032470703125, |
|
"learning_rate": 2.6962271176349852e-05, |
|
"loss": 0.0174, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 2.2943485667728316, |
|
"eval_loss": 0.028074759989976883, |
|
"eval_runtime": 34.4556, |
|
"eval_samples_per_second": 20.635, |
|
"eval_steps_per_second": 20.635, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 2.303829345974372, |
|
"grad_norm": 0.0283203125, |
|
"learning_rate": 2.626260209518737e-05, |
|
"loss": 0.0172, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 2.3133101251759127, |
|
"grad_norm": 0.0281982421875, |
|
"learning_rate": 2.5570756206849832e-05, |
|
"loss": 0.0158, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 2.3227909043774533, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 2.4886806912948035e-05, |
|
"loss": 0.0159, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 2.3322716835789943, |
|
"grad_norm": 0.0274658203125, |
|
"learning_rate": 2.4210826777301153e-05, |
|
"loss": 0.0168, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 2.341752462780535, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 2.3542887518238056e-05, |
|
"loss": 0.0169, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 2.3512332419820754, |
|
"grad_norm": 0.028564453125, |
|
"learning_rate": 2.288306000098811e-05, |
|
"loss": 0.0174, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 2.360714021183616, |
|
"grad_norm": 0.0306396484375, |
|
"learning_rate": 2.2231414230162894e-05, |
|
"loss": 0.0219, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 2.3701948003851565, |
|
"grad_norm": 0.03466796875, |
|
"learning_rate": 2.1588019342328968e-05, |
|
"loss": 0.0234, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.3796755795866975, |
|
"grad_norm": 0.0294189453125, |
|
"learning_rate": 2.0952943598672846e-05, |
|
"loss": 0.0174, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 2.389156358788238, |
|
"grad_norm": 0.028076171875, |
|
"learning_rate": 2.0326254377758703e-05, |
|
"loss": 0.0194, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 2.3986371379897786, |
|
"grad_norm": 0.0296630859375, |
|
"learning_rate": 1.9708018168380037e-05, |
|
"loss": 0.021, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 2.3986371379897786, |
|
"eval_loss": 0.02788708359003067, |
|
"eval_runtime": 34.3192, |
|
"eval_samples_per_second": 20.717, |
|
"eval_steps_per_second": 20.717, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 2.408117917191319, |
|
"grad_norm": 0.02490234375, |
|
"learning_rate": 1.9098300562505266e-05, |
|
"loss": 0.0155, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 2.4175986963928597, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 1.8497166248318876e-05, |
|
"loss": 0.0176, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 2.4270794755944003, |
|
"grad_norm": 0.0302734375, |
|
"learning_rate": 1.7904679003358283e-05, |
|
"loss": 0.0176, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 2.436560254795941, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 1.7320901687747292e-05, |
|
"loss": 0.0172, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 2.446041033997482, |
|
"grad_norm": 0.0294189453125, |
|
"learning_rate": 1.674589623752707e-05, |
|
"loss": 0.0198, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 2.4555218131990224, |
|
"grad_norm": 0.0283203125, |
|
"learning_rate": 1.617972365808481e-05, |
|
"loss": 0.0187, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 2.465002592400563, |
|
"grad_norm": 0.0233154296875, |
|
"learning_rate": 1.562244401768144e-05, |
|
"loss": 0.0127, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 2.4744833716021035, |
|
"grad_norm": 0.03076171875, |
|
"learning_rate": 1.507411644107879e-05, |
|
"loss": 0.0228, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 2.483964150803644, |
|
"grad_norm": 0.031494140625, |
|
"learning_rate": 1.4534799103266505e-05, |
|
"loss": 0.0193, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 2.4934449300051846, |
|
"grad_norm": 0.0294189453125, |
|
"learning_rate": 1.4004549223290164e-05, |
|
"loss": 0.0194, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 2.5029257092067256, |
|
"grad_norm": 0.0238037109375, |
|
"learning_rate": 1.3483423058180421e-05, |
|
"loss": 0.0147, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 2.5029257092067256, |
|
"eval_loss": 0.027736255899071693, |
|
"eval_runtime": 34.4272, |
|
"eval_samples_per_second": 20.652, |
|
"eval_steps_per_second": 20.652, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 2.512406488408266, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 1.2971475896984475e-05, |
|
"loss": 0.0163, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 2.5218872676098067, |
|
"grad_norm": 0.0322265625, |
|
"learning_rate": 1.2468762054900262e-05, |
|
"loss": 0.0197, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 2.5313680468113473, |
|
"grad_norm": 0.0303955078125, |
|
"learning_rate": 1.1975334867513687e-05, |
|
"loss": 0.017, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 2.540848826012888, |
|
"grad_norm": 0.02978515625, |
|
"learning_rate": 1.1491246685140077e-05, |
|
"loss": 0.0183, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 2.5503296052144284, |
|
"grad_norm": 0.030029296875, |
|
"learning_rate": 1.1016548867270038e-05, |
|
"loss": 0.0183, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 2.559810384415969, |
|
"grad_norm": 0.0272216796875, |
|
"learning_rate": 1.0551291777120464e-05, |
|
"loss": 0.0168, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 2.56929116361751, |
|
"grad_norm": 0.0302734375, |
|
"learning_rate": 1.0095524776291165e-05, |
|
"loss": 0.0187, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 2.5787719428190505, |
|
"grad_norm": 0.0272216796875, |
|
"learning_rate": 9.649296219527982e-06, |
|
"loss": 0.0179, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 2.588252722020591, |
|
"grad_norm": 0.030029296875, |
|
"learning_rate": 9.21265344959239e-06, |
|
"loss": 0.0189, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 2.5977335012221316, |
|
"grad_norm": 0.0294189453125, |
|
"learning_rate": 8.785642792238814e-06, |
|
"loss": 0.0182, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 2.607214280423672, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 8.368309551299536e-06, |
|
"loss": 0.0162, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 2.607214280423672, |
|
"eval_loss": 0.027653872966766357, |
|
"eval_runtime": 34.1633, |
|
"eval_samples_per_second": 20.812, |
|
"eval_steps_per_second": 20.812, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 2.616695059625213, |
|
"grad_norm": 0.032958984375, |
|
"learning_rate": 7.960698003878309e-06, |
|
"loss": 0.0206, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 2.6261758388267538, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 7.562851395652692e-06, |
|
"loss": 0.0151, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 2.6356566180282943, |
|
"grad_norm": 0.030517578125, |
|
"learning_rate": 7.174811936285886e-06, |
|
"loss": 0.0187, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 2.645137397229835, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 6.796620794948483e-06, |
|
"loss": 0.0169, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 2.6546181764313754, |
|
"grad_norm": 0.02734375, |
|
"learning_rate": 6.428318095950647e-06, |
|
"loss": 0.0164, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 2.664098955632916, |
|
"grad_norm": 0.029296875, |
|
"learning_rate": 6.06994291448505e-06, |
|
"loss": 0.018, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 2.6735797348344565, |
|
"grad_norm": 0.030029296875, |
|
"learning_rate": 5.721533272481272e-06, |
|
"loss": 0.0201, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 2.683060514035997, |
|
"grad_norm": 0.02685546875, |
|
"learning_rate": 5.3831261345717476e-06, |
|
"loss": 0.0154, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 2.692541293237538, |
|
"grad_norm": 0.031005859375, |
|
"learning_rate": 5.054757404170074e-06, |
|
"loss": 0.0173, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 2.7020220724390787, |
|
"grad_norm": 0.0286865234375, |
|
"learning_rate": 4.7364619196617495e-06, |
|
"loss": 0.0189, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 2.711502851640619, |
|
"grad_norm": 0.031494140625, |
|
"learning_rate": 4.428273450708065e-06, |
|
"loss": 0.0206, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 2.711502851640619, |
|
"eval_loss": 0.02764257788658142, |
|
"eval_runtime": 34.4144, |
|
"eval_samples_per_second": 20.66, |
|
"eval_steps_per_second": 20.66, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 2.7209836308421598, |
|
"grad_norm": 0.035400390625, |
|
"learning_rate": 4.130224694663221e-06, |
|
"loss": 0.0197, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 2.7304644100437003, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 3.842347273105396e-06, |
|
"loss": 0.0173, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 2.7399451892452413, |
|
"grad_norm": 0.027099609375, |
|
"learning_rate": 3.564671728481739e-06, |
|
"loss": 0.0153, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 2.749425968446782, |
|
"grad_norm": 0.030029296875, |
|
"learning_rate": 3.2972275208679625e-06, |
|
"loss": 0.0189, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.7589067476483224, |
|
"grad_norm": 0.0303955078125, |
|
"learning_rate": 3.040043024842898e-06, |
|
"loss": 0.0185, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 2.768387526849863, |
|
"grad_norm": 0.03173828125, |
|
"learning_rate": 2.793145526477914e-06, |
|
"loss": 0.0194, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 2.7778683060514036, |
|
"grad_norm": 0.0286865234375, |
|
"learning_rate": 2.556561220442144e-06, |
|
"loss": 0.0191, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 2.787349085252944, |
|
"grad_norm": 0.0322265625, |
|
"learning_rate": 2.3303152072231883e-06, |
|
"loss": 0.0187, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 2.7968298644544847, |
|
"grad_norm": 0.0289306640625, |
|
"learning_rate": 2.1144314904642195e-06, |
|
"loss": 0.0183, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 2.8063106436560252, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 1.908932974417266e-06, |
|
"loss": 0.0161, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 2.8157914228575662, |
|
"grad_norm": 0.03125, |
|
"learning_rate": 1.7138414615131327e-06, |
|
"loss": 0.0241, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 2.8157914228575662, |
|
"eval_loss": 0.027634525671601295, |
|
"eval_runtime": 34.429, |
|
"eval_samples_per_second": 20.651, |
|
"eval_steps_per_second": 20.651, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 2.825272202059107, |
|
"grad_norm": 0.0283203125, |
|
"learning_rate": 1.529177650048297e-06, |
|
"loss": 0.0179, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 2.8347529812606473, |
|
"grad_norm": 0.0262451171875, |
|
"learning_rate": 1.3549611319889521e-06, |
|
"loss": 0.0144, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 2.844233760462188, |
|
"grad_norm": 0.0283203125, |
|
"learning_rate": 1.1912103908922945e-06, |
|
"loss": 0.0169, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.8537145396637285, |
|
"grad_norm": 0.029541015625, |
|
"learning_rate": 1.0379427999456015e-06, |
|
"loss": 0.0191, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 2.8631953188652695, |
|
"grad_norm": 0.0284423828125, |
|
"learning_rate": 8.95174620122996e-07, |
|
"loss": 0.0174, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 2.87267609806681, |
|
"grad_norm": 0.028564453125, |
|
"learning_rate": 7.629209984601815e-07, |
|
"loss": 0.0172, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 2.8821568772683506, |
|
"grad_norm": 0.028076171875, |
|
"learning_rate": 6.411959664474831e-07, |
|
"loss": 0.0186, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 2.891637656469891, |
|
"grad_norm": 0.03173828125, |
|
"learning_rate": 5.300124385410943e-07, |
|
"loss": 0.0227, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 2.9011184356714317, |
|
"grad_norm": 0.0281982421875, |
|
"learning_rate": 4.2938221079300566e-07, |
|
"loss": 0.0159, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 2.9105992148729722, |
|
"grad_norm": 0.0267333984375, |
|
"learning_rate": 3.3931595959942886e-07, |
|
"loss": 0.0166, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 2.920079994074513, |
|
"grad_norm": 0.027587890625, |
|
"learning_rate": 2.5982324056810225e-07, |
|
"loss": 0.0162, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 2.920079994074513, |
|
"eval_loss": 0.027648651972413063, |
|
"eval_runtime": 34.3412, |
|
"eval_samples_per_second": 20.704, |
|
"eval_steps_per_second": 20.704, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 2.9295607732760534, |
|
"grad_norm": 0.0269775390625, |
|
"learning_rate": 1.9091248750446876e-07, |
|
"loss": 0.018, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 2.9390415524775944, |
|
"grad_norm": 0.0262451171875, |
|
"learning_rate": 1.3259101151694708e-07, |
|
"loss": 0.0181, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.948522331679135, |
|
"grad_norm": 0.0247802734375, |
|
"learning_rate": 8.486500024118548e-08, |
|
"loss": 0.0146, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 2.9580031108806755, |
|
"grad_norm": 0.0274658203125, |
|
"learning_rate": 4.7739517183620356e-08, |
|
"loss": 0.0153, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 2.967483890082216, |
|
"grad_norm": 0.0294189453125, |
|
"learning_rate": 2.1218501184261386e-08, |
|
"loss": 0.0189, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 2.976964669283757, |
|
"grad_norm": 0.0299072265625, |
|
"learning_rate": 5.304765998781491e-09, |
|
"loss": 0.0181, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 2.9864454484852976, |
|
"grad_norm": 0.02685546875, |
|
"learning_rate": 0.0, |
|
"loss": 0.0152, |
|
"step": 315 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 315, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 53, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 7.251934498730803e+17, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |