{
"best_metric": 0.020646410062909126,
"best_model_checkpoint": "/home/paperspace/Data/models/gommt/llm3br256/checkpoint-1200",
"epoch": 7.741935483870968,
"eval_steps": 25,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0064516129032258064,
"grad_norm": 0.5787440538406372,
"learning_rate": 2.577319587628866e-07,
"loss": 0.2947,
"step": 1
},
{
"epoch": 0.012903225806451613,
"grad_norm": 0.5514878630638123,
"learning_rate": 5.154639175257732e-07,
"loss": 0.2774,
"step": 2
},
{
"epoch": 0.01935483870967742,
"grad_norm": 0.5519412159919739,
"learning_rate": 7.731958762886599e-07,
"loss": 0.301,
"step": 3
},
{
"epoch": 0.025806451612903226,
"grad_norm": 0.5313215851783752,
"learning_rate": 1.0309278350515464e-06,
"loss": 0.2792,
"step": 4
},
{
"epoch": 0.03225806451612903,
"grad_norm": 0.5504052639007568,
"learning_rate": 1.288659793814433e-06,
"loss": 0.2918,
"step": 5
},
{
"epoch": 0.03870967741935484,
"grad_norm": 0.5057755708694458,
"learning_rate": 1.5463917525773197e-06,
"loss": 0.2731,
"step": 6
},
{
"epoch": 0.04516129032258064,
"grad_norm": 0.5207090377807617,
"learning_rate": 1.804123711340206e-06,
"loss": 0.3004,
"step": 7
},
{
"epoch": 0.05161290322580645,
"grad_norm": 0.5149654746055603,
"learning_rate": 2.061855670103093e-06,
"loss": 0.2605,
"step": 8
},
{
"epoch": 0.05806451612903226,
"grad_norm": 0.5021620988845825,
"learning_rate": 2.3195876288659796e-06,
"loss": 0.2932,
"step": 9
},
{
"epoch": 0.06451612903225806,
"grad_norm": 0.5057784914970398,
"learning_rate": 2.577319587628866e-06,
"loss": 0.2761,
"step": 10
},
{
"epoch": 0.07096774193548387,
"grad_norm": 0.49885252118110657,
"learning_rate": 2.8350515463917527e-06,
"loss": 0.2779,
"step": 11
},
{
"epoch": 0.07741935483870968,
"grad_norm": 0.5161502957344055,
"learning_rate": 3.0927835051546395e-06,
"loss": 0.2666,
"step": 12
},
{
"epoch": 0.08387096774193549,
"grad_norm": 0.4862574338912964,
"learning_rate": 3.350515463917526e-06,
"loss": 0.2891,
"step": 13
},
{
"epoch": 0.09032258064516129,
"grad_norm": 0.42886313796043396,
"learning_rate": 3.608247422680412e-06,
"loss": 0.2605,
"step": 14
},
{
"epoch": 0.0967741935483871,
"grad_norm": 0.41169312596321106,
"learning_rate": 3.865979381443299e-06,
"loss": 0.2515,
"step": 15
},
{
"epoch": 0.1032258064516129,
"grad_norm": 0.39097175002098083,
"learning_rate": 4.123711340206186e-06,
"loss": 0.2676,
"step": 16
},
{
"epoch": 0.10967741935483871,
"grad_norm": 0.3687884211540222,
"learning_rate": 4.381443298969072e-06,
"loss": 0.2564,
"step": 17
},
{
"epoch": 0.11612903225806452,
"grad_norm": 0.36199015378952026,
"learning_rate": 4.639175257731959e-06,
"loss": 0.2631,
"step": 18
},
{
"epoch": 0.12258064516129032,
"grad_norm": 0.33205243945121765,
"learning_rate": 4.8969072164948455e-06,
"loss": 0.2598,
"step": 19
},
{
"epoch": 0.12903225806451613,
"grad_norm": 0.2873350977897644,
"learning_rate": 5.154639175257732e-06,
"loss": 0.2445,
"step": 20
},
{
"epoch": 0.13548387096774195,
"grad_norm": 0.27828869223594666,
"learning_rate": 5.412371134020619e-06,
"loss": 0.2312,
"step": 21
},
{
"epoch": 0.14193548387096774,
"grad_norm": 0.2796691358089447,
"learning_rate": 5.670103092783505e-06,
"loss": 0.2373,
"step": 22
},
{
"epoch": 0.14838709677419354,
"grad_norm": 0.2518955171108246,
"learning_rate": 5.927835051546392e-06,
"loss": 0.2147,
"step": 23
},
{
"epoch": 0.15483870967741936,
"grad_norm": 0.24648350477218628,
"learning_rate": 6.185567010309279e-06,
"loss": 0.209,
"step": 24
},
{
"epoch": 0.16129032258064516,
"grad_norm": 0.24219118058681488,
"learning_rate": 6.443298969072164e-06,
"loss": 0.232,
"step": 25
},
{
"epoch": 0.16129032258064516,
"eval_loss": 0.2192624807357788,
"eval_runtime": 21.7174,
"eval_samples_per_second": 4.605,
"eval_steps_per_second": 0.092,
"step": 25
},
{
"epoch": 0.16774193548387098,
"grad_norm": 0.23314689099788666,
"learning_rate": 6.701030927835052e-06,
"loss": 0.2115,
"step": 26
},
{
"epoch": 0.17419354838709677,
"grad_norm": 0.2250155806541443,
"learning_rate": 6.958762886597939e-06,
"loss": 0.2171,
"step": 27
},
{
"epoch": 0.18064516129032257,
"grad_norm": 0.21403469145298004,
"learning_rate": 7.216494845360824e-06,
"loss": 0.1991,
"step": 28
},
{
"epoch": 0.1870967741935484,
"grad_norm": 0.2005668729543686,
"learning_rate": 7.4742268041237115e-06,
"loss": 0.2015,
"step": 29
},
{
"epoch": 0.1935483870967742,
"grad_norm": 0.2055639624595642,
"learning_rate": 7.731958762886599e-06,
"loss": 0.1995,
"step": 30
},
{
"epoch": 0.2,
"grad_norm": 0.20002567768096924,
"learning_rate": 7.989690721649484e-06,
"loss": 0.1987,
"step": 31
},
{
"epoch": 0.2064516129032258,
"grad_norm": 0.19098369777202606,
"learning_rate": 8.247422680412371e-06,
"loss": 0.1842,
"step": 32
},
{
"epoch": 0.2129032258064516,
"grad_norm": 0.17131243646144867,
"learning_rate": 8.505154639175259e-06,
"loss": 0.1934,
"step": 33
},
{
"epoch": 0.21935483870967742,
"grad_norm": 0.19705425202846527,
"learning_rate": 8.762886597938144e-06,
"loss": 0.1983,
"step": 34
},
{
"epoch": 0.22580645161290322,
"grad_norm": 0.1701938658952713,
"learning_rate": 9.020618556701031e-06,
"loss": 0.1854,
"step": 35
},
{
"epoch": 0.23225806451612904,
"grad_norm": 0.16887512803077698,
"learning_rate": 9.278350515463918e-06,
"loss": 0.1883,
"step": 36
},
{
"epoch": 0.23870967741935484,
"grad_norm": 0.16543127596378326,
"learning_rate": 9.536082474226804e-06,
"loss": 0.159,
"step": 37
},
{
"epoch": 0.24516129032258063,
"grad_norm": 0.16382737457752228,
"learning_rate": 9.793814432989691e-06,
"loss": 0.1893,
"step": 38
},
{
"epoch": 0.25161290322580643,
"grad_norm": 0.16113704442977905,
"learning_rate": 1.0051546391752578e-05,
"loss": 0.1746,
"step": 39
},
{
"epoch": 0.25806451612903225,
"grad_norm": 0.15014876425266266,
"learning_rate": 1.0309278350515464e-05,
"loss": 0.1712,
"step": 40
},
{
"epoch": 0.2645161290322581,
"grad_norm": 0.16049867868423462,
"learning_rate": 1.0567010309278351e-05,
"loss": 0.1589,
"step": 41
},
{
"epoch": 0.2709677419354839,
"grad_norm": 0.13896311819553375,
"learning_rate": 1.0824742268041238e-05,
"loss": 0.1621,
"step": 42
},
{
"epoch": 0.27741935483870966,
"grad_norm": 0.14771641790866852,
"learning_rate": 1.1082474226804124e-05,
"loss": 0.1783,
"step": 43
},
{
"epoch": 0.2838709677419355,
"grad_norm": 0.14235621690750122,
"learning_rate": 1.134020618556701e-05,
"loss": 0.1637,
"step": 44
},
{
"epoch": 0.2903225806451613,
"grad_norm": 0.13013780117034912,
"learning_rate": 1.1597938144329898e-05,
"loss": 0.1537,
"step": 45
},
{
"epoch": 0.2967741935483871,
"grad_norm": 0.13013966381549835,
"learning_rate": 1.1855670103092783e-05,
"loss": 0.1574,
"step": 46
},
{
"epoch": 0.3032258064516129,
"grad_norm": 0.1356808990240097,
"learning_rate": 1.211340206185567e-05,
"loss": 0.1585,
"step": 47
},
{
"epoch": 0.3096774193548387,
"grad_norm": 0.1297629475593567,
"learning_rate": 1.2371134020618558e-05,
"loss": 0.1554,
"step": 48
},
{
"epoch": 0.3161290322580645,
"grad_norm": 0.13573496043682098,
"learning_rate": 1.2628865979381443e-05,
"loss": 0.1584,
"step": 49
},
{
"epoch": 0.3225806451612903,
"grad_norm": 0.14283044636249542,
"learning_rate": 1.2886597938144329e-05,
"loss": 0.1524,
"step": 50
},
{
"epoch": 0.3225806451612903,
"eval_loss": 0.15066054463386536,
"eval_runtime": 20.6503,
"eval_samples_per_second": 4.843,
"eval_steps_per_second": 0.097,
"step": 50
},
{
"epoch": 0.32903225806451614,
"grad_norm": 0.1236417219042778,
"learning_rate": 1.3144329896907218e-05,
"loss": 0.1459,
"step": 51
},
{
"epoch": 0.33548387096774196,
"grad_norm": 0.12115009129047394,
"learning_rate": 1.3402061855670103e-05,
"loss": 0.1408,
"step": 52
},
{
"epoch": 0.3419354838709677,
"grad_norm": 0.1199135035276413,
"learning_rate": 1.3659793814432989e-05,
"loss": 0.1444,
"step": 53
},
{
"epoch": 0.34838709677419355,
"grad_norm": 0.1241321787238121,
"learning_rate": 1.3917525773195878e-05,
"loss": 0.1416,
"step": 54
},
{
"epoch": 0.3548387096774194,
"grad_norm": 0.11240442097187042,
"learning_rate": 1.4175257731958763e-05,
"loss": 0.1229,
"step": 55
},
{
"epoch": 0.36129032258064514,
"grad_norm": 0.11829587817192078,
"learning_rate": 1.4432989690721649e-05,
"loss": 0.1387,
"step": 56
},
{
"epoch": 0.36774193548387096,
"grad_norm": 0.1287028193473816,
"learning_rate": 1.4690721649484537e-05,
"loss": 0.1481,
"step": 57
},
{
"epoch": 0.3741935483870968,
"grad_norm": 0.11497591435909271,
"learning_rate": 1.4948453608247423e-05,
"loss": 0.1259,
"step": 58
},
{
"epoch": 0.38064516129032255,
"grad_norm": 0.11566972732543945,
"learning_rate": 1.5206185567010308e-05,
"loss": 0.1193,
"step": 59
},
{
"epoch": 0.3870967741935484,
"grad_norm": 0.1135919988155365,
"learning_rate": 1.5463917525773197e-05,
"loss": 0.1215,
"step": 60
},
{
"epoch": 0.3935483870967742,
"grad_norm": 0.1099628284573555,
"learning_rate": 1.5721649484536083e-05,
"loss": 0.1331,
"step": 61
},
{
"epoch": 0.4,
"grad_norm": 0.10907468944787979,
"learning_rate": 1.5979381443298968e-05,
"loss": 0.117,
"step": 62
},
{
"epoch": 0.4064516129032258,
"grad_norm": 0.11619539558887482,
"learning_rate": 1.6237113402061857e-05,
"loss": 0.1126,
"step": 63
},
{
"epoch": 0.4129032258064516,
"grad_norm": 0.109222911298275,
"learning_rate": 1.6494845360824743e-05,
"loss": 0.1192,
"step": 64
},
{
"epoch": 0.41935483870967744,
"grad_norm": 0.12224064767360687,
"learning_rate": 1.6752577319587628e-05,
"loss": 0.1245,
"step": 65
},
{
"epoch": 0.4258064516129032,
"grad_norm": 0.12005683034658432,
"learning_rate": 1.7010309278350517e-05,
"loss": 0.1111,
"step": 66
},
{
"epoch": 0.432258064516129,
"grad_norm": 0.11314112693071365,
"learning_rate": 1.7268041237113403e-05,
"loss": 0.1109,
"step": 67
},
{
"epoch": 0.43870967741935485,
"grad_norm": 0.11626581847667694,
"learning_rate": 1.7525773195876288e-05,
"loss": 0.1224,
"step": 68
},
{
"epoch": 0.44516129032258067,
"grad_norm": 0.11105289310216904,
"learning_rate": 1.7783505154639177e-05,
"loss": 0.1142,
"step": 69
},
{
"epoch": 0.45161290322580644,
"grad_norm": 0.12696442008018494,
"learning_rate": 1.8041237113402062e-05,
"loss": 0.1077,
"step": 70
},
{
"epoch": 0.45806451612903226,
"grad_norm": 0.11383359134197235,
"learning_rate": 1.8298969072164948e-05,
"loss": 0.1076,
"step": 71
},
{
"epoch": 0.4645161290322581,
"grad_norm": 0.11222764849662781,
"learning_rate": 1.8556701030927837e-05,
"loss": 0.1212,
"step": 72
},
{
"epoch": 0.47096774193548385,
"grad_norm": 0.12386021018028259,
"learning_rate": 1.8814432989690722e-05,
"loss": 0.1203,
"step": 73
},
{
"epoch": 0.4774193548387097,
"grad_norm": 0.10532196611166,
"learning_rate": 1.9072164948453608e-05,
"loss": 0.1079,
"step": 74
},
{
"epoch": 0.4838709677419355,
"grad_norm": 0.1084241271018982,
"learning_rate": 1.9329896907216497e-05,
"loss": 0.115,
"step": 75
},
{
"epoch": 0.4838709677419355,
"eval_loss": 0.11645982414484024,
"eval_runtime": 20.6021,
"eval_samples_per_second": 4.854,
"eval_steps_per_second": 0.097,
"step": 75
},
{
"epoch": 0.49032258064516127,
"grad_norm": 0.12820391356945038,
"learning_rate": 1.9587628865979382e-05,
"loss": 0.1091,
"step": 76
},
{
"epoch": 0.4967741935483871,
"grad_norm": 0.1124340295791626,
"learning_rate": 1.9845360824742268e-05,
"loss": 0.1129,
"step": 77
},
{
"epoch": 0.5032258064516129,
"grad_norm": 0.10982845723628998,
"learning_rate": 2.0103092783505157e-05,
"loss": 0.126,
"step": 78
},
{
"epoch": 0.5096774193548387,
"grad_norm": 0.1185258999466896,
"learning_rate": 2.0360824742268042e-05,
"loss": 0.1094,
"step": 79
},
{
"epoch": 0.5161290322580645,
"grad_norm": 0.0972081646323204,
"learning_rate": 2.0618556701030927e-05,
"loss": 0.103,
"step": 80
},
{
"epoch": 0.5225806451612903,
"grad_norm": 0.10331461578607559,
"learning_rate": 2.0876288659793816e-05,
"loss": 0.1107,
"step": 81
},
{
"epoch": 0.5290322580645161,
"grad_norm": 0.11630548536777496,
"learning_rate": 2.1134020618556702e-05,
"loss": 0.1184,
"step": 82
},
{
"epoch": 0.535483870967742,
"grad_norm": 0.10714004188776016,
"learning_rate": 2.1391752577319587e-05,
"loss": 0.1059,
"step": 83
},
{
"epoch": 0.5419354838709678,
"grad_norm": 0.10682078450918198,
"learning_rate": 2.1649484536082476e-05,
"loss": 0.1047,
"step": 84
},
{
"epoch": 0.5483870967741935,
"grad_norm": 0.10373492538928986,
"learning_rate": 2.1907216494845362e-05,
"loss": 0.0906,
"step": 85
},
{
"epoch": 0.5548387096774193,
"grad_norm": 0.10939467698335648,
"learning_rate": 2.2164948453608247e-05,
"loss": 0.105,
"step": 86
},
{
"epoch": 0.5612903225806452,
"grad_norm": 0.1066933274269104,
"learning_rate": 2.2422680412371136e-05,
"loss": 0.1046,
"step": 87
},
{
"epoch": 0.567741935483871,
"grad_norm": 0.11235528439283371,
"learning_rate": 2.268041237113402e-05,
"loss": 0.1183,
"step": 88
},
{
"epoch": 0.5741935483870968,
"grad_norm": 0.11120587587356567,
"learning_rate": 2.2938144329896907e-05,
"loss": 0.1012,
"step": 89
},
{
"epoch": 0.5806451612903226,
"grad_norm": 0.11943212896585464,
"learning_rate": 2.3195876288659796e-05,
"loss": 0.1081,
"step": 90
},
{
"epoch": 0.5870967741935483,
"grad_norm": 0.11067789793014526,
"learning_rate": 2.345360824742268e-05,
"loss": 0.106,
"step": 91
},
{
"epoch": 0.5935483870967742,
"grad_norm": 0.10897871106863022,
"learning_rate": 2.3711340206185567e-05,
"loss": 0.1017,
"step": 92
},
{
"epoch": 0.6,
"grad_norm": 0.10811934620141983,
"learning_rate": 2.3969072164948456e-05,
"loss": 0.1043,
"step": 93
},
{
"epoch": 0.6064516129032258,
"grad_norm": 0.12143644690513611,
"learning_rate": 2.422680412371134e-05,
"loss": 0.0906,
"step": 94
},
{
"epoch": 0.6129032258064516,
"grad_norm": 0.11472067981958389,
"learning_rate": 2.4484536082474227e-05,
"loss": 0.1064,
"step": 95
},
{
"epoch": 0.6193548387096774,
"grad_norm": 0.11486558616161346,
"learning_rate": 2.4742268041237116e-05,
"loss": 0.1076,
"step": 96
},
{
"epoch": 0.6258064516129033,
"grad_norm": 0.11682534962892532,
"learning_rate": 2.5e-05,
"loss": 0.1015,
"step": 97
},
{
"epoch": 0.632258064516129,
"grad_norm": 0.10343407839536667,
"learning_rate": 2.5257731958762887e-05,
"loss": 0.0951,
"step": 98
},
{
"epoch": 0.6387096774193548,
"grad_norm": 0.13064107298851013,
"learning_rate": 2.5515463917525772e-05,
"loss": 0.0952,
"step": 99
},
{
"epoch": 0.6451612903225806,
"grad_norm": 0.11045321077108383,
"learning_rate": 2.5773195876288658e-05,
"loss": 0.0875,
"step": 100
},
{
"epoch": 0.6451612903225806,
"eval_loss": 0.1003575325012207,
"eval_runtime": 20.6227,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.097,
"step": 100
},
{
"epoch": 0.6516129032258065,
"grad_norm": 0.10884646326303482,
"learning_rate": 2.603092783505155e-05,
"loss": 0.1082,
"step": 101
},
{
"epoch": 0.6580645161290323,
"grad_norm": 0.12365655601024628,
"learning_rate": 2.6288659793814435e-05,
"loss": 0.1079,
"step": 102
},
{
"epoch": 0.6645161290322581,
"grad_norm": 0.12333594262599945,
"learning_rate": 2.654639175257732e-05,
"loss": 0.0987,
"step": 103
},
{
"epoch": 0.6709677419354839,
"grad_norm": 0.12649482488632202,
"learning_rate": 2.6804123711340206e-05,
"loss": 0.1008,
"step": 104
},
{
"epoch": 0.6774193548387096,
"grad_norm": 0.12309876084327698,
"learning_rate": 2.7061855670103092e-05,
"loss": 0.0865,
"step": 105
},
{
"epoch": 0.6838709677419355,
"grad_norm": 0.11590270698070526,
"learning_rate": 2.7319587628865977e-05,
"loss": 0.0943,
"step": 106
},
{
"epoch": 0.6903225806451613,
"grad_norm": 0.13238589465618134,
"learning_rate": 2.757731958762887e-05,
"loss": 0.1092,
"step": 107
},
{
"epoch": 0.6967741935483871,
"grad_norm": 0.10838130116462708,
"learning_rate": 2.7835051546391755e-05,
"loss": 0.0972,
"step": 108
},
{
"epoch": 0.7032258064516129,
"grad_norm": 0.11450201272964478,
"learning_rate": 2.809278350515464e-05,
"loss": 0.0886,
"step": 109
},
{
"epoch": 0.7096774193548387,
"grad_norm": 0.11476748436689377,
"learning_rate": 2.8350515463917526e-05,
"loss": 0.0934,
"step": 110
},
{
"epoch": 0.7161290322580646,
"grad_norm": 0.1416608691215515,
"learning_rate": 2.860824742268041e-05,
"loss": 0.1044,
"step": 111
},
{
"epoch": 0.7225806451612903,
"grad_norm": 0.11946123838424683,
"learning_rate": 2.8865979381443297e-05,
"loss": 0.1001,
"step": 112
},
{
"epoch": 0.7290322580645161,
"grad_norm": 0.10268019139766693,
"learning_rate": 2.912371134020619e-05,
"loss": 0.0879,
"step": 113
},
{
"epoch": 0.7354838709677419,
"grad_norm": 0.12969037890434265,
"learning_rate": 2.9381443298969075e-05,
"loss": 0.1046,
"step": 114
},
{
"epoch": 0.7419354838709677,
"grad_norm": 0.112672358751297,
"learning_rate": 2.963917525773196e-05,
"loss": 0.0979,
"step": 115
},
{
"epoch": 0.7483870967741936,
"grad_norm": 0.11950547248125076,
"learning_rate": 2.9896907216494846e-05,
"loss": 0.0983,
"step": 116
},
{
"epoch": 0.7548387096774194,
"grad_norm": 0.11480306833982468,
"learning_rate": 3.015463917525773e-05,
"loss": 0.0772,
"step": 117
},
{
"epoch": 0.7612903225806451,
"grad_norm": 0.11258739978075027,
"learning_rate": 3.0412371134020617e-05,
"loss": 0.0855,
"step": 118
},
{
"epoch": 0.7677419354838709,
"grad_norm": 0.11581332236528397,
"learning_rate": 3.0670103092783506e-05,
"loss": 0.0864,
"step": 119
},
{
"epoch": 0.7741935483870968,
"grad_norm": 0.13305814564228058,
"learning_rate": 3.0927835051546395e-05,
"loss": 0.0917,
"step": 120
},
{
"epoch": 0.7806451612903226,
"grad_norm": 0.11415888369083405,
"learning_rate": 3.118556701030928e-05,
"loss": 0.0948,
"step": 121
},
{
"epoch": 0.7870967741935484,
"grad_norm": 0.12214888632297516,
"learning_rate": 3.1443298969072166e-05,
"loss": 0.0931,
"step": 122
},
{
"epoch": 0.7935483870967742,
"grad_norm": 0.13365623354911804,
"learning_rate": 3.1701030927835054e-05,
"loss": 0.0816,
"step": 123
},
{
"epoch": 0.8,
"grad_norm": 0.12570084631443024,
"learning_rate": 3.1958762886597937e-05,
"loss": 0.0859,
"step": 124
},
{
"epoch": 0.8064516129032258,
"grad_norm": 0.11142268776893616,
"learning_rate": 3.2216494845360825e-05,
"loss": 0.092,
"step": 125
},
{
"epoch": 0.8064516129032258,
"eval_loss": 0.09087119251489639,
"eval_runtime": 20.6603,
"eval_samples_per_second": 4.84,
"eval_steps_per_second": 0.097,
"step": 125
},
{
"epoch": 0.8129032258064516,
"grad_norm": 0.13126152753829956,
"learning_rate": 3.2474226804123714e-05,
"loss": 0.085,
"step": 126
},
{
"epoch": 0.8193548387096774,
"grad_norm": 0.12571650743484497,
"learning_rate": 3.2731958762886596e-05,
"loss": 0.092,
"step": 127
},
{
"epoch": 0.8258064516129032,
"grad_norm": 0.11935378611087799,
"learning_rate": 3.2989690721649485e-05,
"loss": 0.0923,
"step": 128
},
{
"epoch": 0.832258064516129,
"grad_norm": 0.1310584396123886,
"learning_rate": 3.3247422680412374e-05,
"loss": 0.0854,
"step": 129
},
{
"epoch": 0.8387096774193549,
"grad_norm": 0.12715069949626923,
"learning_rate": 3.3505154639175256e-05,
"loss": 0.0917,
"step": 130
},
{
"epoch": 0.8451612903225807,
"grad_norm": 0.13257332146167755,
"learning_rate": 3.3762886597938145e-05,
"loss": 0.083,
"step": 131
},
{
"epoch": 0.8516129032258064,
"grad_norm": 0.11446800082921982,
"learning_rate": 3.4020618556701034e-05,
"loss": 0.0901,
"step": 132
},
{
"epoch": 0.8580645161290322,
"grad_norm": 0.10843442380428314,
"learning_rate": 3.4278350515463916e-05,
"loss": 0.0892,
"step": 133
},
{
"epoch": 0.864516129032258,
"grad_norm": 0.13865868747234344,
"learning_rate": 3.4536082474226805e-05,
"loss": 0.0843,
"step": 134
},
{
"epoch": 0.8709677419354839,
"grad_norm": 0.11984317004680634,
"learning_rate": 3.4793814432989694e-05,
"loss": 0.0909,
"step": 135
},
{
"epoch": 0.8774193548387097,
"grad_norm": 0.12147698551416397,
"learning_rate": 3.5051546391752576e-05,
"loss": 0.0818,
"step": 136
},
{
"epoch": 0.8838709677419355,
"grad_norm": 0.14456403255462646,
"learning_rate": 3.5309278350515465e-05,
"loss": 0.0829,
"step": 137
},
{
"epoch": 0.8903225806451613,
"grad_norm": 0.11969202756881714,
"learning_rate": 3.5567010309278354e-05,
"loss": 0.0846,
"step": 138
},
{
"epoch": 0.896774193548387,
"grad_norm": 0.12115851789712906,
"learning_rate": 3.5824742268041236e-05,
"loss": 0.0736,
"step": 139
},
{
"epoch": 0.9032258064516129,
"grad_norm": 0.1291874796152115,
"learning_rate": 3.6082474226804125e-05,
"loss": 0.0888,
"step": 140
},
{
"epoch": 0.9096774193548387,
"grad_norm": 0.11851019412279129,
"learning_rate": 3.6340206185567014e-05,
"loss": 0.0847,
"step": 141
},
{
"epoch": 0.9161290322580645,
"grad_norm": 0.12146991491317749,
"learning_rate": 3.6597938144329896e-05,
"loss": 0.0813,
"step": 142
},
{
"epoch": 0.9225806451612903,
"grad_norm": 1.0139228105545044,
"learning_rate": 3.6855670103092785e-05,
"loss": 0.1059,
"step": 143
},
{
"epoch": 0.9290322580645162,
"grad_norm": 0.21279872953891754,
"learning_rate": 3.7113402061855674e-05,
"loss": 0.0966,
"step": 144
},
{
"epoch": 0.9354838709677419,
"grad_norm": 0.16659015417099,
"learning_rate": 3.7371134020618556e-05,
"loss": 0.0963,
"step": 145
},
{
"epoch": 0.9419354838709677,
"grad_norm": 0.1789676696062088,
"learning_rate": 3.7628865979381445e-05,
"loss": 0.099,
"step": 146
},
{
"epoch": 0.9483870967741935,
"grad_norm": 0.1633814126253128,
"learning_rate": 3.7886597938144333e-05,
"loss": 0.0958,
"step": 147
},
{
"epoch": 0.9548387096774194,
"grad_norm": 0.1775261014699936,
"learning_rate": 3.8144329896907216e-05,
"loss": 0.1017,
"step": 148
},
{
"epoch": 0.9612903225806452,
"grad_norm": 0.11921869218349457,
"learning_rate": 3.8402061855670104e-05,
"loss": 0.0945,
"step": 149
},
{
"epoch": 0.967741935483871,
"grad_norm": 0.14354467391967773,
"learning_rate": 3.865979381443299e-05,
"loss": 0.1077,
"step": 150
},
{
"epoch": 0.967741935483871,
"eval_loss": 0.08996601402759552,
"eval_runtime": 20.6312,
"eval_samples_per_second": 4.847,
"eval_steps_per_second": 0.097,
"step": 150
},
{
"epoch": 0.9741935483870968,
"grad_norm": 0.13735030591487885,
"learning_rate": 3.8917525773195875e-05,
"loss": 0.099,
"step": 151
},
{
"epoch": 0.9806451612903225,
"grad_norm": 0.10479952394962311,
"learning_rate": 3.9175257731958764e-05,
"loss": 0.0775,
"step": 152
},
{
"epoch": 0.9870967741935484,
"grad_norm": 0.1122341901063919,
"learning_rate": 3.943298969072165e-05,
"loss": 0.0723,
"step": 153
},
{
"epoch": 0.9935483870967742,
"grad_norm": 0.1230883076786995,
"learning_rate": 3.9690721649484535e-05,
"loss": 0.0721,
"step": 154
},
{
"epoch": 1.0,
"grad_norm": 0.14200343191623688,
"learning_rate": 3.9948453608247424e-05,
"loss": 0.0826,
"step": 155
},
{
"epoch": 1.0064516129032257,
"grad_norm": 0.1111890971660614,
"learning_rate": 4.020618556701031e-05,
"loss": 0.0801,
"step": 156
},
{
"epoch": 1.0129032258064516,
"grad_norm": 0.12783704698085785,
"learning_rate": 4.0463917525773195e-05,
"loss": 0.0947,
"step": 157
},
{
"epoch": 1.0193548387096774,
"grad_norm": 0.12492270022630692,
"learning_rate": 4.0721649484536084e-05,
"loss": 0.0865,
"step": 158
},
{
"epoch": 1.0258064516129033,
"grad_norm": 0.1070851981639862,
"learning_rate": 4.097938144329897e-05,
"loss": 0.0748,
"step": 159
},
{
"epoch": 1.032258064516129,
"grad_norm": 0.1142066940665245,
"learning_rate": 4.1237113402061855e-05,
"loss": 0.0773,
"step": 160
},
{
"epoch": 1.038709677419355,
"grad_norm": 0.13485798239707947,
"learning_rate": 4.1494845360824744e-05,
"loss": 0.0886,
"step": 161
},
{
"epoch": 1.0451612903225806,
"grad_norm": 0.11550623178482056,
"learning_rate": 4.175257731958763e-05,
"loss": 0.0795,
"step": 162
},
{
"epoch": 1.0516129032258064,
"grad_norm": 0.1292477399110794,
"learning_rate": 4.2010309278350515e-05,
"loss": 0.0721,
"step": 163
},
{
"epoch": 1.0580645161290323,
"grad_norm": 0.1202891618013382,
"learning_rate": 4.2268041237113404e-05,
"loss": 0.0713,
"step": 164
},
{
"epoch": 1.064516129032258,
"grad_norm": 0.1149148941040039,
"learning_rate": 4.252577319587629e-05,
"loss": 0.0776,
"step": 165
},
{
"epoch": 1.070967741935484,
"grad_norm": 0.12351440638303757,
"learning_rate": 4.2783505154639175e-05,
"loss": 0.0802,
"step": 166
},
{
"epoch": 1.0774193548387097,
"grad_norm": 0.1218574196100235,
"learning_rate": 4.3041237113402064e-05,
"loss": 0.0791,
"step": 167
},
{
"epoch": 1.0838709677419356,
"grad_norm": 0.1170034110546112,
"learning_rate": 4.329896907216495e-05,
"loss": 0.0834,
"step": 168
},
{
"epoch": 1.0903225806451613,
"grad_norm": 0.10807105898857117,
"learning_rate": 4.3556701030927835e-05,
"loss": 0.074,
"step": 169
},
{
"epoch": 1.096774193548387,
"grad_norm": 0.14144091308116913,
"learning_rate": 4.3814432989690723e-05,
"loss": 0.0867,
"step": 170
},
{
"epoch": 1.103225806451613,
"grad_norm": 0.11608368158340454,
"learning_rate": 4.407216494845361e-05,
"loss": 0.0702,
"step": 171
},
{
"epoch": 1.1096774193548387,
"grad_norm": 0.11372846364974976,
"learning_rate": 4.4329896907216494e-05,
"loss": 0.0768,
"step": 172
},
{
"epoch": 1.1161290322580646,
"grad_norm": 0.13371770083904266,
"learning_rate": 4.458762886597938e-05,
"loss": 0.0754,
"step": 173
},
{
"epoch": 1.1225806451612903,
"grad_norm": 0.10914810746908188,
"learning_rate": 4.484536082474227e-05,
"loss": 0.0781,
"step": 174
},
{
"epoch": 1.129032258064516,
"grad_norm": 0.1071656197309494,
"learning_rate": 4.5103092783505154e-05,
"loss": 0.0688,
"step": 175
},
{
"epoch": 1.129032258064516,
"eval_loss": 0.07778502255678177,
"eval_runtime": 20.6276,
"eval_samples_per_second": 4.848,
"eval_steps_per_second": 0.097,
"step": 175
},
{
"epoch": 1.135483870967742,
"grad_norm": 0.11929682642221451,
"learning_rate": 4.536082474226804e-05,
"loss": 0.0865,
"step": 176
},
{
"epoch": 1.1419354838709677,
"grad_norm": 0.11669743806123734,
"learning_rate": 4.561855670103093e-05,
"loss": 0.0656,
"step": 177
},
{
"epoch": 1.1483870967741936,
"grad_norm": 0.11047486960887909,
"learning_rate": 4.5876288659793814e-05,
"loss": 0.0745,
"step": 178
},
{
"epoch": 1.1548387096774193,
"grad_norm": 0.10313151776790619,
"learning_rate": 4.61340206185567e-05,
"loss": 0.0695,
"step": 179
},
{
"epoch": 1.1612903225806452,
"grad_norm": 0.10674114525318146,
"learning_rate": 4.639175257731959e-05,
"loss": 0.0802,
"step": 180
},
{
"epoch": 1.167741935483871,
"grad_norm": 0.12059634923934937,
"learning_rate": 4.6649484536082474e-05,
"loss": 0.0733,
"step": 181
},
{
"epoch": 1.1741935483870969,
"grad_norm": 0.13753844797611237,
"learning_rate": 4.690721649484536e-05,
"loss": 0.0778,
"step": 182
},
{
"epoch": 1.1806451612903226,
"grad_norm": 0.12316151708364487,
"learning_rate": 4.716494845360825e-05,
"loss": 0.0732,
"step": 183
},
{
"epoch": 1.1870967741935483,
"grad_norm": 0.1259049028158188,
"learning_rate": 4.7422680412371134e-05,
"loss": 0.0768,
"step": 184
},
{
"epoch": 1.1935483870967742,
"grad_norm": 0.12398969382047653,
"learning_rate": 4.768041237113402e-05,
"loss": 0.0784,
"step": 185
},
{
"epoch": 1.2,
"grad_norm": 0.11158449947834015,
"learning_rate": 4.793814432989691e-05,
"loss": 0.063,
"step": 186
},
{
"epoch": 1.206451612903226,
"grad_norm": 0.1099303588271141,
"learning_rate": 4.8195876288659794e-05,
"loss": 0.0715,
"step": 187
},
{
"epoch": 1.2129032258064516,
"grad_norm": 0.12735599279403687,
"learning_rate": 4.845360824742268e-05,
"loss": 0.0727,
"step": 188
},
{
"epoch": 1.2193548387096773,
"grad_norm": 0.12002066522836685,
"learning_rate": 4.871134020618557e-05,
"loss": 0.0662,
"step": 189
},
{
"epoch": 1.2258064516129032,
"grad_norm": 0.11072317510843277,
"learning_rate": 4.8969072164948454e-05,
"loss": 0.0632,
"step": 190
},
{
"epoch": 1.232258064516129,
"grad_norm": 0.11942410469055176,
"learning_rate": 4.922680412371134e-05,
"loss": 0.0787,
"step": 191
},
{
"epoch": 1.238709677419355,
"grad_norm": 0.12091650813817978,
"learning_rate": 4.948453608247423e-05,
"loss": 0.068,
"step": 192
},
{
"epoch": 1.2451612903225806,
"grad_norm": 0.12011866271495819,
"learning_rate": 4.9742268041237114e-05,
"loss": 0.0718,
"step": 193
},
{
"epoch": 1.2516129032258063,
"grad_norm": 0.1052054762840271,
"learning_rate": 5e-05,
"loss": 0.0684,
"step": 194
},
{
"epoch": 1.2580645161290323,
"grad_norm": 0.14064306020736694,
"learning_rate": 5.025773195876289e-05,
"loss": 0.1004,
"step": 195
},
{
"epoch": 1.2645161290322582,
"grad_norm": 0.11315371841192245,
"learning_rate": 5.051546391752577e-05,
"loss": 0.0723,
"step": 196
},
{
"epoch": 1.270967741935484,
"grad_norm": 0.10275565087795258,
"learning_rate": 5.077319587628866e-05,
"loss": 0.0624,
"step": 197
},
{
"epoch": 1.2774193548387096,
"grad_norm": 0.10500184446573257,
"learning_rate": 5.1030927835051544e-05,
"loss": 0.0654,
"step": 198
},
{
"epoch": 1.2838709677419355,
"grad_norm": 0.11868240684270859,
"learning_rate": 5.128865979381443e-05,
"loss": 0.0682,
"step": 199
},
{
"epoch": 1.2903225806451613,
"grad_norm": 0.12519873678684235,
"learning_rate": 5.1546391752577315e-05,
"loss": 0.0682,
"step": 200
},
{
"epoch": 1.2903225806451613,
"eval_loss": 0.07227456569671631,
"eval_runtime": 21.4152,
"eval_samples_per_second": 4.67,
"eval_steps_per_second": 0.093,
"step": 200
},
{
"epoch": 1.2967741935483872,
"grad_norm": 0.11801688373088837,
"learning_rate": 5.180412371134021e-05,
"loss": 0.0661,
"step": 201
},
{
"epoch": 1.303225806451613,
"grad_norm": 0.12295070290565491,
"learning_rate": 5.20618556701031e-05,
"loss": 0.0572,
"step": 202
},
{
"epoch": 1.3096774193548386,
"grad_norm": 0.11511899530887604,
"learning_rate": 5.231958762886598e-05,
"loss": 0.0687,
"step": 203
},
{
"epoch": 1.3161290322580645,
"grad_norm": 0.10693535953760147,
"learning_rate": 5.257731958762887e-05,
"loss": 0.0606,
"step": 204
},
{
"epoch": 1.3225806451612903,
"grad_norm": 0.11811978369951248,
"learning_rate": 5.283505154639175e-05,
"loss": 0.0692,
"step": 205
},
{
"epoch": 1.3290322580645162,
"grad_norm": 0.11573677510023117,
"learning_rate": 5.309278350515464e-05,
"loss": 0.0778,
"step": 206
},
{
"epoch": 1.335483870967742,
"grad_norm": 0.1057257205247879,
"learning_rate": 5.335051546391753e-05,
"loss": 0.0537,
"step": 207
},
{
"epoch": 1.3419354838709676,
"grad_norm": 0.11804960668087006,
"learning_rate": 5.360824742268041e-05,
"loss": 0.0637,
"step": 208
},
{
"epoch": 1.3483870967741935,
"grad_norm": 0.10735280811786652,
"learning_rate": 5.38659793814433e-05,
"loss": 0.0694,
"step": 209
},
{
"epoch": 1.3548387096774195,
"grad_norm": 0.10669250041246414,
"learning_rate": 5.4123711340206184e-05,
"loss": 0.0581,
"step": 210
},
{
"epoch": 1.3612903225806452,
"grad_norm": 0.12118824571371078,
"learning_rate": 5.438144329896907e-05,
"loss": 0.0711,
"step": 211
},
{
"epoch": 1.367741935483871,
"grad_norm": 0.10448060184717178,
"learning_rate": 5.4639175257731955e-05,
"loss": 0.0641,
"step": 212
},
{
"epoch": 1.3741935483870968,
"grad_norm": 0.11256902664899826,
"learning_rate": 5.489690721649485e-05,
"loss": 0.0617,
"step": 213
},
{
"epoch": 1.3806451612903226,
"grad_norm": 0.12368633598089218,
"learning_rate": 5.515463917525774e-05,
"loss": 0.0698,
"step": 214
},
{
"epoch": 1.3870967741935485,
"grad_norm": 0.1177687793970108,
"learning_rate": 5.541237113402062e-05,
"loss": 0.0748,
"step": 215
},
{
"epoch": 1.3935483870967742,
"grad_norm": 0.11173977702856064,
"learning_rate": 5.567010309278351e-05,
"loss": 0.058,
"step": 216
},
{
"epoch": 1.4,
"grad_norm": 0.10333617776632309,
"learning_rate": 5.592783505154639e-05,
"loss": 0.0544,
"step": 217
},
{
"epoch": 1.4064516129032258,
"grad_norm": 0.12021996080875397,
"learning_rate": 5.618556701030928e-05,
"loss": 0.0654,
"step": 218
},
{
"epoch": 1.4129032258064516,
"grad_norm": 0.10966646671295166,
"learning_rate": 5.644329896907217e-05,
"loss": 0.0654,
"step": 219
},
{
"epoch": 1.4193548387096775,
"grad_norm": 0.1111980453133583,
"learning_rate": 5.670103092783505e-05,
"loss": 0.0683,
"step": 220
},
{
"epoch": 1.4258064516129032,
"grad_norm": 0.10386307537555695,
"learning_rate": 5.695876288659794e-05,
"loss": 0.0514,
"step": 221
},
{
"epoch": 1.432258064516129,
"grad_norm": 0.12473642826080322,
"learning_rate": 5.721649484536082e-05,
"loss": 0.0728,
"step": 222
},
{
"epoch": 1.4387096774193548,
"grad_norm": 0.12365364283323288,
"learning_rate": 5.747422680412371e-05,
"loss": 0.0719,
"step": 223
},
{
"epoch": 1.4451612903225808,
"grad_norm": 0.124308742582798,
"learning_rate": 5.7731958762886594e-05,
"loss": 0.0668,
"step": 224
},
{
"epoch": 1.4516129032258065,
"grad_norm": 0.10575917363166809,
"learning_rate": 5.798969072164949e-05,
"loss": 0.0621,
"step": 225
},
{
"epoch": 1.4516129032258065,
"eval_loss": 0.06675011664628983,
"eval_runtime": 20.6214,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.097,
"step": 225
},
{
"epoch": 1.4580645161290322,
"grad_norm": 0.11430371552705765,
"learning_rate": 5.824742268041238e-05,
"loss": 0.0544,
"step": 226
},
{
"epoch": 1.4645161290322581,
"grad_norm": 0.11742933839559555,
"learning_rate": 5.850515463917526e-05,
"loss": 0.0628,
"step": 227
},
{
"epoch": 1.4709677419354839,
"grad_norm": 0.11245157569646835,
"learning_rate": 5.876288659793815e-05,
"loss": 0.0601,
"step": 228
},
{
"epoch": 1.4774193548387098,
"grad_norm": 0.11818389594554901,
"learning_rate": 5.902061855670103e-05,
"loss": 0.061,
"step": 229
},
{
"epoch": 1.4838709677419355,
"grad_norm": 0.11650433391332626,
"learning_rate": 5.927835051546392e-05,
"loss": 0.0618,
"step": 230
},
{
"epoch": 1.4903225806451612,
"grad_norm": 0.12074518948793411,
"learning_rate": 5.953608247422681e-05,
"loss": 0.0638,
"step": 231
},
{
"epoch": 1.4967741935483871,
"grad_norm": 0.10723990947008133,
"learning_rate": 5.979381443298969e-05,
"loss": 0.0645,
"step": 232
},
{
"epoch": 1.5032258064516129,
"grad_norm": 0.11135580390691757,
"learning_rate": 6.005154639175258e-05,
"loss": 0.0623,
"step": 233
},
{
"epoch": 1.5096774193548388,
"grad_norm": 0.12259957939386368,
"learning_rate": 6.030927835051546e-05,
"loss": 0.0735,
"step": 234
},
{
"epoch": 1.5161290322580645,
"grad_norm": 0.09547890722751617,
"learning_rate": 6.056701030927835e-05,
"loss": 0.0564,
"step": 235
},
{
"epoch": 1.5225806451612902,
"grad_norm": 0.10515465587377548,
"learning_rate": 6.0824742268041234e-05,
"loss": 0.062,
"step": 236
},
{
"epoch": 1.5290322580645161,
"grad_norm": 0.1125471293926239,
"learning_rate": 6.108247422680413e-05,
"loss": 0.0563,
"step": 237
},
{
"epoch": 1.535483870967742,
"grad_norm": 0.11628098785877228,
"learning_rate": 6.134020618556701e-05,
"loss": 0.0649,
"step": 238
},
{
"epoch": 1.5419354838709678,
"grad_norm": 0.1112600564956665,
"learning_rate": 6.159793814432991e-05,
"loss": 0.0539,
"step": 239
},
{
"epoch": 1.5483870967741935,
"grad_norm": 0.13200533390045166,
"learning_rate": 6.185567010309279e-05,
"loss": 0.0713,
"step": 240
},
{
"epoch": 1.5548387096774192,
"grad_norm": 0.11476446688175201,
"learning_rate": 6.211340206185567e-05,
"loss": 0.0626,
"step": 241
},
{
"epoch": 1.5612903225806452,
"grad_norm": 0.13394683599472046,
"learning_rate": 6.237113402061855e-05,
"loss": 0.0672,
"step": 242
},
{
"epoch": 1.567741935483871,
"grad_norm": 0.1322658658027649,
"learning_rate": 6.262886597938145e-05,
"loss": 0.0687,
"step": 243
},
{
"epoch": 1.5741935483870968,
"grad_norm": 0.10362022370100021,
"learning_rate": 6.288659793814433e-05,
"loss": 0.057,
"step": 244
},
{
"epoch": 1.5806451612903225,
"grad_norm": 0.1106092780828476,
"learning_rate": 6.314432989690721e-05,
"loss": 0.0541,
"step": 245
},
{
"epoch": 1.5870967741935482,
"grad_norm": 0.12139949202537537,
"learning_rate": 6.340206185567011e-05,
"loss": 0.0641,
"step": 246
},
{
"epoch": 1.5935483870967742,
"grad_norm": 0.09728733450174332,
"learning_rate": 6.365979381443299e-05,
"loss": 0.0491,
"step": 247
},
{
"epoch": 1.6,
"grad_norm": 0.10788699984550476,
"learning_rate": 6.391752577319587e-05,
"loss": 0.0545,
"step": 248
},
{
"epoch": 1.6064516129032258,
"grad_norm": 0.11605466157197952,
"learning_rate": 6.417525773195877e-05,
"loss": 0.0528,
"step": 249
},
{
"epoch": 1.6129032258064515,
"grad_norm": 0.10914571583271027,
"learning_rate": 6.443298969072165e-05,
"loss": 0.0668,
"step": 250
},
{
"epoch": 1.6129032258064515,
"eval_loss": 0.06458242982625961,
"eval_runtime": 20.6082,
"eval_samples_per_second": 4.852,
"eval_steps_per_second": 0.097,
"step": 250
},
{
"epoch": 1.6193548387096774,
"grad_norm": 0.10836615413427353,
"learning_rate": 6.469072164948455e-05,
"loss": 0.0647,
"step": 251
},
{
"epoch": 1.6258064516129034,
"grad_norm": 0.09761244803667068,
"learning_rate": 6.494845360824743e-05,
"loss": 0.0545,
"step": 252
},
{
"epoch": 1.632258064516129,
"grad_norm": 0.10434439033269882,
"learning_rate": 6.520618556701031e-05,
"loss": 0.058,
"step": 253
},
{
"epoch": 1.6387096774193548,
"grad_norm": 0.10498231649398804,
"learning_rate": 6.546391752577319e-05,
"loss": 0.0613,
"step": 254
},
{
"epoch": 1.6451612903225805,
"grad_norm": 0.09180215001106262,
"learning_rate": 6.572164948453609e-05,
"loss": 0.0499,
"step": 255
},
{
"epoch": 1.6516129032258065,
"grad_norm": 0.10550273954868317,
"learning_rate": 6.597938144329897e-05,
"loss": 0.0546,
"step": 256
},
{
"epoch": 1.6580645161290324,
"grad_norm": 0.11006518453359604,
"learning_rate": 6.623711340206185e-05,
"loss": 0.0573,
"step": 257
},
{
"epoch": 1.664516129032258,
"grad_norm": 0.09517877548933029,
"learning_rate": 6.649484536082475e-05,
"loss": 0.0571,
"step": 258
},
{
"epoch": 1.6709677419354838,
"grad_norm": 0.10255111753940582,
"learning_rate": 6.675257731958763e-05,
"loss": 0.067,
"step": 259
},
{
"epoch": 1.6774193548387095,
"grad_norm": 0.10252523422241211,
"learning_rate": 6.701030927835051e-05,
"loss": 0.0547,
"step": 260
},
{
"epoch": 1.6838709677419355,
"grad_norm": 0.10796947032213211,
"learning_rate": 6.726804123711341e-05,
"loss": 0.0555,
"step": 261
},
{
"epoch": 1.6903225806451614,
"grad_norm": 0.09685982763767242,
"learning_rate": 6.752577319587629e-05,
"loss": 0.056,
"step": 262
},
{
"epoch": 1.696774193548387,
"grad_norm": 0.10517261922359467,
"learning_rate": 6.778350515463919e-05,
"loss": 0.0721,
"step": 263
},
{
"epoch": 1.7032258064516128,
"grad_norm": 0.10260171443223953,
"learning_rate": 6.804123711340207e-05,
"loss": 0.063,
"step": 264
},
{
"epoch": 1.7096774193548387,
"grad_norm": 0.11811640858650208,
"learning_rate": 6.829896907216495e-05,
"loss": 0.0695,
"step": 265
},
{
"epoch": 1.7161290322580647,
"grad_norm": 0.11343566328287125,
"learning_rate": 6.855670103092783e-05,
"loss": 0.0644,
"step": 266
},
{
"epoch": 1.7225806451612904,
"grad_norm": 0.10600105673074722,
"learning_rate": 6.881443298969073e-05,
"loss": 0.058,
"step": 267
},
{
"epoch": 1.729032258064516,
"grad_norm": 0.11247742176055908,
"learning_rate": 6.907216494845361e-05,
"loss": 0.0585,
"step": 268
},
{
"epoch": 1.7354838709677418,
"grad_norm": 0.10289078205823898,
"learning_rate": 6.932989690721649e-05,
"loss": 0.06,
"step": 269
},
{
"epoch": 1.7419354838709677,
"grad_norm": 0.11505839973688126,
"learning_rate": 6.958762886597939e-05,
"loss": 0.0622,
"step": 270
},
{
"epoch": 1.7483870967741937,
"grad_norm": 0.10896378755569458,
"learning_rate": 6.984536082474227e-05,
"loss": 0.0564,
"step": 271
},
{
"epoch": 1.7548387096774194,
"grad_norm": 0.10462034493684769,
"learning_rate": 7.010309278350515e-05,
"loss": 0.0478,
"step": 272
},
{
"epoch": 1.761290322580645,
"grad_norm": 0.0960521250963211,
"learning_rate": 7.036082474226805e-05,
"loss": 0.0501,
"step": 273
},
{
"epoch": 1.7677419354838708,
"grad_norm": 0.113226018846035,
"learning_rate": 7.061855670103093e-05,
"loss": 0.0487,
"step": 274
},
{
"epoch": 1.7741935483870968,
"grad_norm": 0.12256605923175812,
"learning_rate": 7.087628865979383e-05,
"loss": 0.0672,
"step": 275
},
{
"epoch": 1.7741935483870968,
"eval_loss": 0.0586564727127552,
"eval_runtime": 20.6122,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.097,
"step": 275
},
{
"epoch": 1.7806451612903227,
"grad_norm": 0.11341353505849838,
"learning_rate": 7.113402061855671e-05,
"loss": 0.0605,
"step": 276
},
{
"epoch": 1.7870967741935484,
"grad_norm": 0.10689063370227814,
"learning_rate": 7.139175257731959e-05,
"loss": 0.0557,
"step": 277
},
{
"epoch": 1.793548387096774,
"grad_norm": 0.10520404577255249,
"learning_rate": 7.164948453608247e-05,
"loss": 0.0587,
"step": 278
},
{
"epoch": 1.8,
"grad_norm": 0.12727777659893036,
"learning_rate": 7.190721649484537e-05,
"loss": 0.0581,
"step": 279
},
{
"epoch": 1.8064516129032258,
"grad_norm": 0.11627282202243805,
"learning_rate": 7.216494845360825e-05,
"loss": 0.0561,
"step": 280
},
{
"epoch": 1.8129032258064517,
"grad_norm": 0.09967481344938278,
"learning_rate": 7.242268041237113e-05,
"loss": 0.0489,
"step": 281
},
{
"epoch": 1.8193548387096774,
"grad_norm": 0.09930626302957535,
"learning_rate": 7.268041237113403e-05,
"loss": 0.054,
"step": 282
},
{
"epoch": 1.8258064516129031,
"grad_norm": 0.09552939981222153,
"learning_rate": 7.293814432989691e-05,
"loss": 0.0555,
"step": 283
},
{
"epoch": 1.832258064516129,
"grad_norm": 0.11425229907035828,
"learning_rate": 7.319587628865979e-05,
"loss": 0.0636,
"step": 284
},
{
"epoch": 1.838709677419355,
"grad_norm": 0.1075424998998642,
"learning_rate": 7.345360824742269e-05,
"loss": 0.0533,
"step": 285
},
{
"epoch": 1.8451612903225807,
"grad_norm": 0.09101930260658264,
"learning_rate": 7.371134020618557e-05,
"loss": 0.0393,
"step": 286
},
{
"epoch": 1.8516129032258064,
"grad_norm": 0.11300979554653168,
"learning_rate": 7.396907216494847e-05,
"loss": 0.0512,
"step": 287
},
{
"epoch": 1.8580645161290321,
"grad_norm": 0.12898088991641998,
"learning_rate": 7.422680412371135e-05,
"loss": 0.0657,
"step": 288
},
{
"epoch": 1.864516129032258,
"grad_norm": 0.10775969177484512,
"learning_rate": 7.448453608247423e-05,
"loss": 0.0633,
"step": 289
},
{
"epoch": 1.870967741935484,
"grad_norm": 0.12368246912956238,
"learning_rate": 7.474226804123711e-05,
"loss": 0.0639,
"step": 290
},
{
"epoch": 1.8774193548387097,
"grad_norm": 0.11313799023628235,
"learning_rate": 7.500000000000001e-05,
"loss": 0.059,
"step": 291
},
{
"epoch": 1.8838709677419354,
"grad_norm": 0.10701242834329605,
"learning_rate": 7.525773195876289e-05,
"loss": 0.0473,
"step": 292
},
{
"epoch": 1.8903225806451613,
"grad_norm": 0.09166759997606277,
"learning_rate": 7.551546391752577e-05,
"loss": 0.0438,
"step": 293
},
{
"epoch": 1.896774193548387,
"grad_norm": 0.1073215901851654,
"learning_rate": 7.577319587628867e-05,
"loss": 0.0618,
"step": 294
},
{
"epoch": 1.903225806451613,
"grad_norm": 0.11972848325967789,
"learning_rate": 7.603092783505155e-05,
"loss": 0.06,
"step": 295
},
{
"epoch": 1.9096774193548387,
"grad_norm": 0.10502435266971588,
"learning_rate": 7.628865979381443e-05,
"loss": 0.0517,
"step": 296
},
{
"epoch": 1.9161290322580644,
"grad_norm": 0.10517352819442749,
"learning_rate": 7.654639175257731e-05,
"loss": 0.0499,
"step": 297
},
{
"epoch": 1.9225806451612903,
"grad_norm": 0.08997687697410583,
"learning_rate": 7.680412371134021e-05,
"loss": 0.0426,
"step": 298
},
{
"epoch": 1.9290322580645163,
"grad_norm": 0.10763172805309296,
"learning_rate": 7.70618556701031e-05,
"loss": 0.0624,
"step": 299
},
{
"epoch": 1.935483870967742,
"grad_norm": 0.09808403253555298,
"learning_rate": 7.731958762886599e-05,
"loss": 0.0484,
"step": 300
},
{
"epoch": 1.935483870967742,
"eval_loss": 0.05441552773118019,
"eval_runtime": 20.6185,
"eval_samples_per_second": 4.85,
"eval_steps_per_second": 0.097,
"step": 300
},
{
"epoch": 1.9419354838709677,
"grad_norm": 0.09616626799106598,
"learning_rate": 7.757731958762887e-05,
"loss": 0.0454,
"step": 301
},
{
"epoch": 1.9483870967741934,
"grad_norm": 0.12124036252498627,
"learning_rate": 7.783505154639175e-05,
"loss": 0.0551,
"step": 302
},
{
"epoch": 1.9548387096774194,
"grad_norm": 0.09645543247461319,
"learning_rate": 7.809278350515465e-05,
"loss": 0.0484,
"step": 303
},
{
"epoch": 1.9612903225806453,
"grad_norm": 0.1068158969283104,
"learning_rate": 7.835051546391753e-05,
"loss": 0.0606,
"step": 304
},
{
"epoch": 1.967741935483871,
"grad_norm": 0.09896648675203323,
"learning_rate": 7.860824742268041e-05,
"loss": 0.0437,
"step": 305
},
{
"epoch": 1.9741935483870967,
"grad_norm": 0.11050526052713394,
"learning_rate": 7.88659793814433e-05,
"loss": 0.0593,
"step": 306
},
{
"epoch": 1.9806451612903224,
"grad_norm": 0.10204251855611801,
"learning_rate": 7.912371134020619e-05,
"loss": 0.061,
"step": 307
},
{
"epoch": 1.9870967741935484,
"grad_norm": 0.10084798187017441,
"learning_rate": 7.938144329896907e-05,
"loss": 0.0473,
"step": 308
},
{
"epoch": 1.9935483870967743,
"grad_norm": 0.1033916026353836,
"learning_rate": 7.963917525773195e-05,
"loss": 0.0455,
"step": 309
},
{
"epoch": 2.0,
"grad_norm": 0.16185474395751953,
"learning_rate": 7.989690721649485e-05,
"loss": 0.0757,
"step": 310
},
{
"epoch": 2.0064516129032257,
"grad_norm": 0.09380825608968735,
"learning_rate": 8.015463917525774e-05,
"loss": 0.0454,
"step": 311
},
{
"epoch": 2.0129032258064514,
"grad_norm": 0.08151062577962875,
"learning_rate": 8.041237113402063e-05,
"loss": 0.0312,
"step": 312
},
{
"epoch": 2.0193548387096776,
"grad_norm": 0.08947654813528061,
"learning_rate": 8.067010309278351e-05,
"loss": 0.0428,
"step": 313
},
{
"epoch": 2.0258064516129033,
"grad_norm": 0.10064241290092468,
"learning_rate": 8.092783505154639e-05,
"loss": 0.0375,
"step": 314
},
{
"epoch": 2.032258064516129,
"grad_norm": 0.10445793718099594,
"learning_rate": 8.118556701030929e-05,
"loss": 0.0437,
"step": 315
},
{
"epoch": 2.0387096774193547,
"grad_norm": 0.08970332890748978,
"learning_rate": 8.144329896907217e-05,
"loss": 0.0345,
"step": 316
},
{
"epoch": 2.0451612903225804,
"grad_norm": 0.12089475244283676,
"learning_rate": 8.170103092783505e-05,
"loss": 0.0477,
"step": 317
},
{
"epoch": 2.0516129032258066,
"grad_norm": 0.12545561790466309,
"learning_rate": 8.195876288659795e-05,
"loss": 0.0632,
"step": 318
},
{
"epoch": 2.0580645161290323,
"grad_norm": 0.15998150408267975,
"learning_rate": 8.221649484536083e-05,
"loss": 0.0478,
"step": 319
},
{
"epoch": 2.064516129032258,
"grad_norm": 0.09755123406648636,
"learning_rate": 8.247422680412371e-05,
"loss": 0.0395,
"step": 320
},
{
"epoch": 2.0709677419354837,
"grad_norm": 0.10251940786838531,
"learning_rate": 8.273195876288659e-05,
"loss": 0.0474,
"step": 321
},
{
"epoch": 2.07741935483871,
"grad_norm": 0.09739600867033005,
"learning_rate": 8.298969072164949e-05,
"loss": 0.0443,
"step": 322
},
{
"epoch": 2.0838709677419356,
"grad_norm": 0.11511006206274033,
"learning_rate": 8.324742268041238e-05,
"loss": 0.0458,
"step": 323
},
{
"epoch": 2.0903225806451613,
"grad_norm": 0.09681696444749832,
"learning_rate": 8.350515463917527e-05,
"loss": 0.0396,
"step": 324
},
{
"epoch": 2.096774193548387,
"grad_norm": 0.1090191900730133,
"learning_rate": 8.376288659793815e-05,
"loss": 0.0468,
"step": 325
},
{
"epoch": 2.096774193548387,
"eval_loss": 0.05155442655086517,
"eval_runtime": 20.6145,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.097,
"step": 325
},
{
"epoch": 2.1032258064516127,
"grad_norm": 0.10617301613092422,
"learning_rate": 8.402061855670103e-05,
"loss": 0.0437,
"step": 326
},
{
"epoch": 2.109677419354839,
"grad_norm": 0.11813998222351074,
"learning_rate": 8.427835051546393e-05,
"loss": 0.0458,
"step": 327
},
{
"epoch": 2.1161290322580646,
"grad_norm": 0.09822124242782593,
"learning_rate": 8.453608247422681e-05,
"loss": 0.0414,
"step": 328
},
{
"epoch": 2.1225806451612903,
"grad_norm": 0.0994827002286911,
"learning_rate": 8.479381443298969e-05,
"loss": 0.0505,
"step": 329
},
{
"epoch": 2.129032258064516,
"grad_norm": 0.10852402448654175,
"learning_rate": 8.505154639175259e-05,
"loss": 0.0401,
"step": 330
},
{
"epoch": 2.135483870967742,
"grad_norm": 0.0973113551735878,
"learning_rate": 8.530927835051547e-05,
"loss": 0.0468,
"step": 331
},
{
"epoch": 2.141935483870968,
"grad_norm": 0.10243872553110123,
"learning_rate": 8.556701030927835e-05,
"loss": 0.0457,
"step": 332
},
{
"epoch": 2.1483870967741936,
"grad_norm": 0.11962252110242844,
"learning_rate": 8.582474226804123e-05,
"loss": 0.0548,
"step": 333
},
{
"epoch": 2.1548387096774193,
"grad_norm": 0.10123623162508011,
"learning_rate": 8.608247422680413e-05,
"loss": 0.0369,
"step": 334
},
{
"epoch": 2.161290322580645,
"grad_norm": 0.09829887747764587,
"learning_rate": 8.634020618556702e-05,
"loss": 0.0372,
"step": 335
},
{
"epoch": 2.167741935483871,
"grad_norm": 0.09360591322183609,
"learning_rate": 8.65979381443299e-05,
"loss": 0.0479,
"step": 336
},
{
"epoch": 2.174193548387097,
"grad_norm": 0.10151084512472153,
"learning_rate": 8.685567010309279e-05,
"loss": 0.0367,
"step": 337
},
{
"epoch": 2.1806451612903226,
"grad_norm": 0.12664125859737396,
"learning_rate": 8.711340206185567e-05,
"loss": 0.043,
"step": 338
},
{
"epoch": 2.1870967741935483,
"grad_norm": 0.11126257479190826,
"learning_rate": 8.737113402061856e-05,
"loss": 0.0393,
"step": 339
},
{
"epoch": 2.193548387096774,
"grad_norm": 0.09164562076330185,
"learning_rate": 8.762886597938145e-05,
"loss": 0.0436,
"step": 340
},
{
"epoch": 2.2,
"grad_norm": 0.10741306096315384,
"learning_rate": 8.788659793814433e-05,
"loss": 0.0463,
"step": 341
},
{
"epoch": 2.206451612903226,
"grad_norm": 0.09051238000392914,
"learning_rate": 8.814432989690722e-05,
"loss": 0.043,
"step": 342
},
{
"epoch": 2.2129032258064516,
"grad_norm": 0.07722020149230957,
"learning_rate": 8.840206185567011e-05,
"loss": 0.0354,
"step": 343
},
{
"epoch": 2.2193548387096773,
"grad_norm": 0.08493324369192123,
"learning_rate": 8.865979381443299e-05,
"loss": 0.0525,
"step": 344
},
{
"epoch": 2.225806451612903,
"grad_norm": 0.09371798485517502,
"learning_rate": 8.891752577319587e-05,
"loss": 0.0424,
"step": 345
},
{
"epoch": 2.232258064516129,
"grad_norm": 0.10090696066617966,
"learning_rate": 8.917525773195877e-05,
"loss": 0.0469,
"step": 346
},
{
"epoch": 2.238709677419355,
"grad_norm": 0.08301045000553131,
"learning_rate": 8.943298969072166e-05,
"loss": 0.0404,
"step": 347
},
{
"epoch": 2.2451612903225806,
"grad_norm": 0.0968640148639679,
"learning_rate": 8.969072164948454e-05,
"loss": 0.0534,
"step": 348
},
{
"epoch": 2.2516129032258063,
"grad_norm": 0.10685165971517563,
"learning_rate": 8.994845360824743e-05,
"loss": 0.0391,
"step": 349
},
{
"epoch": 2.258064516129032,
"grad_norm": 0.09006287902593613,
"learning_rate": 9.020618556701031e-05,
"loss": 0.0438,
"step": 350
},
{
"epoch": 2.258064516129032,
"eval_loss": 0.05031716451048851,
"eval_runtime": 20.609,
"eval_samples_per_second": 4.852,
"eval_steps_per_second": 0.097,
"step": 350
},
{
"epoch": 2.264516129032258,
"grad_norm": 0.10811453312635422,
"learning_rate": 9.04639175257732e-05,
"loss": 0.0488,
"step": 351
},
{
"epoch": 2.270967741935484,
"grad_norm": 0.10823424160480499,
"learning_rate": 9.072164948453609e-05,
"loss": 0.0456,
"step": 352
},
{
"epoch": 2.2774193548387096,
"grad_norm": 0.08600781857967377,
"learning_rate": 9.097938144329897e-05,
"loss": 0.0398,
"step": 353
},
{
"epoch": 2.2838709677419353,
"grad_norm": 0.10172222554683685,
"learning_rate": 9.123711340206186e-05,
"loss": 0.0427,
"step": 354
},
{
"epoch": 2.2903225806451615,
"grad_norm": 0.09986208379268646,
"learning_rate": 9.149484536082475e-05,
"loss": 0.0387,
"step": 355
},
{
"epoch": 2.296774193548387,
"grad_norm": 0.10122333467006683,
"learning_rate": 9.175257731958763e-05,
"loss": 0.0442,
"step": 356
},
{
"epoch": 2.303225806451613,
"grad_norm": 0.09510695189237595,
"learning_rate": 9.201030927835051e-05,
"loss": 0.0447,
"step": 357
},
{
"epoch": 2.3096774193548386,
"grad_norm": 0.09774709492921829,
"learning_rate": 9.22680412371134e-05,
"loss": 0.0344,
"step": 358
},
{
"epoch": 2.3161290322580643,
"grad_norm": 0.0921277180314064,
"learning_rate": 9.25257731958763e-05,
"loss": 0.0411,
"step": 359
},
{
"epoch": 2.3225806451612905,
"grad_norm": 0.10613450407981873,
"learning_rate": 9.278350515463918e-05,
"loss": 0.0443,
"step": 360
},
{
"epoch": 2.329032258064516,
"grad_norm": 0.10701876133680344,
"learning_rate": 9.304123711340207e-05,
"loss": 0.039,
"step": 361
},
{
"epoch": 2.335483870967742,
"grad_norm": 0.09017759561538696,
"learning_rate": 9.329896907216495e-05,
"loss": 0.0369,
"step": 362
},
{
"epoch": 2.3419354838709676,
"grad_norm": 0.10165076702833176,
"learning_rate": 9.355670103092784e-05,
"loss": 0.041,
"step": 363
},
{
"epoch": 2.3483870967741938,
"grad_norm": 0.09364940971136093,
"learning_rate": 9.381443298969073e-05,
"loss": 0.0463,
"step": 364
},
{
"epoch": 2.3548387096774195,
"grad_norm": 0.08863069862127304,
"learning_rate": 9.407216494845361e-05,
"loss": 0.0363,
"step": 365
},
{
"epoch": 2.361290322580645,
"grad_norm": 0.10201136022806168,
"learning_rate": 9.43298969072165e-05,
"loss": 0.0384,
"step": 366
},
{
"epoch": 2.367741935483871,
"grad_norm": 0.09771512448787689,
"learning_rate": 9.458762886597939e-05,
"loss": 0.0386,
"step": 367
},
{
"epoch": 2.3741935483870966,
"grad_norm": 0.09068713337182999,
"learning_rate": 9.484536082474227e-05,
"loss": 0.0389,
"step": 368
},
{
"epoch": 2.3806451612903228,
"grad_norm": 0.10729826241731644,
"learning_rate": 9.510309278350515e-05,
"loss": 0.0419,
"step": 369
},
{
"epoch": 2.3870967741935485,
"grad_norm": 0.10047543793916702,
"learning_rate": 9.536082474226805e-05,
"loss": 0.046,
"step": 370
},
{
"epoch": 2.393548387096774,
"grad_norm": 0.08138115704059601,
"learning_rate": 9.561855670103094e-05,
"loss": 0.03,
"step": 371
},
{
"epoch": 2.4,
"grad_norm": 0.08465878665447235,
"learning_rate": 9.587628865979382e-05,
"loss": 0.0383,
"step": 372
},
{
"epoch": 2.4064516129032256,
"grad_norm": 0.10496239364147186,
"learning_rate": 9.61340206185567e-05,
"loss": 0.0423,
"step": 373
},
{
"epoch": 2.412903225806452,
"grad_norm": 0.09935601055622101,
"learning_rate": 9.639175257731959e-05,
"loss": 0.043,
"step": 374
},
{
"epoch": 2.4193548387096775,
"grad_norm": 0.0851937010884285,
"learning_rate": 9.664948453608248e-05,
"loss": 0.0364,
"step": 375
},
{
"epoch": 2.4193548387096775,
"eval_loss": 0.04934929683804512,
"eval_runtime": 20.6137,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.097,
"step": 375
},
{
"epoch": 2.425806451612903,
"grad_norm": 0.07887910306453705,
"learning_rate": 9.690721649484537e-05,
"loss": 0.0307,
"step": 376
},
{
"epoch": 2.432258064516129,
"grad_norm": 0.10647827386856079,
"learning_rate": 9.716494845360825e-05,
"loss": 0.0455,
"step": 377
},
{
"epoch": 2.4387096774193546,
"grad_norm": 0.09436062723398209,
"learning_rate": 9.742268041237114e-05,
"loss": 0.0476,
"step": 378
},
{
"epoch": 2.445161290322581,
"grad_norm": 0.10967639833688736,
"learning_rate": 9.768041237113403e-05,
"loss": 0.0368,
"step": 379
},
{
"epoch": 2.4516129032258065,
"grad_norm": 0.0899633839726448,
"learning_rate": 9.793814432989691e-05,
"loss": 0.0291,
"step": 380
},
{
"epoch": 2.458064516129032,
"grad_norm": 0.10881899297237396,
"learning_rate": 9.819587628865979e-05,
"loss": 0.0452,
"step": 381
},
{
"epoch": 2.464516129032258,
"grad_norm": 0.09735005348920822,
"learning_rate": 9.845360824742269e-05,
"loss": 0.0329,
"step": 382
},
{
"epoch": 2.4709677419354836,
"grad_norm": 0.10843765735626221,
"learning_rate": 9.871134020618558e-05,
"loss": 0.0379,
"step": 383
},
{
"epoch": 2.47741935483871,
"grad_norm": 0.09915640950202942,
"learning_rate": 9.896907216494846e-05,
"loss": 0.0416,
"step": 384
},
{
"epoch": 2.4838709677419355,
"grad_norm": 0.08816944807767868,
"learning_rate": 9.922680412371134e-05,
"loss": 0.035,
"step": 385
},
{
"epoch": 2.490322580645161,
"grad_norm": 0.08104445785284042,
"learning_rate": 9.948453608247423e-05,
"loss": 0.0356,
"step": 386
},
{
"epoch": 2.496774193548387,
"grad_norm": 0.10272940993309021,
"learning_rate": 9.974226804123712e-05,
"loss": 0.0404,
"step": 387
},
{
"epoch": 2.5032258064516126,
"grad_norm": 0.08973879367113113,
"learning_rate": 0.0001,
"loss": 0.0305,
"step": 388
},
{
"epoch": 2.509677419354839,
"grad_norm": 0.1103263571858406,
"learning_rate": 9.999997970748714e-05,
"loss": 0.0459,
"step": 389
},
{
"epoch": 2.5161290322580645,
"grad_norm": 0.09237506985664368,
"learning_rate": 9.999991882996502e-05,
"loss": 0.0363,
"step": 390
},
{
"epoch": 2.52258064516129,
"grad_norm": 0.08599911630153656,
"learning_rate": 9.999981736748307e-05,
"loss": 0.0377,
"step": 391
},
{
"epoch": 2.5290322580645164,
"grad_norm": 0.08956660330295563,
"learning_rate": 9.999967532012362e-05,
"loss": 0.0355,
"step": 392
},
{
"epoch": 2.535483870967742,
"grad_norm": 0.08925971388816833,
"learning_rate": 9.9999492688002e-05,
"loss": 0.038,
"step": 393
},
{
"epoch": 2.541935483870968,
"grad_norm": 0.08915024995803833,
"learning_rate": 9.999926947126643e-05,
"loss": 0.0376,
"step": 394
},
{
"epoch": 2.5483870967741935,
"grad_norm": 0.09460075944662094,
"learning_rate": 9.999900567009811e-05,
"loss": 0.0367,
"step": 395
},
{
"epoch": 2.554838709677419,
"grad_norm": 0.0868026539683342,
"learning_rate": 9.999870128471116e-05,
"loss": 0.0356,
"step": 396
},
{
"epoch": 2.5612903225806454,
"grad_norm": 0.09221860021352768,
"learning_rate": 9.999835631535266e-05,
"loss": 0.0441,
"step": 397
},
{
"epoch": 2.567741935483871,
"grad_norm": 0.1023736447095871,
"learning_rate": 9.99979707623026e-05,
"loss": 0.0451,
"step": 398
},
{
"epoch": 2.574193548387097,
"grad_norm": 0.08648494631052017,
"learning_rate": 9.999754462587395e-05,
"loss": 0.0375,
"step": 399
},
{
"epoch": 2.5806451612903225,
"grad_norm": 0.08478187024593353,
"learning_rate": 9.99970779064126e-05,
"loss": 0.0365,
"step": 400
},
{
"epoch": 2.5806451612903225,
"eval_loss": 0.046045903116464615,
"eval_runtime": 20.63,
"eval_samples_per_second": 4.847,
"eval_steps_per_second": 0.097,
"step": 400
},
{
"epoch": 2.587096774193548,
"grad_norm": 0.09959060698747635,
"learning_rate": 9.99965706042974e-05,
"loss": 0.0467,
"step": 401
},
{
"epoch": 2.5935483870967744,
"grad_norm": 0.09755048900842667,
"learning_rate": 9.99960227199401e-05,
"loss": 0.0533,
"step": 402
},
{
"epoch": 2.6,
"grad_norm": 0.09167367219924927,
"learning_rate": 9.999543425378546e-05,
"loss": 0.0325,
"step": 403
},
{
"epoch": 2.606451612903226,
"grad_norm": 0.09100550413131714,
"learning_rate": 9.999480520631108e-05,
"loss": 0.0325,
"step": 404
},
{
"epoch": 2.6129032258064515,
"grad_norm": 0.09340760111808777,
"learning_rate": 9.999413557802761e-05,
"loss": 0.0342,
"step": 405
},
{
"epoch": 2.6193548387096772,
"grad_norm": 0.08973444998264313,
"learning_rate": 9.999342536947856e-05,
"loss": 0.0276,
"step": 406
},
{
"epoch": 2.6258064516129034,
"grad_norm": 0.09528621286153793,
"learning_rate": 9.999267458124042e-05,
"loss": 0.0375,
"step": 407
},
{
"epoch": 2.632258064516129,
"grad_norm": 0.0875079482793808,
"learning_rate": 9.999188321392261e-05,
"loss": 0.0307,
"step": 408
},
{
"epoch": 2.638709677419355,
"grad_norm": 0.08494490385055542,
"learning_rate": 9.999105126816746e-05,
"loss": 0.0331,
"step": 409
},
{
"epoch": 2.6451612903225805,
"grad_norm": 0.08419065922498703,
"learning_rate": 9.999017874465026e-05,
"loss": 0.0299,
"step": 410
},
{
"epoch": 2.6516129032258062,
"grad_norm": 0.1131298691034317,
"learning_rate": 9.998926564407929e-05,
"loss": 0.045,
"step": 411
},
{
"epoch": 2.6580645161290324,
"grad_norm": 0.09079862385988235,
"learning_rate": 9.998831196719565e-05,
"loss": 0.0386,
"step": 412
},
{
"epoch": 2.664516129032258,
"grad_norm": 0.082625612616539,
"learning_rate": 9.998731771477347e-05,
"loss": 0.0334,
"step": 413
},
{
"epoch": 2.670967741935484,
"grad_norm": 0.09609875082969666,
"learning_rate": 9.998628288761977e-05,
"loss": 0.038,
"step": 414
},
{
"epoch": 2.6774193548387095,
"grad_norm": 0.09008405357599258,
"learning_rate": 9.998520748657454e-05,
"loss": 0.0412,
"step": 415
},
{
"epoch": 2.6838709677419352,
"grad_norm": 0.09257365018129349,
"learning_rate": 9.998409151251069e-05,
"loss": 0.0339,
"step": 416
},
{
"epoch": 2.6903225806451614,
"grad_norm": 0.0978696420788765,
"learning_rate": 9.998293496633402e-05,
"loss": 0.0322,
"step": 417
},
{
"epoch": 2.696774193548387,
"grad_norm": 0.10335182398557663,
"learning_rate": 9.998173784898333e-05,
"loss": 0.0361,
"step": 418
},
{
"epoch": 2.703225806451613,
"grad_norm": 0.10199162364006042,
"learning_rate": 9.99805001614303e-05,
"loss": 0.0455,
"step": 419
},
{
"epoch": 2.709677419354839,
"grad_norm": 0.09214220941066742,
"learning_rate": 9.997922190467958e-05,
"loss": 0.0348,
"step": 420
},
{
"epoch": 2.7161290322580647,
"grad_norm": 0.08050487190485,
"learning_rate": 9.997790307976873e-05,
"loss": 0.0309,
"step": 421
},
{
"epoch": 2.7225806451612904,
"grad_norm": 0.08799546957015991,
"learning_rate": 9.997654368776823e-05,
"loss": 0.0424,
"step": 422
},
{
"epoch": 2.729032258064516,
"grad_norm": 0.09670068323612213,
"learning_rate": 9.997514372978151e-05,
"loss": 0.049,
"step": 423
},
{
"epoch": 2.735483870967742,
"grad_norm": 0.08845686167478561,
"learning_rate": 9.997370320694492e-05,
"loss": 0.0328,
"step": 424
},
{
"epoch": 2.741935483870968,
"grad_norm": 0.09527582675218582,
"learning_rate": 9.997222212042771e-05,
"loss": 0.0469,
"step": 425
},
{
"epoch": 2.741935483870968,
"eval_loss": 0.04324241355061531,
"eval_runtime": 20.6183,
"eval_samples_per_second": 4.85,
"eval_steps_per_second": 0.097,
"step": 425
},
{
"epoch": 2.7483870967741937,
"grad_norm": 0.0964616984128952,
"learning_rate": 9.997070047143209e-05,
"loss": 0.0349,
"step": 426
},
{
"epoch": 2.7548387096774194,
"grad_norm": 0.08241977542638779,
"learning_rate": 9.996913826119319e-05,
"loss": 0.0342,
"step": 427
},
{
"epoch": 2.761290322580645,
"grad_norm": 0.08565547317266464,
"learning_rate": 9.996753549097906e-05,
"loss": 0.0272,
"step": 428
},
{
"epoch": 2.767741935483871,
"grad_norm": 0.0907561182975769,
"learning_rate": 9.996589216209065e-05,
"loss": 0.0283,
"step": 429
},
{
"epoch": 2.774193548387097,
"grad_norm": 0.09964805096387863,
"learning_rate": 9.996420827586187e-05,
"loss": 0.037,
"step": 430
},
{
"epoch": 2.7806451612903227,
"grad_norm": 0.08956048637628555,
"learning_rate": 9.996248383365951e-05,
"loss": 0.0355,
"step": 431
},
{
"epoch": 2.7870967741935484,
"grad_norm": 0.09447566419839859,
"learning_rate": 9.996071883688334e-05,
"loss": 0.0304,
"step": 432
},
{
"epoch": 2.793548387096774,
"grad_norm": 0.07058817893266678,
"learning_rate": 9.995891328696599e-05,
"loss": 0.0315,
"step": 433
},
{
"epoch": 2.8,
"grad_norm": 0.08722875267267227,
"learning_rate": 9.995706718537299e-05,
"loss": 0.0364,
"step": 434
},
{
"epoch": 2.806451612903226,
"grad_norm": 0.08992493152618408,
"learning_rate": 9.995518053360287e-05,
"loss": 0.0448,
"step": 435
},
{
"epoch": 2.8129032258064517,
"grad_norm": 0.0942826047539711,
"learning_rate": 9.995325333318698e-05,
"loss": 0.0385,
"step": 436
},
{
"epoch": 2.8193548387096774,
"grad_norm": 0.08824922144412994,
"learning_rate": 9.995128558568969e-05,
"loss": 0.0322,
"step": 437
},
{
"epoch": 2.825806451612903,
"grad_norm": 0.08112802356481552,
"learning_rate": 9.994927729270818e-05,
"loss": 0.0364,
"step": 438
},
{
"epoch": 2.832258064516129,
"grad_norm": 0.09755432605743408,
"learning_rate": 9.994722845587258e-05,
"loss": 0.0386,
"step": 439
},
{
"epoch": 2.838709677419355,
"grad_norm": 0.08299075067043304,
"learning_rate": 9.994513907684597e-05,
"loss": 0.0339,
"step": 440
},
{
"epoch": 2.8451612903225807,
"grad_norm": 0.08259541541337967,
"learning_rate": 9.994300915732426e-05,
"loss": 0.0363,
"step": 441
},
{
"epoch": 2.8516129032258064,
"grad_norm": 0.09055620431900024,
"learning_rate": 9.994083869903631e-05,
"loss": 0.0351,
"step": 442
},
{
"epoch": 2.858064516129032,
"grad_norm": 0.08169237524271011,
"learning_rate": 9.99386277037439e-05,
"loss": 0.0347,
"step": 443
},
{
"epoch": 2.864516129032258,
"grad_norm": 0.07610059529542923,
"learning_rate": 9.993637617324169e-05,
"loss": 0.0299,
"step": 444
},
{
"epoch": 2.870967741935484,
"grad_norm": 0.076594777405262,
"learning_rate": 9.993408410935724e-05,
"loss": 0.0249,
"step": 445
},
{
"epoch": 2.8774193548387097,
"grad_norm": 0.09088917821645737,
"learning_rate": 9.993175151395103e-05,
"loss": 0.0324,
"step": 446
},
{
"epoch": 2.8838709677419354,
"grad_norm": 0.09005070477724075,
"learning_rate": 9.992937838891643e-05,
"loss": 0.0386,
"step": 447
},
{
"epoch": 2.8903225806451616,
"grad_norm": 0.10479908436536789,
"learning_rate": 9.99269647361797e-05,
"loss": 0.0466,
"step": 448
},
{
"epoch": 2.896774193548387,
"grad_norm": 0.08293581753969193,
"learning_rate": 9.99245105577e-05,
"loss": 0.0293,
"step": 449
},
{
"epoch": 2.903225806451613,
"grad_norm": 0.07779109477996826,
"learning_rate": 9.992201585546939e-05,
"loss": 0.027,
"step": 450
},
{
"epoch": 2.903225806451613,
"eval_loss": 0.037865012884140015,
"eval_runtime": 20.634,
"eval_samples_per_second": 4.846,
"eval_steps_per_second": 0.097,
"step": 450
},
{
"epoch": 2.9096774193548387,
"grad_norm": 0.09607955068349838,
"learning_rate": 9.991948063151282e-05,
"loss": 0.0331,
"step": 451
},
{
"epoch": 2.9161290322580644,
"grad_norm": 0.07993067800998688,
"learning_rate": 9.991690488788815e-05,
"loss": 0.0293,
"step": 452
},
{
"epoch": 2.9225806451612906,
"grad_norm": 0.08637774735689163,
"learning_rate": 9.99142886266861e-05,
"loss": 0.0273,
"step": 453
},
{
"epoch": 2.9290322580645163,
"grad_norm": 0.08489525318145752,
"learning_rate": 9.991163185003029e-05,
"loss": 0.0295,
"step": 454
},
{
"epoch": 2.935483870967742,
"grad_norm": 0.08981441706418991,
"learning_rate": 9.990893456007721e-05,
"loss": 0.0308,
"step": 455
},
{
"epoch": 2.9419354838709677,
"grad_norm": 0.07571941614151001,
"learning_rate": 9.99061967590163e-05,
"loss": 0.0254,
"step": 456
},
{
"epoch": 2.9483870967741934,
"grad_norm": 0.07855682075023651,
"learning_rate": 9.990341844906977e-05,
"loss": 0.0253,
"step": 457
},
{
"epoch": 2.9548387096774196,
"grad_norm": 0.08273386210203171,
"learning_rate": 9.99005996324928e-05,
"loss": 0.0278,
"step": 458
},
{
"epoch": 2.9612903225806453,
"grad_norm": 0.08668919652700424,
"learning_rate": 9.989774031157346e-05,
"loss": 0.0355,
"step": 459
},
{
"epoch": 2.967741935483871,
"grad_norm": 0.07949639856815338,
"learning_rate": 9.989484048863263e-05,
"loss": 0.0311,
"step": 460
},
{
"epoch": 2.9741935483870967,
"grad_norm": 0.07660870999097824,
"learning_rate": 9.98919001660241e-05,
"loss": 0.0281,
"step": 461
},
{
"epoch": 2.9806451612903224,
"grad_norm": 0.0825650691986084,
"learning_rate": 9.988891934613453e-05,
"loss": 0.0334,
"step": 462
},
{
"epoch": 2.9870967741935486,
"grad_norm": 0.09388405084609985,
"learning_rate": 9.988589803138348e-05,
"loss": 0.0389,
"step": 463
},
{
"epoch": 2.9935483870967743,
"grad_norm": 0.09062936902046204,
"learning_rate": 9.988283622422332e-05,
"loss": 0.0383,
"step": 464
},
{
"epoch": 3.0,
"grad_norm": 0.09347395598888397,
"learning_rate": 9.987973392713932e-05,
"loss": 0.0312,
"step": 465
},
{
"epoch": 3.0064516129032257,
"grad_norm": 0.07513423264026642,
"learning_rate": 9.987659114264962e-05,
"loss": 0.0262,
"step": 466
},
{
"epoch": 3.0129032258064514,
"grad_norm": 0.07603180408477783,
"learning_rate": 9.987340787330525e-05,
"loss": 0.0276,
"step": 467
},
{
"epoch": 3.0193548387096776,
"grad_norm": 0.06836437433958054,
"learning_rate": 9.987018412169004e-05,
"loss": 0.021,
"step": 468
},
{
"epoch": 3.0258064516129033,
"grad_norm": 0.07807572931051254,
"learning_rate": 9.986691989042072e-05,
"loss": 0.0207,
"step": 469
},
{
"epoch": 3.032258064516129,
"grad_norm": 0.0833655521273613,
"learning_rate": 9.986361518214686e-05,
"loss": 0.0196,
"step": 470
},
{
"epoch": 3.0387096774193547,
"grad_norm": 0.08109772205352783,
"learning_rate": 9.98602699995509e-05,
"loss": 0.0236,
"step": 471
},
{
"epoch": 3.0451612903225804,
"grad_norm": 0.08900588750839233,
"learning_rate": 9.985688434534812e-05,
"loss": 0.0255,
"step": 472
},
{
"epoch": 3.0516129032258066,
"grad_norm": 0.08419376611709595,
"learning_rate": 9.985345822228669e-05,
"loss": 0.02,
"step": 473
},
{
"epoch": 3.0580645161290323,
"grad_norm": 0.0820038914680481,
"learning_rate": 9.984999163314754e-05,
"loss": 0.022,
"step": 474
},
{
"epoch": 3.064516129032258,
"grad_norm": 0.07802214473485947,
"learning_rate": 9.984648458074456e-05,
"loss": 0.026,
"step": 475
},
{
"epoch": 3.064516129032258,
"eval_loss": 0.03555970638990402,
"eval_runtime": 20.6207,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.097,
"step": 475
},
{
"epoch": 3.0709677419354837,
"grad_norm": 0.08685624599456787,
"learning_rate": 9.984293706792438e-05,
"loss": 0.0325,
"step": 476
},
{
"epoch": 3.07741935483871,
"grad_norm": 0.06740245223045349,
"learning_rate": 9.983934909756655e-05,
"loss": 0.0228,
"step": 477
},
{
"epoch": 3.0838709677419356,
"grad_norm": 0.0687062218785286,
"learning_rate": 9.98357206725834e-05,
"loss": 0.0205,
"step": 478
},
{
"epoch": 3.0903225806451613,
"grad_norm": 0.07554581761360168,
"learning_rate": 9.983205179592015e-05,
"loss": 0.0231,
"step": 479
},
{
"epoch": 3.096774193548387,
"grad_norm": 0.08018746972084045,
"learning_rate": 9.982834247055481e-05,
"loss": 0.0222,
"step": 480
},
{
"epoch": 3.1032258064516127,
"grad_norm": 0.08768939971923828,
"learning_rate": 9.982459269949826e-05,
"loss": 0.025,
"step": 481
},
{
"epoch": 3.109677419354839,
"grad_norm": 0.07732031494379044,
"learning_rate": 9.982080248579417e-05,
"loss": 0.021,
"step": 482
},
{
"epoch": 3.1161290322580646,
"grad_norm": 0.08360020071268082,
"learning_rate": 9.981697183251906e-05,
"loss": 0.0256,
"step": 483
},
{
"epoch": 3.1225806451612903,
"grad_norm": 0.0702652707695961,
"learning_rate": 9.98131007427823e-05,
"loss": 0.0204,
"step": 484
},
{
"epoch": 3.129032258064516,
"grad_norm": 0.07399879395961761,
"learning_rate": 9.980918921972602e-05,
"loss": 0.019,
"step": 485
},
{
"epoch": 3.135483870967742,
"grad_norm": 0.08124198019504547,
"learning_rate": 9.980523726652523e-05,
"loss": 0.0229,
"step": 486
},
{
"epoch": 3.141935483870968,
"grad_norm": 0.07998952269554138,
"learning_rate": 9.980124488638774e-05,
"loss": 0.0246,
"step": 487
},
{
"epoch": 3.1483870967741936,
"grad_norm": 0.09725457429885864,
"learning_rate": 9.979721208255412e-05,
"loss": 0.0253,
"step": 488
},
{
"epoch": 3.1548387096774193,
"grad_norm": 0.08378903567790985,
"learning_rate": 9.979313885829785e-05,
"loss": 0.0265,
"step": 489
},
{
"epoch": 3.161290322580645,
"grad_norm": 0.07236087322235107,
"learning_rate": 9.978902521692515e-05,
"loss": 0.0212,
"step": 490
},
{
"epoch": 3.167741935483871,
"grad_norm": 0.06853360682725906,
"learning_rate": 9.978487116177506e-05,
"loss": 0.0193,
"step": 491
},
{
"epoch": 3.174193548387097,
"grad_norm": 0.09208260476589203,
"learning_rate": 9.978067669621943e-05,
"loss": 0.0224,
"step": 492
},
{
"epoch": 3.1806451612903226,
"grad_norm": 0.07864844053983688,
"learning_rate": 9.977644182366292e-05,
"loss": 0.0235,
"step": 493
},
{
"epoch": 3.1870967741935483,
"grad_norm": 0.07481813430786133,
"learning_rate": 9.977216654754296e-05,
"loss": 0.0201,
"step": 494
},
{
"epoch": 3.193548387096774,
"grad_norm": 0.08573871105909348,
"learning_rate": 9.976785087132981e-05,
"loss": 0.0219,
"step": 495
},
{
"epoch": 3.2,
"grad_norm": 0.09021998196840286,
"learning_rate": 9.97634947985265e-05,
"loss": 0.0228,
"step": 496
},
{
"epoch": 3.206451612903226,
"grad_norm": 0.07071111351251602,
"learning_rate": 9.975909833266885e-05,
"loss": 0.0207,
"step": 497
},
{
"epoch": 3.2129032258064516,
"grad_norm": 0.08035361021757126,
"learning_rate": 9.97546614773255e-05,
"loss": 0.0199,
"step": 498
},
{
"epoch": 3.2193548387096773,
"grad_norm": 0.07872354239225388,
"learning_rate": 9.975018423609781e-05,
"loss": 0.0238,
"step": 499
},
{
"epoch": 3.225806451612903,
"grad_norm": 0.08297586441040039,
"learning_rate": 9.974566661261999e-05,
"loss": 0.0223,
"step": 500
},
{
"epoch": 3.225806451612903,
"eval_loss": 0.0357317216694355,
"eval_runtime": 20.6238,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.097,
"step": 500
},
{
"epoch": 3.232258064516129,
"grad_norm": 0.09199152141809464,
"learning_rate": 9.974110861055899e-05,
"loss": 0.0235,
"step": 501
},
{
"epoch": 3.238709677419355,
"grad_norm": 0.0826394334435463,
"learning_rate": 9.973651023361452e-05,
"loss": 0.0201,
"step": 502
},
{
"epoch": 3.2451612903225806,
"grad_norm": 0.11516128480434418,
"learning_rate": 9.973187148551911e-05,
"loss": 0.0253,
"step": 503
},
{
"epoch": 3.2516129032258063,
"grad_norm": 0.09063907712697983,
"learning_rate": 9.972719237003802e-05,
"loss": 0.0223,
"step": 504
},
{
"epoch": 3.258064516129032,
"grad_norm": 0.08523458242416382,
"learning_rate": 9.972247289096932e-05,
"loss": 0.0211,
"step": 505
},
{
"epoch": 3.264516129032258,
"grad_norm": 0.10173799842596054,
"learning_rate": 9.971771305214377e-05,
"loss": 0.0327,
"step": 506
},
{
"epoch": 3.270967741935484,
"grad_norm": 0.0834076777100563,
"learning_rate": 9.971291285742497e-05,
"loss": 0.0257,
"step": 507
},
{
"epoch": 3.2774193548387096,
"grad_norm": 0.07420746237039566,
"learning_rate": 9.970807231070921e-05,
"loss": 0.0209,
"step": 508
},
{
"epoch": 3.2838709677419353,
"grad_norm": 0.08463042974472046,
"learning_rate": 9.97031914159256e-05,
"loss": 0.0212,
"step": 509
},
{
"epoch": 3.2903225806451615,
"grad_norm": 0.08214029669761658,
"learning_rate": 9.969827017703593e-05,
"loss": 0.0264,
"step": 510
},
{
"epoch": 3.296774193548387,
"grad_norm": 0.08387239277362823,
"learning_rate": 9.969330859803478e-05,
"loss": 0.0199,
"step": 511
},
{
"epoch": 3.303225806451613,
"grad_norm": 0.08673185110092163,
"learning_rate": 9.968830668294947e-05,
"loss": 0.0214,
"step": 512
},
{
"epoch": 3.3096774193548386,
"grad_norm": 0.08083758503198624,
"learning_rate": 9.968326443584007e-05,
"loss": 0.0213,
"step": 513
},
{
"epoch": 3.3161290322580643,
"grad_norm": 0.08173336833715439,
"learning_rate": 9.967818186079936e-05,
"loss": 0.0217,
"step": 514
},
{
"epoch": 3.3225806451612905,
"grad_norm": 0.07553200423717499,
"learning_rate": 9.967305896195288e-05,
"loss": 0.019,
"step": 515
},
{
"epoch": 3.329032258064516,
"grad_norm": 0.08780141919851303,
"learning_rate": 9.966789574345888e-05,
"loss": 0.025,
"step": 516
},
{
"epoch": 3.335483870967742,
"grad_norm": 0.08397892117500305,
"learning_rate": 9.966269220950835e-05,
"loss": 0.0259,
"step": 517
},
{
"epoch": 3.3419354838709676,
"grad_norm": 0.10555091500282288,
"learning_rate": 9.9657448364325e-05,
"loss": 0.0269,
"step": 518
},
{
"epoch": 3.3483870967741938,
"grad_norm": 0.07310711592435837,
"learning_rate": 9.965216421216526e-05,
"loss": 0.0214,
"step": 519
},
{
"epoch": 3.3548387096774195,
"grad_norm": 0.07622754573822021,
"learning_rate": 9.964683975731828e-05,
"loss": 0.0202,
"step": 520
},
{
"epoch": 3.361290322580645,
"grad_norm": 0.0859362930059433,
"learning_rate": 9.964147500410592e-05,
"loss": 0.0235,
"step": 521
},
{
"epoch": 3.367741935483871,
"grad_norm": 0.08965717256069183,
"learning_rate": 9.963606995688277e-05,
"loss": 0.0189,
"step": 522
},
{
"epoch": 3.3741935483870966,
"grad_norm": 0.0818329006433487,
"learning_rate": 9.963062462003609e-05,
"loss": 0.0199,
"step": 523
},
{
"epoch": 3.3806451612903228,
"grad_norm": 0.07498865574598312,
"learning_rate": 9.962513899798587e-05,
"loss": 0.0213,
"step": 524
},
{
"epoch": 3.3870967741935485,
"grad_norm": 0.08389492332935333,
"learning_rate": 9.961961309518479e-05,
"loss": 0.0228,
"step": 525
},
{
"epoch": 3.3870967741935485,
"eval_loss": 0.035191066563129425,
"eval_runtime": 20.6312,
"eval_samples_per_second": 4.847,
"eval_steps_per_second": 0.097,
"step": 525
},
{
"epoch": 3.393548387096774,
"grad_norm": 0.0822795033454895,
"learning_rate": 9.961404691611825e-05,
"loss": 0.0215,
"step": 526
},
{
"epoch": 3.4,
"grad_norm": 0.08398600667715073,
"learning_rate": 9.960844046530428e-05,
"loss": 0.0244,
"step": 527
},
{
"epoch": 3.4064516129032256,
"grad_norm": 0.08129465579986572,
"learning_rate": 9.960279374729367e-05,
"loss": 0.0239,
"step": 528
},
{
"epoch": 3.412903225806452,
"grad_norm": 0.07414031773805618,
"learning_rate": 9.959710676666985e-05,
"loss": 0.0244,
"step": 529
},
{
"epoch": 3.4193548387096775,
"grad_norm": 0.08356715738773346,
"learning_rate": 9.959137952804894e-05,
"loss": 0.0188,
"step": 530
},
{
"epoch": 3.425806451612903,
"grad_norm": 0.0784551128745079,
"learning_rate": 9.958561203607975e-05,
"loss": 0.023,
"step": 531
},
{
"epoch": 3.432258064516129,
"grad_norm": 0.08018410205841064,
"learning_rate": 9.957980429544377e-05,
"loss": 0.0228,
"step": 532
},
{
"epoch": 3.4387096774193546,
"grad_norm": 0.07876982539892197,
"learning_rate": 9.957395631085513e-05,
"loss": 0.0232,
"step": 533
},
{
"epoch": 3.445161290322581,
"grad_norm": 0.08994334936141968,
"learning_rate": 9.956806808706064e-05,
"loss": 0.0248,
"step": 534
},
{
"epoch": 3.4516129032258065,
"grad_norm": 0.097404845058918,
"learning_rate": 9.956213962883977e-05,
"loss": 0.025,
"step": 535
},
{
"epoch": 3.458064516129032,
"grad_norm": 0.09073007851839066,
"learning_rate": 9.955617094100469e-05,
"loss": 0.0264,
"step": 536
},
{
"epoch": 3.464516129032258,
"grad_norm": 0.08371078222990036,
"learning_rate": 9.955016202840014e-05,
"loss": 0.0239,
"step": 537
},
{
"epoch": 3.4709677419354836,
"grad_norm": 0.08191969245672226,
"learning_rate": 9.954411289590358e-05,
"loss": 0.023,
"step": 538
},
{
"epoch": 3.47741935483871,
"grad_norm": 0.07514638453722,
"learning_rate": 9.95380235484251e-05,
"loss": 0.0209,
"step": 539
},
{
"epoch": 3.4838709677419355,
"grad_norm": 0.07831292599439621,
"learning_rate": 9.95318939909074e-05,
"loss": 0.0223,
"step": 540
},
{
"epoch": 3.490322580645161,
"grad_norm": 0.0708436518907547,
"learning_rate": 9.952572422832586e-05,
"loss": 0.0245,
"step": 541
},
{
"epoch": 3.496774193548387,
"grad_norm": 0.08529362827539444,
"learning_rate": 9.951951426568851e-05,
"loss": 0.0192,
"step": 542
},
{
"epoch": 3.5032258064516126,
"grad_norm": 0.06932336837053299,
"learning_rate": 9.951326410803593e-05,
"loss": 0.0211,
"step": 543
},
{
"epoch": 3.509677419354839,
"grad_norm": 0.0799434706568718,
"learning_rate": 9.95069737604414e-05,
"loss": 0.0216,
"step": 544
},
{
"epoch": 3.5161290322580645,
"grad_norm": 0.08105307072401047,
"learning_rate": 9.95006432280108e-05,
"loss": 0.0217,
"step": 545
},
{
"epoch": 3.52258064516129,
"grad_norm": 0.0905054360628128,
"learning_rate": 9.949427251588263e-05,
"loss": 0.0229,
"step": 546
},
{
"epoch": 3.5290322580645164,
"grad_norm": 0.0961836725473404,
"learning_rate": 9.948786162922799e-05,
"loss": 0.0198,
"step": 547
},
{
"epoch": 3.535483870967742,
"grad_norm": 0.08218283206224442,
"learning_rate": 9.948141057325057e-05,
"loss": 0.0205,
"step": 548
},
{
"epoch": 3.541935483870968,
"grad_norm": 0.07585642486810684,
"learning_rate": 9.947491935318676e-05,
"loss": 0.0209,
"step": 549
},
{
"epoch": 3.5483870967741935,
"grad_norm": 0.08271976560354233,
"learning_rate": 9.946838797430546e-05,
"loss": 0.0199,
"step": 550
},
{
"epoch": 3.5483870967741935,
"eval_loss": 0.03360068425536156,
"eval_runtime": 20.7001,
"eval_samples_per_second": 4.831,
"eval_steps_per_second": 0.097,
"step": 550
},
{
"epoch": 3.554838709677419,
"grad_norm": 0.07116498053073883,
"learning_rate": 9.946181644190817e-05,
"loss": 0.0212,
"step": 551
},
{
"epoch": 3.5612903225806454,
"grad_norm": 0.09358586370944977,
"learning_rate": 9.945520476132903e-05,
"loss": 0.0226,
"step": 552
},
{
"epoch": 3.567741935483871,
"grad_norm": 0.07434282451868057,
"learning_rate": 9.944855293793475e-05,
"loss": 0.0173,
"step": 553
},
{
"epoch": 3.574193548387097,
"grad_norm": 0.08777125179767609,
"learning_rate": 9.94418609771246e-05,
"loss": 0.0288,
"step": 554
},
{
"epoch": 3.5806451612903225,
"grad_norm": 0.0815097913146019,
"learning_rate": 9.943512888433047e-05,
"loss": 0.0215,
"step": 555
},
{
"epoch": 3.587096774193548,
"grad_norm": 0.0928550437092781,
"learning_rate": 9.942835666501676e-05,
"loss": 0.0279,
"step": 556
},
{
"epoch": 3.5935483870967744,
"grad_norm": 0.07668986171483994,
"learning_rate": 9.942154432468054e-05,
"loss": 0.0209,
"step": 557
},
{
"epoch": 3.6,
"grad_norm": 0.06721030920743942,
"learning_rate": 9.941469186885135e-05,
"loss": 0.0168,
"step": 558
},
{
"epoch": 3.606451612903226,
"grad_norm": 0.07109752297401428,
"learning_rate": 9.940779930309134e-05,
"loss": 0.0181,
"step": 559
},
{
"epoch": 3.6129032258064515,
"grad_norm": 0.08308808505535126,
"learning_rate": 9.940086663299522e-05,
"loss": 0.018,
"step": 560
},
{
"epoch": 3.6193548387096772,
"grad_norm": 0.09721226990222931,
"learning_rate": 9.939389386419024e-05,
"loss": 0.0267,
"step": 561
},
{
"epoch": 3.6258064516129034,
"grad_norm": 0.0851580798625946,
"learning_rate": 9.93868810023362e-05,
"loss": 0.021,
"step": 562
},
{
"epoch": 3.632258064516129,
"grad_norm": 0.0647260770201683,
"learning_rate": 9.937982805312543e-05,
"loss": 0.0135,
"step": 563
},
{
"epoch": 3.638709677419355,
"grad_norm": 0.06940098106861115,
"learning_rate": 9.937273502228281e-05,
"loss": 0.0199,
"step": 564
},
{
"epoch": 3.6451612903225805,
"grad_norm": 0.07867590337991714,
"learning_rate": 9.936560191556579e-05,
"loss": 0.0191,
"step": 565
},
{
"epoch": 3.6516129032258062,
"grad_norm": 0.07656869292259216,
"learning_rate": 9.935842873876429e-05,
"loss": 0.0247,
"step": 566
},
{
"epoch": 3.6580645161290324,
"grad_norm": 0.09072941541671753,
"learning_rate": 9.935121549770078e-05,
"loss": 0.0239,
"step": 567
},
{
"epoch": 3.664516129032258,
"grad_norm": 0.06793881952762604,
"learning_rate": 9.934396219823026e-05,
"loss": 0.0179,
"step": 568
},
{
"epoch": 3.670967741935484,
"grad_norm": 0.07314693927764893,
"learning_rate": 9.933666884624025e-05,
"loss": 0.0215,
"step": 569
},
{
"epoch": 3.6774193548387095,
"grad_norm": 0.06927098333835602,
"learning_rate": 9.932933544765073e-05,
"loss": 0.0218,
"step": 570
},
{
"epoch": 3.6838709677419352,
"grad_norm": 0.07695493847131729,
"learning_rate": 9.932196200841426e-05,
"loss": 0.0237,
"step": 571
},
{
"epoch": 3.6903225806451614,
"grad_norm": 0.07632816582918167,
"learning_rate": 9.931454853451586e-05,
"loss": 0.0164,
"step": 572
},
{
"epoch": 3.696774193548387,
"grad_norm": 0.07444462180137634,
"learning_rate": 9.930709503197302e-05,
"loss": 0.0192,
"step": 573
},
{
"epoch": 3.703225806451613,
"grad_norm": 0.08099304139614105,
"learning_rate": 9.929960150683577e-05,
"loss": 0.0204,
"step": 574
},
{
"epoch": 3.709677419354839,
"grad_norm": 0.07179006189107895,
"learning_rate": 9.929206796518662e-05,
"loss": 0.0227,
"step": 575
},
{
"epoch": 3.709677419354839,
"eval_loss": 0.03084612637758255,
"eval_runtime": 20.6116,
"eval_samples_per_second": 4.852,
"eval_steps_per_second": 0.097,
"step": 575
},
{
"epoch": 3.7161290322580647,
"grad_norm": 0.07330773770809174,
"learning_rate": 9.928449441314055e-05,
"loss": 0.0195,
"step": 576
},
{
"epoch": 3.7225806451612904,
"grad_norm": 0.08855849504470825,
"learning_rate": 9.927688085684499e-05,
"loss": 0.028,
"step": 577
},
{
"epoch": 3.729032258064516,
"grad_norm": 0.06930747628211975,
"learning_rate": 9.92692273024799e-05,
"loss": 0.0196,
"step": 578
},
{
"epoch": 3.735483870967742,
"grad_norm": 0.08203380554914474,
"learning_rate": 9.926153375625765e-05,
"loss": 0.0192,
"step": 579
},
{
"epoch": 3.741935483870968,
"grad_norm": 0.07359786331653595,
"learning_rate": 9.925380022442311e-05,
"loss": 0.0251,
"step": 580
},
{
"epoch": 3.7483870967741937,
"grad_norm": 0.08028274029493332,
"learning_rate": 9.924602671325358e-05,
"loss": 0.024,
"step": 581
},
{
"epoch": 3.7548387096774194,
"grad_norm": 0.07106206566095352,
"learning_rate": 9.923821322905883e-05,
"loss": 0.0189,
"step": 582
},
{
"epoch": 3.761290322580645,
"grad_norm": 0.088934987783432,
"learning_rate": 9.923035977818107e-05,
"loss": 0.0202,
"step": 583
},
{
"epoch": 3.767741935483871,
"grad_norm": 0.09591435641050339,
"learning_rate": 9.922246636699494e-05,
"loss": 0.0219,
"step": 584
},
{
"epoch": 3.774193548387097,
"grad_norm": 0.08910396695137024,
"learning_rate": 9.921453300190755e-05,
"loss": 0.0206,
"step": 585
},
{
"epoch": 3.7806451612903227,
"grad_norm": 0.08721159398555756,
"learning_rate": 9.920655968935838e-05,
"loss": 0.0229,
"step": 586
},
{
"epoch": 3.7870967741935484,
"grad_norm": 0.0917913094162941,
"learning_rate": 9.919854643581941e-05,
"loss": 0.0236,
"step": 587
},
{
"epoch": 3.793548387096774,
"grad_norm": 0.0874079242348671,
"learning_rate": 9.919049324779497e-05,
"loss": 0.018,
"step": 588
},
{
"epoch": 3.8,
"grad_norm": 0.07590654492378235,
"learning_rate": 9.918240013182185e-05,
"loss": 0.0175,
"step": 589
},
{
"epoch": 3.806451612903226,
"grad_norm": 0.07079242169857025,
"learning_rate": 9.917426709446925e-05,
"loss": 0.019,
"step": 590
},
{
"epoch": 3.8129032258064517,
"grad_norm": 0.09207978844642639,
"learning_rate": 9.916609414233873e-05,
"loss": 0.0203,
"step": 591
},
{
"epoch": 3.8193548387096774,
"grad_norm": 0.0762355849146843,
"learning_rate": 9.915788128206432e-05,
"loss": 0.0161,
"step": 592
},
{
"epoch": 3.825806451612903,
"grad_norm": 0.09506982564926147,
"learning_rate": 9.914962852031237e-05,
"loss": 0.0213,
"step": 593
},
{
"epoch": 3.832258064516129,
"grad_norm": 0.09398619085550308,
"learning_rate": 9.914133586378166e-05,
"loss": 0.0243,
"step": 594
},
{
"epoch": 3.838709677419355,
"grad_norm": 0.09069452434778214,
"learning_rate": 9.913300331920334e-05,
"loss": 0.0286,
"step": 595
},
{
"epoch": 3.8451612903225807,
"grad_norm": 0.0756390318274498,
"learning_rate": 9.912463089334097e-05,
"loss": 0.0209,
"step": 596
},
{
"epoch": 3.8516129032258064,
"grad_norm": 0.08355611562728882,
"learning_rate": 9.91162185929904e-05,
"loss": 0.0224,
"step": 597
},
{
"epoch": 3.858064516129032,
"grad_norm": 0.0704779401421547,
"learning_rate": 9.910776642497994e-05,
"loss": 0.0169,
"step": 598
},
{
"epoch": 3.864516129032258,
"grad_norm": 0.08580672740936279,
"learning_rate": 9.90992743961702e-05,
"loss": 0.0255,
"step": 599
},
{
"epoch": 3.870967741935484,
"grad_norm": 0.06796646118164062,
"learning_rate": 9.909074251345417e-05,
"loss": 0.0207,
"step": 600
},
{
"epoch": 3.870967741935484,
"eval_loss": 0.02920311503112316,
"eval_runtime": 20.6688,
"eval_samples_per_second": 4.838,
"eval_steps_per_second": 0.097,
"step": 600
},
{
"epoch": 3.8774193548387097,
"grad_norm": 0.07997417449951172,
"learning_rate": 9.908217078375718e-05,
"loss": 0.0232,
"step": 601
},
{
"epoch": 3.8838709677419354,
"grad_norm": 0.06856135278940201,
"learning_rate": 9.907355921403691e-05,
"loss": 0.0183,
"step": 602
},
{
"epoch": 3.8903225806451616,
"grad_norm": 0.06765090674161911,
"learning_rate": 9.906490781128339e-05,
"loss": 0.0205,
"step": 603
},
{
"epoch": 3.896774193548387,
"grad_norm": 0.08263225853443146,
"learning_rate": 9.905621658251896e-05,
"loss": 0.0204,
"step": 604
},
{
"epoch": 3.903225806451613,
"grad_norm": 0.06636311858892441,
"learning_rate": 9.904748553479827e-05,
"loss": 0.0182,
"step": 605
},
{
"epoch": 3.9096774193548387,
"grad_norm": 0.07616009563207626,
"learning_rate": 9.903871467520835e-05,
"loss": 0.0175,
"step": 606
},
{
"epoch": 3.9161290322580644,
"grad_norm": 0.08746340870857239,
"learning_rate": 9.902990401086849e-05,
"loss": 0.0242,
"step": 607
},
{
"epoch": 3.9225806451612906,
"grad_norm": 0.07934951782226562,
"learning_rate": 9.90210535489303e-05,
"loss": 0.0199,
"step": 608
},
{
"epoch": 3.9290322580645163,
"grad_norm": 0.07786279171705246,
"learning_rate": 9.901216329657774e-05,
"loss": 0.0199,
"step": 609
},
{
"epoch": 3.935483870967742,
"grad_norm": 0.08316848427057266,
"learning_rate": 9.900323326102702e-05,
"loss": 0.0208,
"step": 610
},
{
"epoch": 3.9419354838709677,
"grad_norm": 0.07723607867956161,
"learning_rate": 9.899426344952663e-05,
"loss": 0.0201,
"step": 611
},
{
"epoch": 3.9483870967741934,
"grad_norm": 0.07270605862140656,
"learning_rate": 9.89852538693574e-05,
"loss": 0.0187,
"step": 612
},
{
"epoch": 3.9548387096774196,
"grad_norm": 0.0690370425581932,
"learning_rate": 9.897620452783241e-05,
"loss": 0.0162,
"step": 613
},
{
"epoch": 3.9612903225806453,
"grad_norm": 0.07613126933574677,
"learning_rate": 9.896711543229698e-05,
"loss": 0.0179,
"step": 614
},
{
"epoch": 3.967741935483871,
"grad_norm": 0.0803714394569397,
"learning_rate": 9.895798659012879e-05,
"loss": 0.0209,
"step": 615
},
{
"epoch": 3.9741935483870967,
"grad_norm": 0.0816982090473175,
"learning_rate": 9.894881800873768e-05,
"loss": 0.0215,
"step": 616
},
{
"epoch": 3.9806451612903224,
"grad_norm": 0.10295988619327545,
"learning_rate": 9.89396096955658e-05,
"loss": 0.0242,
"step": 617
},
{
"epoch": 3.9870967741935486,
"grad_norm": 0.0716472640633583,
"learning_rate": 9.893036165808755e-05,
"loss": 0.0222,
"step": 618
},
{
"epoch": 3.9935483870967743,
"grad_norm": 0.07746679335832596,
"learning_rate": 9.892107390380958e-05,
"loss": 0.0206,
"step": 619
},
{
"epoch": 4.0,
"grad_norm": 0.07583904266357422,
"learning_rate": 9.891174644027073e-05,
"loss": 0.0172,
"step": 620
},
{
"epoch": 4.006451612903226,
"grad_norm": 0.07562244683504105,
"learning_rate": 9.890237927504214e-05,
"loss": 0.0155,
"step": 621
},
{
"epoch": 4.012903225806451,
"grad_norm": 0.05778181925415993,
"learning_rate": 9.889297241572714e-05,
"loss": 0.0141,
"step": 622
},
{
"epoch": 4.019354838709678,
"grad_norm": 0.06385020166635513,
"learning_rate": 9.888352586996126e-05,
"loss": 0.0114,
"step": 623
},
{
"epoch": 4.025806451612903,
"grad_norm": 0.071347676217556,
"learning_rate": 9.887403964541228e-05,
"loss": 0.0133,
"step": 624
},
{
"epoch": 4.032258064516129,
"grad_norm": 0.07100800424814224,
"learning_rate": 9.886451374978018e-05,
"loss": 0.0125,
"step": 625
},
{
"epoch": 4.032258064516129,
"eval_loss": 0.030386490747332573,
"eval_runtime": 20.6264,
"eval_samples_per_second": 4.848,
"eval_steps_per_second": 0.097,
"step": 625
},
{
"epoch": 4.038709677419355,
"grad_norm": 0.07996921241283417,
"learning_rate": 9.885494819079713e-05,
"loss": 0.0154,
"step": 626
},
{
"epoch": 4.04516129032258,
"grad_norm": 0.07129693031311035,
"learning_rate": 9.884534297622749e-05,
"loss": 0.0132,
"step": 627
},
{
"epoch": 4.051612903225807,
"grad_norm": 0.06520695984363556,
"learning_rate": 9.883569811386782e-05,
"loss": 0.012,
"step": 628
},
{
"epoch": 4.058064516129032,
"grad_norm": 0.06700082868337631,
"learning_rate": 9.882601361154687e-05,
"loss": 0.0112,
"step": 629
},
{
"epoch": 4.064516129032258,
"grad_norm": 0.07065930962562561,
"learning_rate": 9.881628947712556e-05,
"loss": 0.0125,
"step": 630
},
{
"epoch": 4.070967741935484,
"grad_norm": 0.07708729058504105,
"learning_rate": 9.880652571849695e-05,
"loss": 0.0152,
"step": 631
},
{
"epoch": 4.077419354838709,
"grad_norm": 0.07915516197681427,
"learning_rate": 9.879672234358631e-05,
"loss": 0.0132,
"step": 632
},
{
"epoch": 4.083870967741936,
"grad_norm": 0.08086412400007248,
"learning_rate": 9.878687936035103e-05,
"loss": 0.0128,
"step": 633
},
{
"epoch": 4.090322580645161,
"grad_norm": 0.07018841058015823,
"learning_rate": 9.877699677678067e-05,
"loss": 0.0126,
"step": 634
},
{
"epoch": 4.096774193548387,
"grad_norm": 0.08451451361179352,
"learning_rate": 9.876707460089692e-05,
"loss": 0.0149,
"step": 635
},
{
"epoch": 4.103225806451613,
"grad_norm": 0.08706196397542953,
"learning_rate": 9.875711284075364e-05,
"loss": 0.0155,
"step": 636
},
{
"epoch": 4.109677419354838,
"grad_norm": 0.0794413611292839,
"learning_rate": 9.874711150443678e-05,
"loss": 0.0144,
"step": 637
},
{
"epoch": 4.116129032258065,
"grad_norm": 0.0688992291688919,
"learning_rate": 9.873707060006442e-05,
"loss": 0.0127,
"step": 638
},
{
"epoch": 4.122580645161291,
"grad_norm": 0.0626332089304924,
"learning_rate": 9.872699013578679e-05,
"loss": 0.012,
"step": 639
},
{
"epoch": 4.129032258064516,
"grad_norm": 0.07945838570594788,
"learning_rate": 9.871687011978618e-05,
"loss": 0.0168,
"step": 640
},
{
"epoch": 4.135483870967742,
"grad_norm": 0.07975637167692184,
"learning_rate": 9.870671056027705e-05,
"loss": 0.0129,
"step": 641
},
{
"epoch": 4.141935483870967,
"grad_norm": 0.0599512979388237,
"learning_rate": 9.86965114655059e-05,
"loss": 0.0121,
"step": 642
},
{
"epoch": 4.148387096774194,
"grad_norm": 0.08625789731740952,
"learning_rate": 9.868627284375132e-05,
"loss": 0.0158,
"step": 643
},
{
"epoch": 4.15483870967742,
"grad_norm": 0.06459569931030273,
"learning_rate": 9.867599470332402e-05,
"loss": 0.0136,
"step": 644
},
{
"epoch": 4.161290322580645,
"grad_norm": 0.06582548469305038,
"learning_rate": 9.866567705256678e-05,
"loss": 0.0131,
"step": 645
},
{
"epoch": 4.167741935483871,
"grad_norm": 0.07506062835454941,
"learning_rate": 9.865531989985443e-05,
"loss": 0.0118,
"step": 646
},
{
"epoch": 4.174193548387096,
"grad_norm": 0.06777074187994003,
"learning_rate": 9.86449232535939e-05,
"loss": 0.0124,
"step": 647
},
{
"epoch": 4.180645161290323,
"grad_norm": 0.06837598979473114,
"learning_rate": 9.863448712222411e-05,
"loss": 0.0122,
"step": 648
},
{
"epoch": 4.187096774193549,
"grad_norm": 0.07759612798690796,
"learning_rate": 9.862401151421612e-05,
"loss": 0.0152,
"step": 649
},
{
"epoch": 4.193548387096774,
"grad_norm": 0.08142206817865372,
"learning_rate": 9.861349643807295e-05,
"loss": 0.0146,
"step": 650
},
{
"epoch": 4.193548387096774,
"eval_loss": 0.027878066524863243,
"eval_runtime": 20.628,
"eval_samples_per_second": 4.848,
"eval_steps_per_second": 0.097,
"step": 650
},
{
"epoch": 4.2,
"grad_norm": 0.10191861540079117,
"learning_rate": 9.860294190232972e-05,
"loss": 0.0168,
"step": 651
},
{
"epoch": 4.2064516129032254,
"grad_norm": 0.07277819514274597,
"learning_rate": 9.859234791555355e-05,
"loss": 0.0106,
"step": 652
},
{
"epoch": 4.212903225806452,
"grad_norm": 0.06661704927682877,
"learning_rate": 9.858171448634357e-05,
"loss": 0.0116,
"step": 653
},
{
"epoch": 4.219354838709678,
"grad_norm": 0.07337664812803268,
"learning_rate": 9.857104162333092e-05,
"loss": 0.0158,
"step": 654
},
{
"epoch": 4.225806451612903,
"grad_norm": 0.060086801648139954,
"learning_rate": 9.856032933517883e-05,
"loss": 0.0113,
"step": 655
},
{
"epoch": 4.232258064516129,
"grad_norm": 0.0661039799451828,
"learning_rate": 9.854957763058241e-05,
"loss": 0.016,
"step": 656
},
{
"epoch": 4.2387096774193544,
"grad_norm": 0.06683111935853958,
"learning_rate": 9.853878651826886e-05,
"loss": 0.015,
"step": 657
},
{
"epoch": 4.245161290322581,
"grad_norm": 0.0577155277132988,
"learning_rate": 9.852795600699731e-05,
"loss": 0.0102,
"step": 658
},
{
"epoch": 4.251612903225807,
"grad_norm": 0.06298208236694336,
"learning_rate": 9.85170861055589e-05,
"loss": 0.0116,
"step": 659
},
{
"epoch": 4.258064516129032,
"grad_norm": 0.06872560828924179,
"learning_rate": 9.850617682277674e-05,
"loss": 0.0144,
"step": 660
},
{
"epoch": 4.264516129032258,
"grad_norm": 0.07436688244342804,
"learning_rate": 9.84952281675059e-05,
"loss": 0.0138,
"step": 661
},
{
"epoch": 4.270967741935484,
"grad_norm": 0.07236282527446747,
"learning_rate": 9.848424014863337e-05,
"loss": 0.0137,
"step": 662
},
{
"epoch": 4.27741935483871,
"grad_norm": 0.06464178115129471,
"learning_rate": 9.84732127750782e-05,
"loss": 0.0129,
"step": 663
},
{
"epoch": 4.283870967741936,
"grad_norm": 0.07325076311826706,
"learning_rate": 9.846214605579127e-05,
"loss": 0.0144,
"step": 664
},
{
"epoch": 4.290322580645161,
"grad_norm": 0.06621360778808594,
"learning_rate": 9.845103999975547e-05,
"loss": 0.0136,
"step": 665
},
{
"epoch": 4.296774193548387,
"grad_norm": 0.06366943567991257,
"learning_rate": 9.843989461598555e-05,
"loss": 0.0124,
"step": 666
},
{
"epoch": 4.3032258064516125,
"grad_norm": 0.05970180034637451,
"learning_rate": 9.842870991352825e-05,
"loss": 0.0113,
"step": 667
},
{
"epoch": 4.309677419354839,
"grad_norm": 0.07171665132045746,
"learning_rate": 9.84174859014622e-05,
"loss": 0.0142,
"step": 668
},
{
"epoch": 4.316129032258065,
"grad_norm": 0.07346490025520325,
"learning_rate": 9.840622258889794e-05,
"loss": 0.0152,
"step": 669
},
{
"epoch": 4.32258064516129,
"grad_norm": 0.06221551075577736,
"learning_rate": 9.839491998497787e-05,
"loss": 0.0116,
"step": 670
},
{
"epoch": 4.329032258064516,
"grad_norm": 0.07008364796638489,
"learning_rate": 9.838357809887637e-05,
"loss": 0.0135,
"step": 671
},
{
"epoch": 4.335483870967742,
"grad_norm": 0.07009419053792953,
"learning_rate": 9.837219693979963e-05,
"loss": 0.0135,
"step": 672
},
{
"epoch": 4.341935483870968,
"grad_norm": 0.06749093532562256,
"learning_rate": 9.836077651698574e-05,
"loss": 0.0098,
"step": 673
},
{
"epoch": 4.348387096774194,
"grad_norm": 0.057939253747463226,
"learning_rate": 9.834931683970468e-05,
"loss": 0.0105,
"step": 674
},
{
"epoch": 4.354838709677419,
"grad_norm": 0.08264298737049103,
"learning_rate": 9.833781791725825e-05,
"loss": 0.0126,
"step": 675
},
{
"epoch": 4.354838709677419,
"eval_loss": 0.028300685808062553,
"eval_runtime": 20.6434,
"eval_samples_per_second": 4.844,
"eval_steps_per_second": 0.097,
"step": 675
},
{
"epoch": 4.361290322580645,
"grad_norm": 0.0651557594537735,
"learning_rate": 9.832627975898015e-05,
"loss": 0.0143,
"step": 676
},
{
"epoch": 4.367741935483871,
"grad_norm": 0.07030880451202393,
"learning_rate": 9.831470237423592e-05,
"loss": 0.0143,
"step": 677
},
{
"epoch": 4.374193548387097,
"grad_norm": 0.06443504244089127,
"learning_rate": 9.83030857724229e-05,
"loss": 0.0128,
"step": 678
},
{
"epoch": 4.380645161290323,
"grad_norm": 0.08351744711399078,
"learning_rate": 9.82914299629703e-05,
"loss": 0.0133,
"step": 679
},
{
"epoch": 4.387096774193548,
"grad_norm": 0.09203293919563293,
"learning_rate": 9.827973495533915e-05,
"loss": 0.0196,
"step": 680
},
{
"epoch": 4.393548387096774,
"grad_norm": 0.06061858683824539,
"learning_rate": 9.826800075902232e-05,
"loss": 0.0117,
"step": 681
},
{
"epoch": 4.4,
"grad_norm": 0.0718703344464302,
"learning_rate": 9.825622738354441e-05,
"loss": 0.016,
"step": 682
},
{
"epoch": 4.406451612903226,
"grad_norm": 0.07132145017385483,
"learning_rate": 9.824441483846191e-05,
"loss": 0.0149,
"step": 683
},
{
"epoch": 4.412903225806452,
"grad_norm": 0.05910409241914749,
"learning_rate": 9.823256313336308e-05,
"loss": 0.0125,
"step": 684
},
{
"epoch": 4.419354838709677,
"grad_norm": 0.06985270231962204,
"learning_rate": 9.822067227786793e-05,
"loss": 0.0155,
"step": 685
},
{
"epoch": 4.425806451612903,
"grad_norm": 0.0629580095410347,
"learning_rate": 9.820874228162827e-05,
"loss": 0.0119,
"step": 686
},
{
"epoch": 4.432258064516129,
"grad_norm": 0.07799185812473297,
"learning_rate": 9.81967731543277e-05,
"loss": 0.0143,
"step": 687
},
{
"epoch": 4.438709677419355,
"grad_norm": 0.08001742511987686,
"learning_rate": 9.818476490568157e-05,
"loss": 0.0119,
"step": 688
},
{
"epoch": 4.445161290322581,
"grad_norm": 0.058317631483078,
"learning_rate": 9.817271754543698e-05,
"loss": 0.0114,
"step": 689
},
{
"epoch": 4.451612903225806,
"grad_norm": 0.08109927922487259,
"learning_rate": 9.816063108337274e-05,
"loss": 0.013,
"step": 690
},
{
"epoch": 4.458064516129032,
"grad_norm": 0.07449546456336975,
"learning_rate": 9.81485055292995e-05,
"loss": 0.0129,
"step": 691
},
{
"epoch": 4.464516129032258,
"grad_norm": 0.07314932346343994,
"learning_rate": 9.813634089305955e-05,
"loss": 0.0128,
"step": 692
},
{
"epoch": 4.470967741935484,
"grad_norm": 0.07322119176387787,
"learning_rate": 9.81241371845269e-05,
"loss": 0.0141,
"step": 693
},
{
"epoch": 4.47741935483871,
"grad_norm": 0.07292065024375916,
"learning_rate": 9.811189441360737e-05,
"loss": 0.0131,
"step": 694
},
{
"epoch": 4.483870967741936,
"grad_norm": 0.07420699298381805,
"learning_rate": 9.809961259023838e-05,
"loss": 0.0132,
"step": 695
},
{
"epoch": 4.490322580645161,
"grad_norm": 0.07527197152376175,
"learning_rate": 9.808729172438908e-05,
"loss": 0.0114,
"step": 696
},
{
"epoch": 4.496774193548387,
"grad_norm": 0.07561551779508591,
"learning_rate": 9.807493182606037e-05,
"loss": 0.014,
"step": 697
},
{
"epoch": 4.503225806451613,
"grad_norm": 0.0866156667470932,
"learning_rate": 9.806253290528476e-05,
"loss": 0.0153,
"step": 698
},
{
"epoch": 4.509677419354839,
"grad_norm": 0.07429295778274536,
"learning_rate": 9.805009497212645e-05,
"loss": 0.0154,
"step": 699
},
{
"epoch": 4.516129032258064,
"grad_norm": 0.06641758978366852,
"learning_rate": 9.803761803668135e-05,
"loss": 0.0141,
"step": 700
},
{
"epoch": 4.516129032258064,
"eval_loss": 0.026979686692357063,
"eval_runtime": 20.638,
"eval_samples_per_second": 4.845,
"eval_steps_per_second": 0.097,
"step": 700
},
{
"epoch": 4.52258064516129,
"grad_norm": 0.0701785758137703,
"learning_rate": 9.802510210907694e-05,
"loss": 0.0155,
"step": 701
},
{
"epoch": 4.529032258064516,
"grad_norm": 0.06845016777515411,
"learning_rate": 9.801254719947246e-05,
"loss": 0.0134,
"step": 702
},
{
"epoch": 4.535483870967742,
"grad_norm": 0.05970216915011406,
"learning_rate": 9.799995331805871e-05,
"loss": 0.0129,
"step": 703
},
{
"epoch": 4.541935483870968,
"grad_norm": 0.0771360918879509,
"learning_rate": 9.798732047505815e-05,
"loss": 0.0234,
"step": 704
},
{
"epoch": 4.548387096774194,
"grad_norm": 0.06995438039302826,
"learning_rate": 9.797464868072488e-05,
"loss": 0.0149,
"step": 705
},
{
"epoch": 4.554838709677419,
"grad_norm": 0.055232495069503784,
"learning_rate": 9.796193794534458e-05,
"loss": 0.0117,
"step": 706
},
{
"epoch": 4.561290322580645,
"grad_norm": 0.06754062324762344,
"learning_rate": 9.794918827923458e-05,
"loss": 0.0113,
"step": 707
},
{
"epoch": 4.567741935483871,
"grad_norm": 0.06377311795949936,
"learning_rate": 9.793639969274378e-05,
"loss": 0.012,
"step": 708
},
{
"epoch": 4.574193548387097,
"grad_norm": 0.07187410444021225,
"learning_rate": 9.792357219625268e-05,
"loss": 0.0134,
"step": 709
},
{
"epoch": 4.580645161290323,
"grad_norm": 0.06659362465143204,
"learning_rate": 9.791070580017337e-05,
"loss": 0.0125,
"step": 710
},
{
"epoch": 4.587096774193548,
"grad_norm": 0.07713113725185394,
"learning_rate": 9.789780051494952e-05,
"loss": 0.0127,
"step": 711
},
{
"epoch": 4.593548387096774,
"grad_norm": 0.07113997638225555,
"learning_rate": 9.788485635105635e-05,
"loss": 0.0143,
"step": 712
},
{
"epoch": 4.6,
"grad_norm": 0.06326919794082642,
"learning_rate": 9.787187331900062e-05,
"loss": 0.0101,
"step": 713
},
{
"epoch": 4.606451612903226,
"grad_norm": 0.054118432104587555,
"learning_rate": 9.785885142932072e-05,
"loss": 0.0109,
"step": 714
},
{
"epoch": 4.612903225806452,
"grad_norm": 0.0691436380147934,
"learning_rate": 9.784579069258646e-05,
"loss": 0.0132,
"step": 715
},
{
"epoch": 4.619354838709677,
"grad_norm": 0.06021921709179878,
"learning_rate": 9.78326911193993e-05,
"loss": 0.0114,
"step": 716
},
{
"epoch": 4.625806451612903,
"grad_norm": 0.06648483127355576,
"learning_rate": 9.781955272039213e-05,
"loss": 0.0126,
"step": 717
},
{
"epoch": 4.632258064516129,
"grad_norm": 0.0594315268099308,
"learning_rate": 9.780637550622941e-05,
"loss": 0.0123,
"step": 718
},
{
"epoch": 4.638709677419355,
"grad_norm": 0.05935937538743019,
"learning_rate": 9.77931594876071e-05,
"loss": 0.0099,
"step": 719
},
{
"epoch": 4.645161290322581,
"grad_norm": 0.07264725118875504,
"learning_rate": 9.777990467525265e-05,
"loss": 0.0133,
"step": 720
},
{
"epoch": 4.651612903225806,
"grad_norm": 0.0625627338886261,
"learning_rate": 9.7766611079925e-05,
"loss": 0.0137,
"step": 721
},
{
"epoch": 4.658064516129032,
"grad_norm": 0.07260245829820633,
"learning_rate": 9.775327871241453e-05,
"loss": 0.0119,
"step": 722
},
{
"epoch": 4.664516129032258,
"grad_norm": 0.06769385188817978,
"learning_rate": 9.773990758354317e-05,
"loss": 0.011,
"step": 723
},
{
"epoch": 4.670967741935484,
"grad_norm": 0.07323268800973892,
"learning_rate": 9.772649770416428e-05,
"loss": 0.0104,
"step": 724
},
{
"epoch": 4.67741935483871,
"grad_norm": 0.07127388566732407,
"learning_rate": 9.771304908516263e-05,
"loss": 0.0133,
"step": 725
},
{
"epoch": 4.67741935483871,
"eval_loss": 0.02536088228225708,
"eval_runtime": 20.6309,
"eval_samples_per_second": 4.847,
"eval_steps_per_second": 0.097,
"step": 725
},
{
"epoch": 4.683870967741935,
"grad_norm": 0.07082170993089676,
"learning_rate": 9.769956173745449e-05,
"loss": 0.0098,
"step": 726
},
{
"epoch": 4.690322580645161,
"grad_norm": 0.06554052233695984,
"learning_rate": 9.768603567198755e-05,
"loss": 0.0122,
"step": 727
},
{
"epoch": 4.6967741935483875,
"grad_norm": 0.06847799569368362,
"learning_rate": 9.767247089974091e-05,
"loss": 0.0111,
"step": 728
},
{
"epoch": 4.703225806451613,
"grad_norm": 0.06368619948625565,
"learning_rate": 9.765886743172511e-05,
"loss": 0.0112,
"step": 729
},
{
"epoch": 4.709677419354839,
"grad_norm": 0.07584304362535477,
"learning_rate": 9.76452252789821e-05,
"loss": 0.0132,
"step": 730
},
{
"epoch": 4.716129032258064,
"grad_norm": 0.0686490386724472,
"learning_rate": 9.763154445258522e-05,
"loss": 0.0145,
"step": 731
},
{
"epoch": 4.72258064516129,
"grad_norm": 0.07918071746826172,
"learning_rate": 9.76178249636392e-05,
"loss": 0.0116,
"step": 732
},
{
"epoch": 4.729032258064516,
"grad_norm": 0.07499564439058304,
"learning_rate": 9.760406682328015e-05,
"loss": 0.0112,
"step": 733
},
{
"epoch": 4.735483870967742,
"grad_norm": 0.07125238329172134,
"learning_rate": 9.759027004267558e-05,
"loss": 0.0129,
"step": 734
},
{
"epoch": 4.741935483870968,
"grad_norm": 0.0714704766869545,
"learning_rate": 9.757643463302433e-05,
"loss": 0.013,
"step": 735
},
{
"epoch": 4.748387096774193,
"grad_norm": 0.057601042091846466,
"learning_rate": 9.756256060555659e-05,
"loss": 0.0105,
"step": 736
},
{
"epoch": 4.754838709677419,
"grad_norm": 0.07206641882658005,
"learning_rate": 9.754864797153394e-05,
"loss": 0.0112,
"step": 737
},
{
"epoch": 4.7612903225806456,
"grad_norm": 0.07657714933156967,
"learning_rate": 9.753469674224928e-05,
"loss": 0.0173,
"step": 738
},
{
"epoch": 4.767741935483871,
"grad_norm": 0.07161399722099304,
"learning_rate": 9.752070692902679e-05,
"loss": 0.0128,
"step": 739
},
{
"epoch": 4.774193548387097,
"grad_norm": 0.059535328298807144,
"learning_rate": 9.750667854322207e-05,
"loss": 0.012,
"step": 740
},
{
"epoch": 4.780645161290322,
"grad_norm": 0.05769206956028938,
"learning_rate": 9.749261159622192e-05,
"loss": 0.0108,
"step": 741
},
{
"epoch": 4.787096774193548,
"grad_norm": 0.0739009827375412,
"learning_rate": 9.747850609944449e-05,
"loss": 0.0124,
"step": 742
},
{
"epoch": 4.7935483870967746,
"grad_norm": 0.059828855097293854,
"learning_rate": 9.746436206433923e-05,
"loss": 0.01,
"step": 743
},
{
"epoch": 4.8,
"grad_norm": 0.05995893478393555,
"learning_rate": 9.745017950238687e-05,
"loss": 0.0091,
"step": 744
},
{
"epoch": 4.806451612903226,
"grad_norm": 0.08231929689645767,
"learning_rate": 9.74359584250994e-05,
"loss": 0.0148,
"step": 745
},
{
"epoch": 4.812903225806451,
"grad_norm": 0.06564637273550034,
"learning_rate": 9.742169884402006e-05,
"loss": 0.011,
"step": 746
},
{
"epoch": 4.819354838709677,
"grad_norm": 0.06673679500818253,
"learning_rate": 9.740740077072337e-05,
"loss": 0.0119,
"step": 747
},
{
"epoch": 4.825806451612904,
"grad_norm": 0.07423175126314163,
"learning_rate": 9.739306421681506e-05,
"loss": 0.0114,
"step": 748
},
{
"epoch": 4.832258064516129,
"grad_norm": 0.06966380029916763,
"learning_rate": 9.737868919393218e-05,
"loss": 0.0124,
"step": 749
},
{
"epoch": 4.838709677419355,
"grad_norm": 0.07534418255090714,
"learning_rate": 9.736427571374288e-05,
"loss": 0.0098,
"step": 750
},
{
"epoch": 4.838709677419355,
"eval_loss": 0.025038301944732666,
"eval_runtime": 20.6375,
"eval_samples_per_second": 4.846,
"eval_steps_per_second": 0.097,
"step": 750
},
{
"epoch": 4.845161290322581,
"grad_norm": 0.06416190415620804,
"learning_rate": 9.734982378794661e-05,
"loss": 0.0092,
"step": 751
},
{
"epoch": 4.851612903225806,
"grad_norm": 0.0716695487499237,
"learning_rate": 9.733533342827403e-05,
"loss": 0.0118,
"step": 752
},
{
"epoch": 4.858064516129033,
"grad_norm": 0.058158546686172485,
"learning_rate": 9.732080464648694e-05,
"loss": 0.0107,
"step": 753
},
{
"epoch": 4.864516129032258,
"grad_norm": 0.07941026240587234,
"learning_rate": 9.730623745437836e-05,
"loss": 0.0106,
"step": 754
},
{
"epoch": 4.870967741935484,
"grad_norm": 0.06183311715722084,
"learning_rate": 9.72916318637725e-05,
"loss": 0.0101,
"step": 755
},
{
"epoch": 4.877419354838709,
"grad_norm": 0.07516051828861237,
"learning_rate": 9.727698788652474e-05,
"loss": 0.013,
"step": 756
},
{
"epoch": 4.883870967741935,
"grad_norm": 0.06480251252651215,
"learning_rate": 9.726230553452158e-05,
"loss": 0.0109,
"step": 757
},
{
"epoch": 4.890322580645162,
"grad_norm": 0.08192919194698334,
"learning_rate": 9.72475848196807e-05,
"loss": 0.0128,
"step": 758
},
{
"epoch": 4.896774193548387,
"grad_norm": 0.07532630115747452,
"learning_rate": 9.72328257539509e-05,
"loss": 0.0143,
"step": 759
},
{
"epoch": 4.903225806451613,
"grad_norm": 0.06316731870174408,
"learning_rate": 9.721802834931215e-05,
"loss": 0.0116,
"step": 760
},
{
"epoch": 4.909677419354839,
"grad_norm": 0.0601482093334198,
"learning_rate": 9.720319261777549e-05,
"loss": 0.0113,
"step": 761
},
{
"epoch": 4.916129032258064,
"grad_norm": 0.05570144206285477,
"learning_rate": 9.718831857138309e-05,
"loss": 0.0124,
"step": 762
},
{
"epoch": 4.922580645161291,
"grad_norm": 0.06473618745803833,
"learning_rate": 9.717340622220823e-05,
"loss": 0.0127,
"step": 763
},
{
"epoch": 4.929032258064516,
"grad_norm": 0.05129361152648926,
"learning_rate": 9.715845558235525e-05,
"loss": 0.0097,
"step": 764
},
{
"epoch": 4.935483870967742,
"grad_norm": 0.05756600573658943,
"learning_rate": 9.714346666395963e-05,
"loss": 0.0114,
"step": 765
},
{
"epoch": 4.941935483870967,
"grad_norm": 0.057772960513830185,
"learning_rate": 9.712843947918786e-05,
"loss": 0.0097,
"step": 766
},
{
"epoch": 4.948387096774193,
"grad_norm": 0.06774067878723145,
"learning_rate": 9.711337404023752e-05,
"loss": 0.011,
"step": 767
},
{
"epoch": 4.95483870967742,
"grad_norm": 0.06922607868909836,
"learning_rate": 9.709827035933722e-05,
"loss": 0.0112,
"step": 768
},
{
"epoch": 4.961290322580645,
"grad_norm": 0.06101693958044052,
"learning_rate": 9.708312844874664e-05,
"loss": 0.0108,
"step": 769
},
{
"epoch": 4.967741935483871,
"grad_norm": 0.07150861620903015,
"learning_rate": 9.706794832075647e-05,
"loss": 0.0144,
"step": 770
},
{
"epoch": 4.974193548387097,
"grad_norm": 0.07714319974184036,
"learning_rate": 9.705272998768844e-05,
"loss": 0.0138,
"step": 771
},
{
"epoch": 4.980645161290322,
"grad_norm": 0.06570860743522644,
"learning_rate": 9.703747346189527e-05,
"loss": 0.0113,
"step": 772
},
{
"epoch": 4.987096774193549,
"grad_norm": 0.06901867687702179,
"learning_rate": 9.702217875576068e-05,
"loss": 0.0119,
"step": 773
},
{
"epoch": 4.993548387096774,
"grad_norm": 0.06524281948804855,
"learning_rate": 9.700684588169939e-05,
"loss": 0.0112,
"step": 774
},
{
"epoch": 5.0,
"grad_norm": 0.07196003198623657,
"learning_rate": 9.699147485215713e-05,
"loss": 0.0093,
"step": 775
},
{
"epoch": 5.0,
"eval_loss": 0.023430930450558662,
"eval_runtime": 20.6296,
"eval_samples_per_second": 4.847,
"eval_steps_per_second": 0.097,
"step": 775
},
{
"epoch": 5.006451612903226,
"grad_norm": 0.05366189032793045,
"learning_rate": 9.697606567961056e-05,
"loss": 0.009,
"step": 776
},
{
"epoch": 5.012903225806451,
"grad_norm": 0.06412062793970108,
"learning_rate": 9.696061837656727e-05,
"loss": 0.0091,
"step": 777
},
{
"epoch": 5.019354838709678,
"grad_norm": 0.06010841950774193,
"learning_rate": 9.694513295556591e-05,
"loss": 0.0103,
"step": 778
},
{
"epoch": 5.025806451612903,
"grad_norm": 0.057299621403217316,
"learning_rate": 9.692960942917597e-05,
"loss": 0.0083,
"step": 779
},
{
"epoch": 5.032258064516129,
"grad_norm": 0.06616218388080597,
"learning_rate": 9.69140478099979e-05,
"loss": 0.0096,
"step": 780
},
{
"epoch": 5.038709677419355,
"grad_norm": 0.06639255583286285,
"learning_rate": 9.689844811066311e-05,
"loss": 0.0081,
"step": 781
},
{
"epoch": 5.04516129032258,
"grad_norm": 0.05673486739397049,
"learning_rate": 9.688281034383384e-05,
"loss": 0.0069,
"step": 782
},
{
"epoch": 5.051612903225807,
"grad_norm": 0.0635608583688736,
"learning_rate": 9.68671345222033e-05,
"loss": 0.0078,
"step": 783
},
{
"epoch": 5.058064516129032,
"grad_norm": 0.07250364869832993,
"learning_rate": 9.685142065849556e-05,
"loss": 0.0084,
"step": 784
},
{
"epoch": 5.064516129032258,
"grad_norm": 0.0699780210852623,
"learning_rate": 9.683566876546555e-05,
"loss": 0.0065,
"step": 785
},
{
"epoch": 5.070967741935484,
"grad_norm": 0.07433956116437912,
"learning_rate": 9.68198788558991e-05,
"loss": 0.0084,
"step": 786
},
{
"epoch": 5.077419354838709,
"grad_norm": 0.06806059181690216,
"learning_rate": 9.680405094261291e-05,
"loss": 0.0074,
"step": 787
},
{
"epoch": 5.083870967741936,
"grad_norm": 0.058462999761104584,
"learning_rate": 9.678818503845448e-05,
"loss": 0.0082,
"step": 788
},
{
"epoch": 5.090322580645161,
"grad_norm": 0.05572250112891197,
"learning_rate": 9.677228115630218e-05,
"loss": 0.0065,
"step": 789
},
{
"epoch": 5.096774193548387,
"grad_norm": 0.07094363868236542,
"learning_rate": 9.67563393090652e-05,
"loss": 0.0072,
"step": 790
},
{
"epoch": 5.103225806451613,
"grad_norm": 0.05128363147377968,
"learning_rate": 9.674035950968354e-05,
"loss": 0.0068,
"step": 791
},
{
"epoch": 5.109677419354838,
"grad_norm": 0.06875258684158325,
"learning_rate": 9.672434177112803e-05,
"loss": 0.0078,
"step": 792
},
{
"epoch": 5.116129032258065,
"grad_norm": 0.05699928104877472,
"learning_rate": 9.670828610640026e-05,
"loss": 0.0085,
"step": 793
},
{
"epoch": 5.122580645161291,
"grad_norm": 0.06592380255460739,
"learning_rate": 9.669219252853261e-05,
"loss": 0.0078,
"step": 794
},
{
"epoch": 5.129032258064516,
"grad_norm": 0.07786811143159866,
"learning_rate": 9.667606105058828e-05,
"loss": 0.0092,
"step": 795
},
{
"epoch": 5.135483870967742,
"grad_norm": 0.05944572389125824,
"learning_rate": 9.665989168566116e-05,
"loss": 0.0065,
"step": 796
},
{
"epoch": 5.141935483870967,
"grad_norm": 0.06329312175512314,
"learning_rate": 9.664368444687597e-05,
"loss": 0.0087,
"step": 797
},
{
"epoch": 5.148387096774194,
"grad_norm": 0.073185496032238,
"learning_rate": 9.662743934738811e-05,
"loss": 0.0081,
"step": 798
},
{
"epoch": 5.15483870967742,
"grad_norm": 0.08725328743457794,
"learning_rate": 9.661115640038373e-05,
"loss": 0.008,
"step": 799
},
{
"epoch": 5.161290322580645,
"grad_norm": 0.059760626405477524,
"learning_rate": 9.659483561907974e-05,
"loss": 0.0073,
"step": 800
},
{
"epoch": 5.161290322580645,
"eval_loss": 0.02465805411338806,
"eval_runtime": 20.6536,
"eval_samples_per_second": 4.842,
"eval_steps_per_second": 0.097,
"step": 800
},
{
"epoch": 5.167741935483871,
"grad_norm": 0.06096651777625084,
"learning_rate": 9.657847701672368e-05,
"loss": 0.0071,
"step": 801
},
{
"epoch": 5.174193548387096,
"grad_norm": 0.05319224298000336,
"learning_rate": 9.656208060659387e-05,
"loss": 0.0065,
"step": 802
},
{
"epoch": 5.180645161290323,
"grad_norm": 0.052184510976076126,
"learning_rate": 9.654564640199927e-05,
"loss": 0.0069,
"step": 803
},
{
"epoch": 5.187096774193549,
"grad_norm": 0.06279890239238739,
"learning_rate": 9.652917441627954e-05,
"loss": 0.0104,
"step": 804
},
{
"epoch": 5.193548387096774,
"grad_norm": 0.07697561383247375,
"learning_rate": 9.651266466280499e-05,
"loss": 0.0091,
"step": 805
},
{
"epoch": 5.2,
"grad_norm": 0.07125485688447952,
"learning_rate": 9.649611715497662e-05,
"loss": 0.008,
"step": 806
},
{
"epoch": 5.2064516129032254,
"grad_norm": 0.051803454756736755,
"learning_rate": 9.647953190622602e-05,
"loss": 0.0062,
"step": 807
},
{
"epoch": 5.212903225806452,
"grad_norm": 0.05186665058135986,
"learning_rate": 9.646290893001547e-05,
"loss": 0.0066,
"step": 808
},
{
"epoch": 5.219354838709678,
"grad_norm": 0.056108903139829636,
"learning_rate": 9.644624823983782e-05,
"loss": 0.0075,
"step": 809
},
{
"epoch": 5.225806451612903,
"grad_norm": 0.07074058055877686,
"learning_rate": 9.642954984921657e-05,
"loss": 0.0103,
"step": 810
},
{
"epoch": 5.232258064516129,
"grad_norm": 0.05524521321058273,
"learning_rate": 9.641281377170581e-05,
"loss": 0.007,
"step": 811
},
{
"epoch": 5.2387096774193544,
"grad_norm": 0.05544526129961014,
"learning_rate": 9.639604002089025e-05,
"loss": 0.0084,
"step": 812
},
{
"epoch": 5.245161290322581,
"grad_norm": 0.06004936620593071,
"learning_rate": 9.637922861038513e-05,
"loss": 0.0069,
"step": 813
},
{
"epoch": 5.251612903225807,
"grad_norm": 0.06896119564771652,
"learning_rate": 9.636237955383629e-05,
"loss": 0.0137,
"step": 814
},
{
"epoch": 5.258064516129032,
"grad_norm": 0.07235223799943924,
"learning_rate": 9.63454928649201e-05,
"loss": 0.0106,
"step": 815
},
{
"epoch": 5.264516129032258,
"grad_norm": 0.06411406397819519,
"learning_rate": 9.632856855734352e-05,
"loss": 0.0083,
"step": 816
},
{
"epoch": 5.270967741935484,
"grad_norm": 0.057488951832056046,
"learning_rate": 9.631160664484398e-05,
"loss": 0.0079,
"step": 817
},
{
"epoch": 5.27741935483871,
"grad_norm": 0.04805266857147217,
"learning_rate": 9.629460714118952e-05,
"loss": 0.0061,
"step": 818
},
{
"epoch": 5.283870967741936,
"grad_norm": 0.06258268654346466,
"learning_rate": 9.627757006017861e-05,
"loss": 0.0092,
"step": 819
},
{
"epoch": 5.290322580645161,
"grad_norm": 0.051763035356998444,
"learning_rate": 9.626049541564028e-05,
"loss": 0.0067,
"step": 820
},
{
"epoch": 5.296774193548387,
"grad_norm": 0.06160223111510277,
"learning_rate": 9.624338322143403e-05,
"loss": 0.0091,
"step": 821
},
{
"epoch": 5.3032258064516125,
"grad_norm": 0.06290455907583237,
"learning_rate": 9.622623349144982e-05,
"loss": 0.008,
"step": 822
},
{
"epoch": 5.309677419354839,
"grad_norm": 0.059179775416851044,
"learning_rate": 9.620904623960808e-05,
"loss": 0.0081,
"step": 823
},
{
"epoch": 5.316129032258065,
"grad_norm": 0.05398336052894592,
"learning_rate": 9.619182147985975e-05,
"loss": 0.0072,
"step": 824
},
{
"epoch": 5.32258064516129,
"grad_norm": 0.06514342129230499,
"learning_rate": 9.617455922618614e-05,
"loss": 0.0087,
"step": 825
},
{
"epoch": 5.32258064516129,
"eval_loss": 0.02535240724682808,
"eval_runtime": 20.6322,
"eval_samples_per_second": 4.847,
"eval_steps_per_second": 0.097,
"step": 825
},
{
"epoch": 5.329032258064516,
"grad_norm": 0.0695829913020134,
"learning_rate": 9.615725949259906e-05,
"loss": 0.0089,
"step": 826
},
{
"epoch": 5.335483870967742,
"grad_norm": 0.06091795116662979,
"learning_rate": 9.61399222931407e-05,
"loss": 0.0076,
"step": 827
},
{
"epoch": 5.341935483870968,
"grad_norm": 0.055582962930202484,
"learning_rate": 9.612254764188369e-05,
"loss": 0.0079,
"step": 828
},
{
"epoch": 5.348387096774194,
"grad_norm": 0.07405589520931244,
"learning_rate": 9.610513555293101e-05,
"loss": 0.0105,
"step": 829
},
{
"epoch": 5.354838709677419,
"grad_norm": 0.050850674510002136,
"learning_rate": 9.608768604041609e-05,
"loss": 0.0066,
"step": 830
},
{
"epoch": 5.361290322580645,
"grad_norm": 0.05596979707479477,
"learning_rate": 9.607019911850269e-05,
"loss": 0.0083,
"step": 831
},
{
"epoch": 5.367741935483871,
"grad_norm": 0.06340134888887405,
"learning_rate": 9.605267480138496e-05,
"loss": 0.0109,
"step": 832
},
{
"epoch": 5.374193548387097,
"grad_norm": 0.053815796971321106,
"learning_rate": 9.60351131032874e-05,
"loss": 0.0074,
"step": 833
},
{
"epoch": 5.380645161290323,
"grad_norm": 0.07047566026449203,
"learning_rate": 9.601751403846486e-05,
"loss": 0.007,
"step": 834
},
{
"epoch": 5.387096774193548,
"grad_norm": 0.06638946384191513,
"learning_rate": 9.599987762120248e-05,
"loss": 0.0095,
"step": 835
},
{
"epoch": 5.393548387096774,
"grad_norm": 0.07727184891700745,
"learning_rate": 9.598220386581577e-05,
"loss": 0.0098,
"step": 836
},
{
"epoch": 5.4,
"grad_norm": 0.07882851362228394,
"learning_rate": 9.596449278665053e-05,
"loss": 0.0081,
"step": 837
},
{
"epoch": 5.406451612903226,
"grad_norm": 0.055430445820093155,
"learning_rate": 9.594674439808284e-05,
"loss": 0.0074,
"step": 838
},
{
"epoch": 5.412903225806452,
"grad_norm": 0.07451054453849792,
"learning_rate": 9.592895871451908e-05,
"loss": 0.0071,
"step": 839
},
{
"epoch": 5.419354838709677,
"grad_norm": 0.054919954389333725,
"learning_rate": 9.591113575039589e-05,
"loss": 0.008,
"step": 840
},
{
"epoch": 5.425806451612903,
"grad_norm": 0.0726809874176979,
"learning_rate": 9.589327552018021e-05,
"loss": 0.0079,
"step": 841
},
{
"epoch": 5.432258064516129,
"grad_norm": 0.06910938024520874,
"learning_rate": 9.587537803836918e-05,
"loss": 0.007,
"step": 842
},
{
"epoch": 5.438709677419355,
"grad_norm": 0.052713364362716675,
"learning_rate": 9.585744331949018e-05,
"loss": 0.0067,
"step": 843
},
{
"epoch": 5.445161290322581,
"grad_norm": 0.06306347250938416,
"learning_rate": 9.583947137810085e-05,
"loss": 0.0085,
"step": 844
},
{
"epoch": 5.451612903225806,
"grad_norm": 0.06878506392240524,
"learning_rate": 9.5821462228789e-05,
"loss": 0.0091,
"step": 845
},
{
"epoch": 5.458064516129032,
"grad_norm": 0.07872054725885391,
"learning_rate": 9.58034158861727e-05,
"loss": 0.0108,
"step": 846
},
{
"epoch": 5.464516129032258,
"grad_norm": 0.06477072834968567,
"learning_rate": 9.578533236490015e-05,
"loss": 0.0091,
"step": 847
},
{
"epoch": 5.470967741935484,
"grad_norm": 0.07100635021924973,
"learning_rate": 9.576721167964975e-05,
"loss": 0.0094,
"step": 848
},
{
"epoch": 5.47741935483871,
"grad_norm": 0.06277081370353699,
"learning_rate": 9.57490538451301e-05,
"loss": 0.0087,
"step": 849
},
{
"epoch": 5.483870967741936,
"grad_norm": 0.06297729909420013,
"learning_rate": 9.573085887607991e-05,
"loss": 0.0102,
"step": 850
},
{
"epoch": 5.483870967741936,
"eval_loss": 0.02419852837920189,
"eval_runtime": 20.6309,
"eval_samples_per_second": 4.847,
"eval_steps_per_second": 0.097,
"step": 850
},
{
"epoch": 5.490322580645161,
"grad_norm": 0.05889495834708214,
"learning_rate": 9.571262678726802e-05,
"loss": 0.0075,
"step": 851
},
{
"epoch": 5.496774193548387,
"grad_norm": 0.07738583534955978,
"learning_rate": 9.569435759349347e-05,
"loss": 0.0098,
"step": 852
},
{
"epoch": 5.503225806451613,
"grad_norm": 0.06201765313744545,
"learning_rate": 9.567605130958532e-05,
"loss": 0.008,
"step": 853
},
{
"epoch": 5.509677419354839,
"grad_norm": 0.0606846958398819,
"learning_rate": 9.565770795040286e-05,
"loss": 0.0086,
"step": 854
},
{
"epoch": 5.516129032258064,
"grad_norm": 0.06933721899986267,
"learning_rate": 9.563932753083534e-05,
"loss": 0.009,
"step": 855
},
{
"epoch": 5.52258064516129,
"grad_norm": 0.05704152211546898,
"learning_rate": 9.562091006580219e-05,
"loss": 0.0081,
"step": 856
},
{
"epoch": 5.529032258064516,
"grad_norm": 0.046698153018951416,
"learning_rate": 9.560245557025285e-05,
"loss": 0.0069,
"step": 857
},
{
"epoch": 5.535483870967742,
"grad_norm": 0.05271074175834656,
"learning_rate": 9.558396405916686e-05,
"loss": 0.0069,
"step": 858
},
{
"epoch": 5.541935483870968,
"grad_norm": 0.053661540150642395,
"learning_rate": 9.55654355475538e-05,
"loss": 0.0092,
"step": 859
},
{
"epoch": 5.548387096774194,
"grad_norm": 0.0608212947845459,
"learning_rate": 9.554687005045327e-05,
"loss": 0.0082,
"step": 860
},
{
"epoch": 5.554838709677419,
"grad_norm": 0.06301247328519821,
"learning_rate": 9.552826758293487e-05,
"loss": 0.0084,
"step": 861
},
{
"epoch": 5.561290322580645,
"grad_norm": 0.06680440157651901,
"learning_rate": 9.550962816009825e-05,
"loss": 0.0094,
"step": 862
},
{
"epoch": 5.567741935483871,
"grad_norm": 0.06409584730863571,
"learning_rate": 9.549095179707304e-05,
"loss": 0.01,
"step": 863
},
{
"epoch": 5.574193548387097,
"grad_norm": 0.07452081888914108,
"learning_rate": 9.547223850901885e-05,
"loss": 0.0091,
"step": 864
},
{
"epoch": 5.580645161290323,
"grad_norm": 0.07207295298576355,
"learning_rate": 9.545348831112525e-05,
"loss": 0.0081,
"step": 865
},
{
"epoch": 5.587096774193548,
"grad_norm": 0.05220545455813408,
"learning_rate": 9.543470121861183e-05,
"loss": 0.0078,
"step": 866
},
{
"epoch": 5.593548387096774,
"grad_norm": 0.05606219172477722,
"learning_rate": 9.541587724672805e-05,
"loss": 0.0072,
"step": 867
},
{
"epoch": 5.6,
"grad_norm": 0.060226067900657654,
"learning_rate": 9.539701641075333e-05,
"loss": 0.0078,
"step": 868
},
{
"epoch": 5.606451612903226,
"grad_norm": 0.05184217914938927,
"learning_rate": 9.537811872599702e-05,
"loss": 0.0082,
"step": 869
},
{
"epoch": 5.612903225806452,
"grad_norm": 0.052946384996175766,
"learning_rate": 9.535918420779841e-05,
"loss": 0.0063,
"step": 870
},
{
"epoch": 5.619354838709677,
"grad_norm": 0.057897213846445084,
"learning_rate": 9.534021287152663e-05,
"loss": 0.0084,
"step": 871
},
{
"epoch": 5.625806451612903,
"grad_norm": 0.06058947369456291,
"learning_rate": 9.532120473258075e-05,
"loss": 0.0089,
"step": 872
},
{
"epoch": 5.632258064516129,
"grad_norm": 0.06276161968708038,
"learning_rate": 9.530215980638966e-05,
"loss": 0.009,
"step": 873
},
{
"epoch": 5.638709677419355,
"grad_norm": 0.05766284838318825,
"learning_rate": 9.528307810841214e-05,
"loss": 0.0094,
"step": 874
},
{
"epoch": 5.645161290322581,
"grad_norm": 0.0866272896528244,
"learning_rate": 9.526395965413682e-05,
"loss": 0.0077,
"step": 875
},
{
"epoch": 5.645161290322581,
"eval_loss": 0.023009639233350754,
"eval_runtime": 20.6229,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.097,
"step": 875
},
{
"epoch": 5.651612903225806,
"grad_norm": 0.056562308222055435,
"learning_rate": 9.524480445908216e-05,
"loss": 0.0073,
"step": 876
},
{
"epoch": 5.658064516129032,
"grad_norm": 0.07527109235525131,
"learning_rate": 9.522561253879644e-05,
"loss": 0.0086,
"step": 877
},
{
"epoch": 5.664516129032258,
"grad_norm": 0.06116427481174469,
"learning_rate": 9.520638390885777e-05,
"loss": 0.0086,
"step": 878
},
{
"epoch": 5.670967741935484,
"grad_norm": 0.0696583017706871,
"learning_rate": 9.5187118584874e-05,
"loss": 0.0091,
"step": 879
},
{
"epoch": 5.67741935483871,
"grad_norm": 0.05933855101466179,
"learning_rate": 9.516781658248284e-05,
"loss": 0.0076,
"step": 880
},
{
"epoch": 5.683870967741935,
"grad_norm": 0.05984939634799957,
"learning_rate": 9.514847791735171e-05,
"loss": 0.0078,
"step": 881
},
{
"epoch": 5.690322580645161,
"grad_norm": 0.05205286294221878,
"learning_rate": 9.512910260517782e-05,
"loss": 0.0087,
"step": 882
},
{
"epoch": 5.6967741935483875,
"grad_norm": 0.05857549607753754,
"learning_rate": 9.510969066168813e-05,
"loss": 0.0098,
"step": 883
},
{
"epoch": 5.703225806451613,
"grad_norm": 0.05208706110715866,
"learning_rate": 9.509024210263933e-05,
"loss": 0.0081,
"step": 884
},
{
"epoch": 5.709677419354839,
"grad_norm": 0.0637330710887909,
"learning_rate": 9.507075694381782e-05,
"loss": 0.0097,
"step": 885
},
{
"epoch": 5.716129032258064,
"grad_norm": 0.055275216698646545,
"learning_rate": 9.505123520103968e-05,
"loss": 0.0066,
"step": 886
},
{
"epoch": 5.72258064516129,
"grad_norm": 0.05725083872675896,
"learning_rate": 9.503167689015077e-05,
"loss": 0.0074,
"step": 887
},
{
"epoch": 5.729032258064516,
"grad_norm": 0.061076343059539795,
"learning_rate": 9.501208202702656e-05,
"loss": 0.0091,
"step": 888
},
{
"epoch": 5.735483870967742,
"grad_norm": 0.060000624507665634,
"learning_rate": 9.49924506275722e-05,
"loss": 0.01,
"step": 889
},
{
"epoch": 5.741935483870968,
"grad_norm": 0.12105455994606018,
"learning_rate": 9.497278270772251e-05,
"loss": 0.0075,
"step": 890
},
{
"epoch": 5.748387096774193,
"grad_norm": 0.06688644737005234,
"learning_rate": 9.495307828344198e-05,
"loss": 0.0097,
"step": 891
},
{
"epoch": 5.754838709677419,
"grad_norm": 0.07520955801010132,
"learning_rate": 9.493333737072465e-05,
"loss": 0.012,
"step": 892
},
{
"epoch": 5.7612903225806456,
"grad_norm": 0.06276785582304001,
"learning_rate": 9.491355998559426e-05,
"loss": 0.0072,
"step": 893
},
{
"epoch": 5.767741935483871,
"grad_norm": 0.0585506446659565,
"learning_rate": 9.489374614410413e-05,
"loss": 0.0073,
"step": 894
},
{
"epoch": 5.774193548387097,
"grad_norm": 0.054901156574487686,
"learning_rate": 9.487389586233717e-05,
"loss": 0.0076,
"step": 895
},
{
"epoch": 5.780645161290322,
"grad_norm": 0.05712338536977768,
"learning_rate": 9.485400915640583e-05,
"loss": 0.0084,
"step": 896
},
{
"epoch": 5.787096774193548,
"grad_norm": 0.07083632797002792,
"learning_rate": 9.483408604245218e-05,
"loss": 0.0085,
"step": 897
},
{
"epoch": 5.7935483870967746,
"grad_norm": 0.07239817827939987,
"learning_rate": 9.481412653664784e-05,
"loss": 0.0105,
"step": 898
},
{
"epoch": 5.8,
"grad_norm": 0.06843467801809311,
"learning_rate": 9.47941306551939e-05,
"loss": 0.0065,
"step": 899
},
{
"epoch": 5.806451612903226,
"grad_norm": 0.06935566663742065,
"learning_rate": 9.477409841432108e-05,
"loss": 0.0085,
"step": 900
},
{
"epoch": 5.806451612903226,
"eval_loss": 0.022989710792899132,
"eval_runtime": 20.6155,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.097,
"step": 900
},
{
"epoch": 5.812903225806451,
"grad_norm": 0.0664864331483841,
"learning_rate": 9.475402983028953e-05,
"loss": 0.0079,
"step": 901
},
{
"epoch": 5.819354838709677,
"grad_norm": 0.08055754005908966,
"learning_rate": 9.473392491938895e-05,
"loss": 0.0075,
"step": 902
},
{
"epoch": 5.825806451612904,
"grad_norm": 0.08719275146722794,
"learning_rate": 9.47137836979385e-05,
"loss": 0.0101,
"step": 903
},
{
"epoch": 5.832258064516129,
"grad_norm": 0.06438102573156357,
"learning_rate": 9.46936061822868e-05,
"loss": 0.0082,
"step": 904
},
{
"epoch": 5.838709677419355,
"grad_norm": 0.0579158253967762,
"learning_rate": 9.467339238881199e-05,
"loss": 0.0086,
"step": 905
},
{
"epoch": 5.845161290322581,
"grad_norm": 0.06194271147251129,
"learning_rate": 9.465314233392158e-05,
"loss": 0.0104,
"step": 906
},
{
"epoch": 5.851612903225806,
"grad_norm": 0.05593113228678703,
"learning_rate": 9.463285603405255e-05,
"loss": 0.0091,
"step": 907
},
{
"epoch": 5.858064516129033,
"grad_norm": 0.06721027195453644,
"learning_rate": 9.461253350567132e-05,
"loss": 0.0085,
"step": 908
},
{
"epoch": 5.864516129032258,
"grad_norm": 0.05705774575471878,
"learning_rate": 9.459217476527368e-05,
"loss": 0.0075,
"step": 909
},
{
"epoch": 5.870967741935484,
"grad_norm": 0.05105244368314743,
"learning_rate": 9.457177982938485e-05,
"loss": 0.0085,
"step": 910
},
{
"epoch": 5.877419354838709,
"grad_norm": 0.06674633175134659,
"learning_rate": 9.455134871455941e-05,
"loss": 0.0077,
"step": 911
},
{
"epoch": 5.883870967741935,
"grad_norm": 0.058289535343647,
"learning_rate": 9.453088143738129e-05,
"loss": 0.0086,
"step": 912
},
{
"epoch": 5.890322580645162,
"grad_norm": 0.07466110587120056,
"learning_rate": 9.451037801446378e-05,
"loss": 0.0105,
"step": 913
},
{
"epoch": 5.896774193548387,
"grad_norm": 0.0619179792702198,
"learning_rate": 9.448983846244955e-05,
"loss": 0.0085,
"step": 914
},
{
"epoch": 5.903225806451613,
"grad_norm": 0.06765176355838776,
"learning_rate": 9.446926279801053e-05,
"loss": 0.0102,
"step": 915
},
{
"epoch": 5.909677419354839,
"grad_norm": 0.060166824609041214,
"learning_rate": 9.444865103784804e-05,
"loss": 0.0075,
"step": 916
},
{
"epoch": 5.916129032258064,
"grad_norm": 0.06444356590509415,
"learning_rate": 9.442800319869262e-05,
"loss": 0.0071,
"step": 917
},
{
"epoch": 5.922580645161291,
"grad_norm": 0.07145165652036667,
"learning_rate": 9.440731929730415e-05,
"loss": 0.0096,
"step": 918
},
{
"epoch": 5.929032258064516,
"grad_norm": 0.05392426252365112,
"learning_rate": 9.438659935047175e-05,
"loss": 0.0064,
"step": 919
},
{
"epoch": 5.935483870967742,
"grad_norm": 0.06370414048433304,
"learning_rate": 9.436584337501382e-05,
"loss": 0.0083,
"step": 920
},
{
"epoch": 5.941935483870967,
"grad_norm": 0.0562657006084919,
"learning_rate": 9.4345051387778e-05,
"loss": 0.0086,
"step": 921
},
{
"epoch": 5.948387096774193,
"grad_norm": 0.06523040682077408,
"learning_rate": 9.432422340564115e-05,
"loss": 0.0115,
"step": 922
},
{
"epoch": 5.95483870967742,
"grad_norm": 0.08145349472761154,
"learning_rate": 9.430335944550936e-05,
"loss": 0.0127,
"step": 923
},
{
"epoch": 5.961290322580645,
"grad_norm": 0.05838509649038315,
"learning_rate": 9.42824595243179e-05,
"loss": 0.0068,
"step": 924
},
{
"epoch": 5.967741935483871,
"grad_norm": 0.05700171738862991,
"learning_rate": 9.426152365903127e-05,
"loss": 0.0069,
"step": 925
},
{
"epoch": 5.967741935483871,
"eval_loss": 0.021346276625990868,
"eval_runtime": 20.6536,
"eval_samples_per_second": 4.842,
"eval_steps_per_second": 0.097,
"step": 925
},
{
"epoch": 5.974193548387097,
"grad_norm": 0.05546183884143829,
"learning_rate": 9.424055186664311e-05,
"loss": 0.0086,
"step": 926
},
{
"epoch": 5.980645161290322,
"grad_norm": 0.051414016634225845,
"learning_rate": 9.421954416417624e-05,
"loss": 0.0089,
"step": 927
},
{
"epoch": 5.987096774193549,
"grad_norm": 0.06089739874005318,
"learning_rate": 9.419850056868262e-05,
"loss": 0.0085,
"step": 928
},
{
"epoch": 5.993548387096774,
"grad_norm": 0.056536007672548294,
"learning_rate": 9.417742109724334e-05,
"loss": 0.0086,
"step": 929
},
{
"epoch": 6.0,
"grad_norm": 0.09017127752304077,
"learning_rate": 9.415630576696863e-05,
"loss": 0.0097,
"step": 930
},
{
"epoch": 6.006451612903226,
"grad_norm": 0.04831506684422493,
"learning_rate": 9.413515459499782e-05,
"loss": 0.0065,
"step": 931
},
{
"epoch": 6.012903225806451,
"grad_norm": 0.04744061455130577,
"learning_rate": 9.411396759849931e-05,
"loss": 0.0062,
"step": 932
},
{
"epoch": 6.019354838709678,
"grad_norm": 0.048048291355371475,
"learning_rate": 9.409274479467058e-05,
"loss": 0.006,
"step": 933
},
{
"epoch": 6.025806451612903,
"grad_norm": 0.04612509161233902,
"learning_rate": 9.407148620073824e-05,
"loss": 0.0054,
"step": 934
},
{
"epoch": 6.032258064516129,
"grad_norm": 0.04895896464586258,
"learning_rate": 9.405019183395787e-05,
"loss": 0.0061,
"step": 935
},
{
"epoch": 6.038709677419355,
"grad_norm": 0.04923215135931969,
"learning_rate": 9.402886171161411e-05,
"loss": 0.0056,
"step": 936
},
{
"epoch": 6.04516129032258,
"grad_norm": 0.05468977242708206,
"learning_rate": 9.400749585102067e-05,
"loss": 0.0056,
"step": 937
},
{
"epoch": 6.051612903225807,
"grad_norm": 0.04829660430550575,
"learning_rate": 9.398609426952019e-05,
"loss": 0.0049,
"step": 938
},
{
"epoch": 6.058064516129032,
"grad_norm": 0.0703238770365715,
"learning_rate": 9.396465698448435e-05,
"loss": 0.0064,
"step": 939
},
{
"epoch": 6.064516129032258,
"grad_norm": 0.06474682688713074,
"learning_rate": 9.394318401331383e-05,
"loss": 0.0071,
"step": 940
},
{
"epoch": 6.070967741935484,
"grad_norm": 0.0701901838183403,
"learning_rate": 9.392167537343823e-05,
"loss": 0.0072,
"step": 941
},
{
"epoch": 6.077419354838709,
"grad_norm": 0.05554347485303879,
"learning_rate": 9.390013108231614e-05,
"loss": 0.0069,
"step": 942
},
{
"epoch": 6.083870967741936,
"grad_norm": 0.06273981183767319,
"learning_rate": 9.387855115743505e-05,
"loss": 0.0065,
"step": 943
},
{
"epoch": 6.090322580645161,
"grad_norm": 0.05572091042995453,
"learning_rate": 9.385693561631141e-05,
"loss": 0.0059,
"step": 944
},
{
"epoch": 6.096774193548387,
"grad_norm": 0.03943298012018204,
"learning_rate": 9.383528447649057e-05,
"loss": 0.0054,
"step": 945
},
{
"epoch": 6.103225806451613,
"grad_norm": 0.04861641675233841,
"learning_rate": 9.381359775554675e-05,
"loss": 0.0055,
"step": 946
},
{
"epoch": 6.109677419354838,
"grad_norm": 0.0495055690407753,
"learning_rate": 9.379187547108312e-05,
"loss": 0.0055,
"step": 947
},
{
"epoch": 6.116129032258065,
"grad_norm": 0.048694632947444916,
"learning_rate": 9.377011764073162e-05,
"loss": 0.0058,
"step": 948
},
{
"epoch": 6.122580645161291,
"grad_norm": 0.06473204493522644,
"learning_rate": 9.37483242821531e-05,
"loss": 0.0055,
"step": 949
},
{
"epoch": 6.129032258064516,
"grad_norm": 0.05870223045349121,
"learning_rate": 9.372649541303726e-05,
"loss": 0.0056,
"step": 950
},
{
"epoch": 6.129032258064516,
"eval_loss": 0.022576194256544113,
"eval_runtime": 20.6217,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.097,
"step": 950
},
{
"epoch": 6.135483870967742,
"grad_norm": 0.049414485692977905,
"learning_rate": 9.37046310511026e-05,
"loss": 0.0057,
"step": 951
},
{
"epoch": 6.141935483870967,
"grad_norm": 0.05746641382575035,
"learning_rate": 9.368273121409644e-05,
"loss": 0.0068,
"step": 952
},
{
"epoch": 6.148387096774194,
"grad_norm": 0.05265411362051964,
"learning_rate": 9.366079591979486e-05,
"loss": 0.0057,
"step": 953
},
{
"epoch": 6.15483870967742,
"grad_norm": 0.07672982662916183,
"learning_rate": 9.363882518600275e-05,
"loss": 0.0068,
"step": 954
},
{
"epoch": 6.161290322580645,
"grad_norm": 0.060838665813207626,
"learning_rate": 9.361681903055382e-05,
"loss": 0.0054,
"step": 955
},
{
"epoch": 6.167741935483871,
"grad_norm": 0.052103929221630096,
"learning_rate": 9.359477747131042e-05,
"loss": 0.005,
"step": 956
},
{
"epoch": 6.174193548387096,
"grad_norm": 0.05221958085894585,
"learning_rate": 9.357270052616372e-05,
"loss": 0.0065,
"step": 957
},
{
"epoch": 6.180645161290323,
"grad_norm": 0.050105560570955276,
"learning_rate": 9.355058821303359e-05,
"loss": 0.0054,
"step": 958
},
{
"epoch": 6.187096774193549,
"grad_norm": 0.0577901154756546,
"learning_rate": 9.352844054986857e-05,
"loss": 0.0067,
"step": 959
},
{
"epoch": 6.193548387096774,
"grad_norm": 0.05385618656873703,
"learning_rate": 9.3506257554646e-05,
"loss": 0.0069,
"step": 960
},
{
"epoch": 6.2,
"grad_norm": 0.06024151295423508,
"learning_rate": 9.348403924537177e-05,
"loss": 0.007,
"step": 961
},
{
"epoch": 6.2064516129032254,
"grad_norm": 0.05268295854330063,
"learning_rate": 9.346178564008052e-05,
"loss": 0.0067,
"step": 962
},
{
"epoch": 6.212903225806452,
"grad_norm": 0.04031063988804817,
"learning_rate": 9.343949675683549e-05,
"loss": 0.0049,
"step": 963
},
{
"epoch": 6.219354838709678,
"grad_norm": 0.06445210427045822,
"learning_rate": 9.34171726137286e-05,
"loss": 0.0069,
"step": 964
},
{
"epoch": 6.225806451612903,
"grad_norm": 0.0537705235183239,
"learning_rate": 9.339481322888035e-05,
"loss": 0.0068,
"step": 965
},
{
"epoch": 6.232258064516129,
"grad_norm": 0.05740392953157425,
"learning_rate": 9.337241862043988e-05,
"loss": 0.0056,
"step": 966
},
{
"epoch": 6.2387096774193544,
"grad_norm": 0.06430184841156006,
"learning_rate": 9.33499888065849e-05,
"loss": 0.0063,
"step": 967
},
{
"epoch": 6.245161290322581,
"grad_norm": 0.07137993723154068,
"learning_rate": 9.33275238055217e-05,
"loss": 0.0071,
"step": 968
},
{
"epoch": 6.251612903225807,
"grad_norm": 0.05400635674595833,
"learning_rate": 9.330502363548515e-05,
"loss": 0.007,
"step": 969
},
{
"epoch": 6.258064516129032,
"grad_norm": 0.05895206704735756,
"learning_rate": 9.32824883147386e-05,
"loss": 0.0063,
"step": 970
},
{
"epoch": 6.264516129032258,
"grad_norm": 0.05703943967819214,
"learning_rate": 9.325991786157404e-05,
"loss": 0.0063,
"step": 971
},
{
"epoch": 6.270967741935484,
"grad_norm": 0.05772950127720833,
"learning_rate": 9.32373122943119e-05,
"loss": 0.0068,
"step": 972
},
{
"epoch": 6.27741935483871,
"grad_norm": 0.06098498776555061,
"learning_rate": 9.32146716313011e-05,
"loss": 0.0063,
"step": 973
},
{
"epoch": 6.283870967741936,
"grad_norm": 0.05442085489630699,
"learning_rate": 9.319199589091911e-05,
"loss": 0.0059,
"step": 974
},
{
"epoch": 6.290322580645161,
"grad_norm": 0.06356194615364075,
"learning_rate": 9.316928509157185e-05,
"loss": 0.0063,
"step": 975
},
{
"epoch": 6.290322580645161,
"eval_loss": 0.022397752851247787,
"eval_runtime": 20.6405,
"eval_samples_per_second": 4.845,
"eval_steps_per_second": 0.097,
"step": 975
},
{
"epoch": 6.296774193548387,
"grad_norm": 0.05127127468585968,
"learning_rate": 9.314653925169365e-05,
"loss": 0.0068,
"step": 976
},
{
"epoch": 6.3032258064516125,
"grad_norm": 0.05523466691374779,
"learning_rate": 9.312375838974735e-05,
"loss": 0.0054,
"step": 977
},
{
"epoch": 6.309677419354839,
"grad_norm": 0.05204270780086517,
"learning_rate": 9.310094252422417e-05,
"loss": 0.0054,
"step": 978
},
{
"epoch": 6.316129032258065,
"grad_norm": 0.07963524013757706,
"learning_rate": 9.307809167364377e-05,
"loss": 0.0066,
"step": 979
},
{
"epoch": 6.32258064516129,
"grad_norm": 0.05632827803492546,
"learning_rate": 9.30552058565542e-05,
"loss": 0.0049,
"step": 980
},
{
"epoch": 6.329032258064516,
"grad_norm": 0.07022556662559509,
"learning_rate": 9.303228509153188e-05,
"loss": 0.0071,
"step": 981
},
{
"epoch": 6.335483870967742,
"grad_norm": 0.05617734044790268,
"learning_rate": 9.300932939718159e-05,
"loss": 0.007,
"step": 982
},
{
"epoch": 6.341935483870968,
"grad_norm": 0.0606408566236496,
"learning_rate": 9.298633879213652e-05,
"loss": 0.0055,
"step": 983
},
{
"epoch": 6.348387096774194,
"grad_norm": 0.05535956844687462,
"learning_rate": 9.296331329505813e-05,
"loss": 0.0068,
"step": 984
},
{
"epoch": 6.354838709677419,
"grad_norm": 0.05563861131668091,
"learning_rate": 9.294025292463623e-05,
"loss": 0.0049,
"step": 985
},
{
"epoch": 6.361290322580645,
"grad_norm": 0.05694695562124252,
"learning_rate": 9.291715769958895e-05,
"loss": 0.0056,
"step": 986
},
{
"epoch": 6.367741935483871,
"grad_norm": 0.06230534613132477,
"learning_rate": 9.289402763866266e-05,
"loss": 0.0058,
"step": 987
},
{
"epoch": 6.374193548387097,
"grad_norm": 0.05194805935025215,
"learning_rate": 9.287086276063208e-05,
"loss": 0.0058,
"step": 988
},
{
"epoch": 6.380645161290323,
"grad_norm": 0.05432300642132759,
"learning_rate": 9.284766308430014e-05,
"loss": 0.0054,
"step": 989
},
{
"epoch": 6.387096774193548,
"grad_norm": 0.04841138422489166,
"learning_rate": 9.282442862849803e-05,
"loss": 0.0056,
"step": 990
},
{
"epoch": 6.393548387096774,
"grad_norm": 0.05156128108501434,
"learning_rate": 9.280115941208516e-05,
"loss": 0.0065,
"step": 991
},
{
"epoch": 6.4,
"grad_norm": 0.05393640324473381,
"learning_rate": 9.277785545394917e-05,
"loss": 0.0063,
"step": 992
},
{
"epoch": 6.406451612903226,
"grad_norm": 0.04842933639883995,
"learning_rate": 9.275451677300591e-05,
"loss": 0.0059,
"step": 993
},
{
"epoch": 6.412903225806452,
"grad_norm": 0.05428517609834671,
"learning_rate": 9.273114338819938e-05,
"loss": 0.0065,
"step": 994
},
{
"epoch": 6.419354838709677,
"grad_norm": 0.05844871327280998,
"learning_rate": 9.270773531850179e-05,
"loss": 0.006,
"step": 995
},
{
"epoch": 6.425806451612903,
"grad_norm": 0.06882674247026443,
"learning_rate": 9.268429258291345e-05,
"loss": 0.006,
"step": 996
},
{
"epoch": 6.432258064516129,
"grad_norm": 0.05051298066973686,
"learning_rate": 9.266081520046286e-05,
"loss": 0.0063,
"step": 997
},
{
"epoch": 6.438709677419355,
"grad_norm": 0.05807333439588547,
"learning_rate": 9.263730319020663e-05,
"loss": 0.0058,
"step": 998
},
{
"epoch": 6.445161290322581,
"grad_norm": 0.07875418663024902,
"learning_rate": 9.261375657122946e-05,
"loss": 0.0076,
"step": 999
},
{
"epoch": 6.451612903225806,
"grad_norm": 0.05101834610104561,
"learning_rate": 9.259017536264414e-05,
"loss": 0.0055,
"step": 1000
},
{
"epoch": 6.451612903225806,
"eval_loss": 0.022700941190123558,
"eval_runtime": 20.6129,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.097,
"step": 1000
},
{
"epoch": 6.458064516129032,
"grad_norm": 0.06487668305635452,
"learning_rate": 9.256655958359159e-05,
"loss": 0.0065,
"step": 1001
},
{
"epoch": 6.464516129032258,
"grad_norm": 0.05859556794166565,
"learning_rate": 9.25429092532407e-05,
"loss": 0.0063,
"step": 1002
},
{
"epoch": 6.470967741935484,
"grad_norm": 0.060135431587696075,
"learning_rate": 9.25192243907885e-05,
"loss": 0.0071,
"step": 1003
},
{
"epoch": 6.47741935483871,
"grad_norm": 0.049589548259973526,
"learning_rate": 9.249550501545997e-05,
"loss": 0.0056,
"step": 1004
},
{
"epoch": 6.483870967741936,
"grad_norm": 0.0466407835483551,
"learning_rate": 9.247175114650817e-05,
"loss": 0.0064,
"step": 1005
},
{
"epoch": 6.490322580645161,
"grad_norm": 0.056752149015665054,
"learning_rate": 9.24479628032141e-05,
"loss": 0.0054,
"step": 1006
},
{
"epoch": 6.496774193548387,
"grad_norm": 0.06155959144234657,
"learning_rate": 9.242414000488677e-05,
"loss": 0.0069,
"step": 1007
},
{
"epoch": 6.503225806451613,
"grad_norm": 0.06714151054620743,
"learning_rate": 9.240028277086319e-05,
"loss": 0.0071,
"step": 1008
},
{
"epoch": 6.509677419354839,
"grad_norm": 0.05602521076798439,
"learning_rate": 9.237639112050827e-05,
"loss": 0.0059,
"step": 1009
},
{
"epoch": 6.516129032258064,
"grad_norm": 0.05501384660601616,
"learning_rate": 9.235246507321487e-05,
"loss": 0.0057,
"step": 1010
},
{
"epoch": 6.52258064516129,
"grad_norm": 0.04764142259955406,
"learning_rate": 9.232850464840377e-05,
"loss": 0.0057,
"step": 1011
},
{
"epoch": 6.529032258064516,
"grad_norm": 0.04876996576786041,
"learning_rate": 9.230450986552368e-05,
"loss": 0.0064,
"step": 1012
},
{
"epoch": 6.535483870967742,
"grad_norm": 0.05286463722586632,
"learning_rate": 9.228048074405115e-05,
"loss": 0.0055,
"step": 1013
},
{
"epoch": 6.541935483870968,
"grad_norm": 0.055027544498443604,
"learning_rate": 9.225641730349066e-05,
"loss": 0.006,
"step": 1014
},
{
"epoch": 6.548387096774194,
"grad_norm": 0.06131773442029953,
"learning_rate": 9.22323195633745e-05,
"loss": 0.0058,
"step": 1015
},
{
"epoch": 6.554838709677419,
"grad_norm": 0.049361925572156906,
"learning_rate": 9.220818754326282e-05,
"loss": 0.0052,
"step": 1016
},
{
"epoch": 6.561290322580645,
"grad_norm": 0.047937195748090744,
"learning_rate": 9.218402126274358e-05,
"loss": 0.0055,
"step": 1017
},
{
"epoch": 6.567741935483871,
"grad_norm": 0.05250125750899315,
"learning_rate": 9.215982074143259e-05,
"loss": 0.0061,
"step": 1018
},
{
"epoch": 6.574193548387097,
"grad_norm": 0.056667257100343704,
"learning_rate": 9.213558599897341e-05,
"loss": 0.0053,
"step": 1019
},
{
"epoch": 6.580645161290323,
"grad_norm": 0.06139816343784332,
"learning_rate": 9.211131705503739e-05,
"loss": 0.0062,
"step": 1020
},
{
"epoch": 6.587096774193548,
"grad_norm": 0.05592657998204231,
"learning_rate": 9.208701392932367e-05,
"loss": 0.0065,
"step": 1021
},
{
"epoch": 6.593548387096774,
"grad_norm": 0.05883883312344551,
"learning_rate": 9.206267664155907e-05,
"loss": 0.0061,
"step": 1022
},
{
"epoch": 6.6,
"grad_norm": 0.057344142347574234,
"learning_rate": 9.20383052114982e-05,
"loss": 0.0065,
"step": 1023
},
{
"epoch": 6.606451612903226,
"grad_norm": 0.044084545224905014,
"learning_rate": 9.201389965892336e-05,
"loss": 0.0056,
"step": 1024
},
{
"epoch": 6.612903225806452,
"grad_norm": 0.05402151495218277,
"learning_rate": 9.198946000364454e-05,
"loss": 0.0067,
"step": 1025
},
{
"epoch": 6.612903225806452,
"eval_loss": 0.022883424535393715,
"eval_runtime": 20.6396,
"eval_samples_per_second": 4.845,
"eval_steps_per_second": 0.097,
"step": 1025
},
{
"epoch": 6.619354838709677,
"grad_norm": 0.06240704283118248,
"learning_rate": 9.196498626549945e-05,
"loss": 0.0076,
"step": 1026
},
{
"epoch": 6.625806451612903,
"grad_norm": 0.06148531660437584,
"learning_rate": 9.194047846435341e-05,
"loss": 0.0049,
"step": 1027
},
{
"epoch": 6.632258064516129,
"grad_norm": 0.05714572221040726,
"learning_rate": 9.191593662009941e-05,
"loss": 0.0053,
"step": 1028
},
{
"epoch": 6.638709677419355,
"grad_norm": 0.05543628707528114,
"learning_rate": 9.18913607526581e-05,
"loss": 0.0052,
"step": 1029
},
{
"epoch": 6.645161290322581,
"grad_norm": 0.0643729567527771,
"learning_rate": 9.186675088197772e-05,
"loss": 0.0069,
"step": 1030
},
{
"epoch": 6.651612903225806,
"grad_norm": 0.06447968631982803,
"learning_rate": 9.184210702803409e-05,
"loss": 0.0069,
"step": 1031
},
{
"epoch": 6.658064516129032,
"grad_norm": 0.04606137052178383,
"learning_rate": 9.181742921083065e-05,
"loss": 0.0053,
"step": 1032
},
{
"epoch": 6.664516129032258,
"grad_norm": 0.07696260511875153,
"learning_rate": 9.179271745039843e-05,
"loss": 0.0079,
"step": 1033
},
{
"epoch": 6.670967741935484,
"grad_norm": 0.06481795012950897,
"learning_rate": 9.176797176679593e-05,
"loss": 0.0065,
"step": 1034
},
{
"epoch": 6.67741935483871,
"grad_norm": 0.05713411420583725,
"learning_rate": 9.174319218010928e-05,
"loss": 0.0054,
"step": 1035
},
{
"epoch": 6.683870967741935,
"grad_norm": 0.055928200483322144,
"learning_rate": 9.171837871045204e-05,
"loss": 0.0064,
"step": 1036
},
{
"epoch": 6.690322580645161,
"grad_norm": 0.054814040660858154,
"learning_rate": 9.169353137796534e-05,
"loss": 0.0069,
"step": 1037
},
{
"epoch": 6.6967741935483875,
"grad_norm": 0.062209051102399826,
"learning_rate": 9.166865020281777e-05,
"loss": 0.0063,
"step": 1038
},
{
"epoch": 6.703225806451613,
"grad_norm": 0.057959817349910736,
"learning_rate": 9.16437352052054e-05,
"loss": 0.0071,
"step": 1039
},
{
"epoch": 6.709677419354839,
"grad_norm": 0.06060996651649475,
"learning_rate": 9.16187864053517e-05,
"loss": 0.0064,
"step": 1040
},
{
"epoch": 6.716129032258064,
"grad_norm": 0.05503141134977341,
"learning_rate": 9.15938038235077e-05,
"loss": 0.0052,
"step": 1041
},
{
"epoch": 6.72258064516129,
"grad_norm": 0.0538954995572567,
"learning_rate": 9.156878747995172e-05,
"loss": 0.0064,
"step": 1042
},
{
"epoch": 6.729032258064516,
"grad_norm": 0.05267870053648949,
"learning_rate": 9.154373739498955e-05,
"loss": 0.0053,
"step": 1043
},
{
"epoch": 6.735483870967742,
"grad_norm": 0.07564899325370789,
"learning_rate": 9.151865358895439e-05,
"loss": 0.007,
"step": 1044
},
{
"epoch": 6.741935483870968,
"grad_norm": 0.05886682868003845,
"learning_rate": 9.149353608220672e-05,
"loss": 0.0055,
"step": 1045
},
{
"epoch": 6.748387096774193,
"grad_norm": 0.033152226358652115,
"learning_rate": 9.146838489513447e-05,
"loss": 0.005,
"step": 1046
},
{
"epoch": 6.754838709677419,
"grad_norm": 0.05886426195502281,
"learning_rate": 9.144320004815286e-05,
"loss": 0.0059,
"step": 1047
},
{
"epoch": 6.7612903225806456,
"grad_norm": 0.07072407007217407,
"learning_rate": 9.141798156170446e-05,
"loss": 0.0097,
"step": 1048
},
{
"epoch": 6.767741935483871,
"grad_norm": 0.05597013607621193,
"learning_rate": 9.13927294562591e-05,
"loss": 0.006,
"step": 1049
},
{
"epoch": 6.774193548387097,
"grad_norm": 0.046950407326221466,
"learning_rate": 9.136744375231397e-05,
"loss": 0.0052,
"step": 1050
},
{
"epoch": 6.774193548387097,
"eval_loss": 0.0224434956908226,
"eval_runtime": 20.6125,
"eval_samples_per_second": 4.851,
"eval_steps_per_second": 0.097,
"step": 1050
},
{
"epoch": 6.780645161290322,
"grad_norm": 0.05880718678236008,
"learning_rate": 9.134212447039343e-05,
"loss": 0.007,
"step": 1051
},
{
"epoch": 6.787096774193548,
"grad_norm": 0.04981274530291557,
"learning_rate": 9.13167716310492e-05,
"loss": 0.005,
"step": 1052
},
{
"epoch": 6.7935483870967746,
"grad_norm": 0.05875200033187866,
"learning_rate": 9.129138525486019e-05,
"loss": 0.0056,
"step": 1053
},
{
"epoch": 6.8,
"grad_norm": 0.046490080654621124,
"learning_rate": 9.12659653624325e-05,
"loss": 0.0051,
"step": 1054
},
{
"epoch": 6.806451612903226,
"grad_norm": 0.05880575627088547,
"learning_rate": 9.12405119743995e-05,
"loss": 0.006,
"step": 1055
},
{
"epoch": 6.812903225806451,
"grad_norm": 0.046049751341342926,
"learning_rate": 9.121502511142172e-05,
"loss": 0.0058,
"step": 1056
},
{
"epoch": 6.819354838709677,
"grad_norm": 0.04743240028619766,
"learning_rate": 9.118950479418684e-05,
"loss": 0.0063,
"step": 1057
},
{
"epoch": 6.825806451612904,
"grad_norm": 0.056094031780958176,
"learning_rate": 9.116395104340974e-05,
"loss": 0.0067,
"step": 1058
},
{
"epoch": 6.832258064516129,
"grad_norm": 0.052488215267658234,
"learning_rate": 9.113836387983239e-05,
"loss": 0.0044,
"step": 1059
},
{
"epoch": 6.838709677419355,
"grad_norm": 0.05668174475431442,
"learning_rate": 9.11127433242239e-05,
"loss": 0.0069,
"step": 1060
},
{
"epoch": 6.845161290322581,
"grad_norm": 0.06447296589612961,
"learning_rate": 9.108708939738051e-05,
"loss": 0.0056,
"step": 1061
},
{
"epoch": 6.851612903225806,
"grad_norm": 0.05771500989794731,
"learning_rate": 9.106140212012553e-05,
"loss": 0.006,
"step": 1062
},
{
"epoch": 6.858064516129033,
"grad_norm": 0.04775027185678482,
"learning_rate": 9.10356815133093e-05,
"loss": 0.005,
"step": 1063
},
{
"epoch": 6.864516129032258,
"grad_norm": 0.060601286590099335,
"learning_rate": 9.100992759780928e-05,
"loss": 0.0082,
"step": 1064
},
{
"epoch": 6.870967741935484,
"grad_norm": 0.05034959316253662,
"learning_rate": 9.098414039452993e-05,
"loss": 0.0067,
"step": 1065
},
{
"epoch": 6.877419354838709,
"grad_norm": 0.061979107558727264,
"learning_rate": 9.095831992440273e-05,
"loss": 0.0075,
"step": 1066
},
{
"epoch": 6.883870967741935,
"grad_norm": 0.04176148772239685,
"learning_rate": 9.093246620838618e-05,
"loss": 0.0057,
"step": 1067
},
{
"epoch": 6.890322580645162,
"grad_norm": 0.05120096728205681,
"learning_rate": 9.090657926746575e-05,
"loss": 0.0063,
"step": 1068
},
{
"epoch": 6.896774193548387,
"grad_norm": 0.043810728937387466,
"learning_rate": 9.088065912265386e-05,
"loss": 0.006,
"step": 1069
},
{
"epoch": 6.903225806451613,
"grad_norm": 0.046423133462667465,
"learning_rate": 9.085470579498995e-05,
"loss": 0.0056,
"step": 1070
},
{
"epoch": 6.909677419354839,
"grad_norm": 0.04729986563324928,
"learning_rate": 9.082871930554031e-05,
"loss": 0.0069,
"step": 1071
},
{
"epoch": 6.916129032258064,
"grad_norm": 0.046129655092954636,
"learning_rate": 9.080269967539823e-05,
"loss": 0.0054,
"step": 1072
},
{
"epoch": 6.922580645161291,
"grad_norm": 0.05045049265027046,
"learning_rate": 9.07766469256838e-05,
"loss": 0.006,
"step": 1073
},
{
"epoch": 6.929032258064516,
"grad_norm": 0.06024559587240219,
"learning_rate": 9.07505610775441e-05,
"loss": 0.0081,
"step": 1074
},
{
"epoch": 6.935483870967742,
"grad_norm": 0.061700381338596344,
"learning_rate": 9.072444215215299e-05,
"loss": 0.008,
"step": 1075
},
{
"epoch": 6.935483870967742,
"eval_loss": 0.021896785125136375,
"eval_runtime": 20.6536,
"eval_samples_per_second": 4.842,
"eval_steps_per_second": 0.097,
"step": 1075
},
{
"epoch": 6.941935483870967,
"grad_norm": 0.04216156154870987,
"learning_rate": 9.069829017071123e-05,
"loss": 0.0054,
"step": 1076
},
{
"epoch": 6.948387096774193,
"grad_norm": 0.05374651029706001,
"learning_rate": 9.06721051544464e-05,
"loss": 0.0071,
"step": 1077
},
{
"epoch": 6.95483870967742,
"grad_norm": 0.04941989853978157,
"learning_rate": 9.06458871246129e-05,
"loss": 0.0058,
"step": 1078
},
{
"epoch": 6.961290322580645,
"grad_norm": 0.06097535789012909,
"learning_rate": 9.06196361024919e-05,
"loss": 0.0055,
"step": 1079
},
{
"epoch": 6.967741935483871,
"grad_norm": 0.04807058721780777,
"learning_rate": 9.059335210939137e-05,
"loss": 0.0044,
"step": 1080
},
{
"epoch": 6.974193548387097,
"grad_norm": 0.04932430759072304,
"learning_rate": 9.056703516664606e-05,
"loss": 0.006,
"step": 1081
},
{
"epoch": 6.980645161290322,
"grad_norm": 0.043658170849084854,
"learning_rate": 9.054068529561744e-05,
"loss": 0.0051,
"step": 1082
},
{
"epoch": 6.987096774193549,
"grad_norm": 0.06009669229388237,
"learning_rate": 9.051430251769368e-05,
"loss": 0.0049,
"step": 1083
},
{
"epoch": 6.993548387096774,
"grad_norm": 0.04545913636684418,
"learning_rate": 9.048788685428974e-05,
"loss": 0.0045,
"step": 1084
},
{
"epoch": 7.0,
"grad_norm": 0.07388494163751602,
"learning_rate": 9.046143832684719e-05,
"loss": 0.0065,
"step": 1085
},
{
"epoch": 7.006451612903226,
"grad_norm": 0.052344489842653275,
"learning_rate": 9.043495695683435e-05,
"loss": 0.0053,
"step": 1086
},
{
"epoch": 7.012903225806451,
"grad_norm": 0.03753964230418205,
"learning_rate": 9.040844276574614e-05,
"loss": 0.0041,
"step": 1087
},
{
"epoch": 7.019354838709678,
"grad_norm": 0.050138022750616074,
"learning_rate": 9.038189577510415e-05,
"loss": 0.0048,
"step": 1088
},
{
"epoch": 7.025806451612903,
"grad_norm": 0.037361446768045425,
"learning_rate": 9.035531600645657e-05,
"loss": 0.0037,
"step": 1089
},
{
"epoch": 7.032258064516129,
"grad_norm": 0.0569661408662796,
"learning_rate": 9.032870348137823e-05,
"loss": 0.0048,
"step": 1090
},
{
"epoch": 7.038709677419355,
"grad_norm": 0.055356767028570175,
"learning_rate": 9.030205822147052e-05,
"loss": 0.0033,
"step": 1091
},
{
"epoch": 7.04516129032258,
"grad_norm": 0.037388719618320465,
"learning_rate": 9.027538024836142e-05,
"loss": 0.0048,
"step": 1092
},
{
"epoch": 7.051612903225807,
"grad_norm": 0.04436417669057846,
"learning_rate": 9.024866958370544e-05,
"loss": 0.0055,
"step": 1093
},
{
"epoch": 7.058064516129032,
"grad_norm": 0.04340614750981331,
"learning_rate": 9.022192624918366e-05,
"loss": 0.0043,
"step": 1094
},
{
"epoch": 7.064516129032258,
"grad_norm": 0.0408780612051487,
"learning_rate": 9.019515026650365e-05,
"loss": 0.0039,
"step": 1095
},
{
"epoch": 7.070967741935484,
"grad_norm": 0.04257588088512421,
"learning_rate": 9.016834165739948e-05,
"loss": 0.0049,
"step": 1096
},
{
"epoch": 7.077419354838709,
"grad_norm": 0.053617022931575775,
"learning_rate": 9.014150044363171e-05,
"loss": 0.0044,
"step": 1097
},
{
"epoch": 7.083870967741936,
"grad_norm": 0.049007218331098557,
"learning_rate": 9.01146266469874e-05,
"loss": 0.0041,
"step": 1098
},
{
"epoch": 7.090322580645161,
"grad_norm": 0.04696111008524895,
"learning_rate": 9.008772028927998e-05,
"loss": 0.0034,
"step": 1099
},
{
"epoch": 7.096774193548387,
"grad_norm": 0.0659966990351677,
"learning_rate": 9.006078139234939e-05,
"loss": 0.0053,
"step": 1100
},
{
"epoch": 7.096774193548387,
"eval_loss": 0.022749263793230057,
"eval_runtime": 20.6454,
"eval_samples_per_second": 4.844,
"eval_steps_per_second": 0.097,
"step": 1100
},
{
"epoch": 7.103225806451613,
"grad_norm": 0.05406612157821655,
"learning_rate": 9.003380997806193e-05,
"loss": 0.0045,
"step": 1101
},
{
"epoch": 7.109677419354838,
"grad_norm": 0.05338094010949135,
"learning_rate": 9.000680606831032e-05,
"loss": 0.0059,
"step": 1102
},
{
"epoch": 7.116129032258065,
"grad_norm": 0.055592648684978485,
"learning_rate": 8.997976968501361e-05,
"loss": 0.0056,
"step": 1103
},
{
"epoch": 7.122580645161291,
"grad_norm": 0.053097788244485855,
"learning_rate": 8.99527008501173e-05,
"loss": 0.0047,
"step": 1104
},
{
"epoch": 7.129032258064516,
"grad_norm": 0.048222865909338,
"learning_rate": 8.992559958559315e-05,
"loss": 0.0047,
"step": 1105
},
{
"epoch": 7.135483870967742,
"grad_norm": 0.05399785935878754,
"learning_rate": 8.989846591343927e-05,
"loss": 0.0047,
"step": 1106
},
{
"epoch": 7.141935483870967,
"grad_norm": 0.04064812511205673,
"learning_rate": 8.987129985568007e-05,
"loss": 0.0046,
"step": 1107
},
{
"epoch": 7.148387096774194,
"grad_norm": 0.048960182815790176,
"learning_rate": 8.984410143436628e-05,
"loss": 0.0048,
"step": 1108
},
{
"epoch": 7.15483870967742,
"grad_norm": 0.04570115730166435,
"learning_rate": 8.981687067157484e-05,
"loss": 0.0043,
"step": 1109
},
{
"epoch": 7.161290322580645,
"grad_norm": 0.057650115340948105,
"learning_rate": 8.9789607589409e-05,
"loss": 0.0052,
"step": 1110
},
{
"epoch": 7.167741935483871,
"grad_norm": 0.04060333967208862,
"learning_rate": 8.976231220999821e-05,
"loss": 0.0048,
"step": 1111
},
{
"epoch": 7.174193548387096,
"grad_norm": 0.04383286461234093,
"learning_rate": 8.973498455549814e-05,
"loss": 0.0054,
"step": 1112
},
{
"epoch": 7.180645161290323,
"grad_norm": 0.049976591020822525,
"learning_rate": 8.970762464809064e-05,
"loss": 0.0051,
"step": 1113
},
{
"epoch": 7.187096774193549,
"grad_norm": 0.05424259603023529,
"learning_rate": 8.968023250998381e-05,
"loss": 0.0046,
"step": 1114
},
{
"epoch": 7.193548387096774,
"grad_norm": 0.053604692220687866,
"learning_rate": 8.965280816341183e-05,
"loss": 0.004,
"step": 1115
},
{
"epoch": 7.2,
"grad_norm": 0.052061185240745544,
"learning_rate": 8.962535163063506e-05,
"loss": 0.0049,
"step": 1116
},
{
"epoch": 7.2064516129032254,
"grad_norm": 0.03969616815447807,
"learning_rate": 8.959786293393997e-05,
"loss": 0.004,
"step": 1117
},
{
"epoch": 7.212903225806452,
"grad_norm": 0.051511142402887344,
"learning_rate": 8.957034209563917e-05,
"loss": 0.0042,
"step": 1118
},
{
"epoch": 7.219354838709678,
"grad_norm": 0.04875602200627327,
"learning_rate": 8.954278913807133e-05,
"loss": 0.0048,
"step": 1119
},
{
"epoch": 7.225806451612903,
"grad_norm": 0.044596847146749496,
"learning_rate": 8.95152040836012e-05,
"loss": 0.004,
"step": 1120
},
{
"epoch": 7.232258064516129,
"grad_norm": 0.04940240457653999,
"learning_rate": 8.948758695461958e-05,
"loss": 0.0049,
"step": 1121
},
{
"epoch": 7.2387096774193544,
"grad_norm": 0.04639221727848053,
"learning_rate": 8.945993777354333e-05,
"loss": 0.004,
"step": 1122
},
{
"epoch": 7.245161290322581,
"grad_norm": 0.041882410645484924,
"learning_rate": 8.943225656281527e-05,
"loss": 0.0038,
"step": 1123
},
{
"epoch": 7.251612903225807,
"grad_norm": 0.05632800981402397,
"learning_rate": 8.940454334490427e-05,
"loss": 0.0045,
"step": 1124
},
{
"epoch": 7.258064516129032,
"grad_norm": 0.05960334837436676,
"learning_rate": 8.937679814230517e-05,
"loss": 0.0049,
"step": 1125
},
{
"epoch": 7.258064516129032,
"eval_loss": 0.0219831932336092,
"eval_runtime": 20.6196,
"eval_samples_per_second": 4.85,
"eval_steps_per_second": 0.097,
"step": 1125
},
{
"epoch": 7.264516129032258,
"grad_norm": 0.04046253114938736,
"learning_rate": 8.934902097753874e-05,
"loss": 0.0036,
"step": 1126
},
{
"epoch": 7.270967741935484,
"grad_norm": 0.04227912425994873,
"learning_rate": 8.932121187315176e-05,
"loss": 0.0043,
"step": 1127
},
{
"epoch": 7.27741935483871,
"grad_norm": 0.05613262206315994,
"learning_rate": 8.929337085171686e-05,
"loss": 0.0047,
"step": 1128
},
{
"epoch": 7.283870967741936,
"grad_norm": 0.04614945501089096,
"learning_rate": 8.926549793583263e-05,
"loss": 0.0044,
"step": 1129
},
{
"epoch": 7.290322580645161,
"grad_norm": 0.04451045021414757,
"learning_rate": 8.923759314812351e-05,
"loss": 0.0041,
"step": 1130
},
{
"epoch": 7.296774193548387,
"grad_norm": 0.07014819979667664,
"learning_rate": 8.920965651123984e-05,
"loss": 0.0061,
"step": 1131
},
{
"epoch": 7.3032258064516125,
"grad_norm": 0.044604312628507614,
"learning_rate": 8.918168804785782e-05,
"loss": 0.0048,
"step": 1132
},
{
"epoch": 7.309677419354839,
"grad_norm": 0.04796329513192177,
"learning_rate": 8.915368778067945e-05,
"loss": 0.0052,
"step": 1133
},
{
"epoch": 7.316129032258065,
"grad_norm": 0.054540954530239105,
"learning_rate": 8.912565573243256e-05,
"loss": 0.0044,
"step": 1134
},
{
"epoch": 7.32258064516129,
"grad_norm": 0.06614997982978821,
"learning_rate": 8.909759192587078e-05,
"loss": 0.0049,
"step": 1135
},
{
"epoch": 7.329032258064516,
"grad_norm": 0.04955561086535454,
"learning_rate": 8.90694963837735e-05,
"loss": 0.004,
"step": 1136
},
{
"epoch": 7.335483870967742,
"grad_norm": 0.039983998984098434,
"learning_rate": 8.904136912894593e-05,
"loss": 0.0042,
"step": 1137
},
{
"epoch": 7.341935483870968,
"grad_norm": 0.043588194996118546,
"learning_rate": 8.901321018421893e-05,
"loss": 0.0043,
"step": 1138
},
{
"epoch": 7.348387096774194,
"grad_norm": 0.049632057547569275,
"learning_rate": 8.898501957244917e-05,
"loss": 0.0042,
"step": 1139
},
{
"epoch": 7.354838709677419,
"grad_norm": 0.03852184861898422,
"learning_rate": 8.895679731651895e-05,
"loss": 0.0043,
"step": 1140
},
{
"epoch": 7.361290322580645,
"grad_norm": 0.05421644076704979,
"learning_rate": 8.892854343933631e-05,
"loss": 0.0043,
"step": 1141
},
{
"epoch": 7.367741935483871,
"grad_norm": 0.0643831118941307,
"learning_rate": 8.890025796383492e-05,
"loss": 0.0056,
"step": 1142
},
{
"epoch": 7.374193548387097,
"grad_norm": 0.04557755962014198,
"learning_rate": 8.887194091297415e-05,
"loss": 0.0058,
"step": 1143
},
{
"epoch": 7.380645161290323,
"grad_norm": 0.09635678678750992,
"learning_rate": 8.884359230973891e-05,
"loss": 0.005,
"step": 1144
},
{
"epoch": 7.387096774193548,
"grad_norm": 0.05057625100016594,
"learning_rate": 8.881521217713983e-05,
"loss": 0.0054,
"step": 1145
},
{
"epoch": 7.393548387096774,
"grad_norm": 0.04902059584856033,
"learning_rate": 8.878680053821304e-05,
"loss": 0.0043,
"step": 1146
},
{
"epoch": 7.4,
"grad_norm": 0.049218758940696716,
"learning_rate": 8.87583574160203e-05,
"loss": 0.0047,
"step": 1147
},
{
"epoch": 7.406451612903226,
"grad_norm": 0.05688267946243286,
"learning_rate": 8.872988283364892e-05,
"loss": 0.0055,
"step": 1148
},
{
"epoch": 7.412903225806452,
"grad_norm": 0.06391454488039017,
"learning_rate": 8.870137681421169e-05,
"loss": 0.0051,
"step": 1149
},
{
"epoch": 7.419354838709677,
"grad_norm": 0.054065387696027756,
"learning_rate": 8.867283938084701e-05,
"loss": 0.0059,
"step": 1150
},
{
"epoch": 7.419354838709677,
"eval_loss": 0.021773455664515495,
"eval_runtime": 20.6268,
"eval_samples_per_second": 4.848,
"eval_steps_per_second": 0.097,
"step": 1150
},
{
"epoch": 7.425806451612903,
"grad_norm": 0.045604806393384933,
"learning_rate": 8.864427055671869e-05,
"loss": 0.0044,
"step": 1151
},
{
"epoch": 7.432258064516129,
"grad_norm": 0.04603537544608116,
"learning_rate": 8.861567036501608e-05,
"loss": 0.0052,
"step": 1152
},
{
"epoch": 7.438709677419355,
"grad_norm": 0.0699162408709526,
"learning_rate": 8.858703882895397e-05,
"loss": 0.006,
"step": 1153
},
{
"epoch": 7.445161290322581,
"grad_norm": 0.04083402082324028,
"learning_rate": 8.855837597177258e-05,
"loss": 0.0044,
"step": 1154
},
{
"epoch": 7.451612903225806,
"grad_norm": 0.04157475382089615,
"learning_rate": 8.852968181673757e-05,
"loss": 0.0042,
"step": 1155
},
{
"epoch": 7.458064516129032,
"grad_norm": 0.03213357925415039,
"learning_rate": 8.850095638714003e-05,
"loss": 0.0039,
"step": 1156
},
{
"epoch": 7.464516129032258,
"grad_norm": 0.05405053496360779,
"learning_rate": 8.847219970629636e-05,
"loss": 0.0058,
"step": 1157
},
{
"epoch": 7.470967741935484,
"grad_norm": 0.0503312386572361,
"learning_rate": 8.844341179754839e-05,
"loss": 0.0045,
"step": 1158
},
{
"epoch": 7.47741935483871,
"grad_norm": 0.05273361876606941,
"learning_rate": 8.841459268426329e-05,
"loss": 0.0046,
"step": 1159
},
{
"epoch": 7.483870967741936,
"grad_norm": 0.05701204389333725,
"learning_rate": 8.838574238983354e-05,
"loss": 0.005,
"step": 1160
},
{
"epoch": 7.490322580645161,
"grad_norm": 0.07972869277000427,
"learning_rate": 8.835686093767696e-05,
"loss": 0.0044,
"step": 1161
},
{
"epoch": 7.496774193548387,
"grad_norm": 0.04704023525118828,
"learning_rate": 8.832794835123661e-05,
"loss": 0.0044,
"step": 1162
},
{
"epoch": 7.503225806451613,
"grad_norm": 0.0568983368575573,
"learning_rate": 8.829900465398087e-05,
"loss": 0.0052,
"step": 1163
},
{
"epoch": 7.509677419354839,
"grad_norm": 0.046636976301670074,
"learning_rate": 8.827002986940333e-05,
"loss": 0.0046,
"step": 1164
},
{
"epoch": 7.516129032258064,
"grad_norm": 0.06755772978067398,
"learning_rate": 8.824102402102285e-05,
"loss": 0.0052,
"step": 1165
},
{
"epoch": 7.52258064516129,
"grad_norm": 0.04916645213961601,
"learning_rate": 8.821198713238352e-05,
"loss": 0.0053,
"step": 1166
},
{
"epoch": 7.529032258064516,
"grad_norm": 0.049249373376369476,
"learning_rate": 8.818291922705456e-05,
"loss": 0.0049,
"step": 1167
},
{
"epoch": 7.535483870967742,
"grad_norm": 0.046701643615961075,
"learning_rate": 8.815382032863042e-05,
"loss": 0.0042,
"step": 1168
},
{
"epoch": 7.541935483870968,
"grad_norm": 0.04675468057394028,
"learning_rate": 8.812469046073069e-05,
"loss": 0.0053,
"step": 1169
},
{
"epoch": 7.548387096774194,
"grad_norm": 0.040924135595560074,
"learning_rate": 8.80955296470001e-05,
"loss": 0.0046,
"step": 1170
},
{
"epoch": 7.554838709677419,
"grad_norm": 0.06187480688095093,
"learning_rate": 8.806633791110847e-05,
"loss": 0.0047,
"step": 1171
},
{
"epoch": 7.561290322580645,
"grad_norm": 0.04417719691991806,
"learning_rate": 8.80371152767508e-05,
"loss": 0.0048,
"step": 1172
},
{
"epoch": 7.567741935483871,
"grad_norm": 0.0677109807729721,
"learning_rate": 8.800786176764708e-05,
"loss": 0.0048,
"step": 1173
},
{
"epoch": 7.574193548387097,
"grad_norm": 0.045606911182403564,
"learning_rate": 8.797857740754241e-05,
"loss": 0.0053,
"step": 1174
},
{
"epoch": 7.580645161290323,
"grad_norm": 0.04456802085042,
"learning_rate": 8.79492622202069e-05,
"loss": 0.0045,
"step": 1175
},
{
"epoch": 7.580645161290323,
"eval_loss": 0.02147463709115982,
"eval_runtime": 20.6465,
"eval_samples_per_second": 4.843,
"eval_steps_per_second": 0.097,
"step": 1175
},
{
"epoch": 7.587096774193548,
"grad_norm": 0.05277775600552559,
"learning_rate": 8.791991622943574e-05,
"loss": 0.0055,
"step": 1176
},
{
"epoch": 7.593548387096774,
"grad_norm": 0.05247287079691887,
"learning_rate": 8.789053945904906e-05,
"loss": 0.0048,
"step": 1177
},
{
"epoch": 7.6,
"grad_norm": 0.057342078536748886,
"learning_rate": 8.786113193289201e-05,
"loss": 0.0056,
"step": 1178
},
{
"epoch": 7.606451612903226,
"grad_norm": 0.054789651185274124,
"learning_rate": 8.783169367483467e-05,
"loss": 0.0044,
"step": 1179
},
{
"epoch": 7.612903225806452,
"grad_norm": 0.05523642152547836,
"learning_rate": 8.780222470877214e-05,
"loss": 0.0047,
"step": 1180
},
{
"epoch": 7.619354838709677,
"grad_norm": 0.0644751563668251,
"learning_rate": 8.777272505862435e-05,
"loss": 0.0051,
"step": 1181
},
{
"epoch": 7.625806451612903,
"grad_norm": 0.044002220034599304,
"learning_rate": 8.77431947483362e-05,
"loss": 0.0043,
"step": 1182
},
{
"epoch": 7.632258064516129,
"grad_norm": 0.06370144337415695,
"learning_rate": 8.771363380187743e-05,
"loss": 0.0049,
"step": 1183
},
{
"epoch": 7.638709677419355,
"grad_norm": 0.05078444629907608,
"learning_rate": 8.768404224324271e-05,
"loss": 0.005,
"step": 1184
},
{
"epoch": 7.645161290322581,
"grad_norm": 0.051172371953725815,
"learning_rate": 8.76544200964515e-05,
"loss": 0.0049,
"step": 1185
},
{
"epoch": 7.651612903225806,
"grad_norm": 0.04795413091778755,
"learning_rate": 8.762476738554815e-05,
"loss": 0.0045,
"step": 1186
},
{
"epoch": 7.658064516129032,
"grad_norm": 0.06591526418924332,
"learning_rate": 8.759508413460174e-05,
"loss": 0.0051,
"step": 1187
},
{
"epoch": 7.664516129032258,
"grad_norm": 0.06435294449329376,
"learning_rate": 8.75653703677062e-05,
"loss": 0.0057,
"step": 1188
},
{
"epoch": 7.670967741935484,
"grad_norm": 0.051989562809467316,
"learning_rate": 8.753562610898021e-05,
"loss": 0.0049,
"step": 1189
},
{
"epoch": 7.67741935483871,
"grad_norm": 0.039358485490083694,
"learning_rate": 8.750585138256717e-05,
"loss": 0.0043,
"step": 1190
},
{
"epoch": 7.683870967741935,
"grad_norm": 0.03751049563288689,
"learning_rate": 8.74760462126353e-05,
"loss": 0.004,
"step": 1191
},
{
"epoch": 7.690322580645161,
"grad_norm": 0.058525778353214264,
"learning_rate": 8.744621062337741e-05,
"loss": 0.0053,
"step": 1192
},
{
"epoch": 7.6967741935483875,
"grad_norm": 0.05404181033372879,
"learning_rate": 8.74163446390111e-05,
"loss": 0.0051,
"step": 1193
},
{
"epoch": 7.703225806451613,
"grad_norm": 0.043138835579156876,
"learning_rate": 8.73864482837786e-05,
"loss": 0.004,
"step": 1194
},
{
"epoch": 7.709677419354839,
"grad_norm": 0.046529654413461685,
"learning_rate": 8.735652158194678e-05,
"loss": 0.0051,
"step": 1195
},
{
"epoch": 7.716129032258064,
"grad_norm": 0.05675065889954567,
"learning_rate": 8.732656455780717e-05,
"loss": 0.0041,
"step": 1196
},
{
"epoch": 7.72258064516129,
"grad_norm": 0.04349643364548683,
"learning_rate": 8.72965772356759e-05,
"loss": 0.0041,
"step": 1197
},
{
"epoch": 7.729032258064516,
"grad_norm": 0.04814918711781502,
"learning_rate": 8.726655963989371e-05,
"loss": 0.0046,
"step": 1198
},
{
"epoch": 7.735483870967742,
"grad_norm": 0.05350608006119728,
"learning_rate": 8.723651179482588e-05,
"loss": 0.0041,
"step": 1199
},
{
"epoch": 7.741935483870968,
"grad_norm": 0.0744265466928482,
"learning_rate": 8.720643372486228e-05,
"loss": 0.0058,
"step": 1200
},
{
"epoch": 7.741935483870968,
"eval_loss": 0.020646410062909126,
"eval_runtime": 20.6232,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.097,
"step": 1200
}
],
"logging_steps": 1,
"max_steps": 3875,
"num_input_tokens_seen": 0,
"num_train_epochs": 25,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.043424643958702e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}