{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7265193370165746,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.9884898710865566e-05,
"loss": 1.803,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 4.976979742173112e-05,
"loss": 1.781,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 4.965469613259669e-05,
"loss": 1.721,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 4.953959484346225e-05,
"loss": 1.6942,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 4.942449355432781e-05,
"loss": 1.7153,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 4.930939226519337e-05,
"loss": 1.6549,
"step": 60
},
{
"epoch": 0.05,
"learning_rate": 4.919429097605893e-05,
"loss": 1.6161,
"step": 70
},
{
"epoch": 0.06,
"learning_rate": 4.9079189686924495e-05,
"loss": 1.6448,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 4.896408839779006e-05,
"loss": 1.602,
"step": 90
},
{
"epoch": 0.07,
"learning_rate": 4.884898710865562e-05,
"loss": 1.6401,
"step": 100
},
{
"epoch": 0.08,
"learning_rate": 4.873388581952118e-05,
"loss": 1.5939,
"step": 110
},
{
"epoch": 0.08,
"learning_rate": 4.861878453038674e-05,
"loss": 1.6273,
"step": 120
},
{
"epoch": 0.09,
"learning_rate": 4.8503683241252305e-05,
"loss": 1.5847,
"step": 130
},
{
"epoch": 0.1,
"learning_rate": 4.838858195211787e-05,
"loss": 1.6303,
"step": 140
},
{
"epoch": 0.1,
"learning_rate": 4.8273480662983425e-05,
"loss": 1.5958,
"step": 150
},
{
"epoch": 0.11,
"learning_rate": 4.815837937384899e-05,
"loss": 1.628,
"step": 160
},
{
"epoch": 0.12,
"learning_rate": 4.804327808471455e-05,
"loss": 1.6073,
"step": 170
},
{
"epoch": 0.12,
"learning_rate": 4.7928176795580114e-05,
"loss": 1.6069,
"step": 180
},
{
"epoch": 0.13,
"learning_rate": 4.781307550644568e-05,
"loss": 1.5774,
"step": 190
},
{
"epoch": 0.14,
"learning_rate": 4.7697974217311234e-05,
"loss": 1.5908,
"step": 200
},
{
"epoch": 0.15,
"learning_rate": 4.75828729281768e-05,
"loss": 1.6272,
"step": 210
},
{
"epoch": 0.15,
"learning_rate": 4.746777163904236e-05,
"loss": 1.5789,
"step": 220
},
{
"epoch": 0.16,
"learning_rate": 4.7352670349907924e-05,
"loss": 1.6184,
"step": 230
},
{
"epoch": 0.17,
"learning_rate": 4.723756906077349e-05,
"loss": 1.5601,
"step": 240
},
{
"epoch": 0.17,
"learning_rate": 4.7122467771639044e-05,
"loss": 1.6025,
"step": 250
},
{
"epoch": 0.18,
"learning_rate": 4.700736648250461e-05,
"loss": 1.5405,
"step": 260
},
{
"epoch": 0.19,
"learning_rate": 4.6892265193370164e-05,
"loss": 1.5928,
"step": 270
},
{
"epoch": 0.19,
"learning_rate": 4.6777163904235734e-05,
"loss": 1.5753,
"step": 280
},
{
"epoch": 0.2,
"learning_rate": 4.666206261510129e-05,
"loss": 1.6019,
"step": 290
},
{
"epoch": 0.21,
"learning_rate": 4.6546961325966854e-05,
"loss": 1.6118,
"step": 300
},
{
"epoch": 0.21,
"learning_rate": 4.643186003683242e-05,
"loss": 1.6183,
"step": 310
},
{
"epoch": 0.22,
"learning_rate": 4.6316758747697973e-05,
"loss": 1.5882,
"step": 320
},
{
"epoch": 0.23,
"learning_rate": 4.6201657458563544e-05,
"loss": 1.5797,
"step": 330
},
{
"epoch": 0.23,
"learning_rate": 4.60865561694291e-05,
"loss": 1.6227,
"step": 340
},
{
"epoch": 0.24,
"learning_rate": 4.597145488029466e-05,
"loss": 1.5912,
"step": 350
},
{
"epoch": 0.25,
"learning_rate": 4.585635359116022e-05,
"loss": 1.5672,
"step": 360
},
{
"epoch": 0.26,
"learning_rate": 4.574125230202578e-05,
"loss": 1.612,
"step": 370
},
{
"epoch": 0.26,
"learning_rate": 4.5626151012891346e-05,
"loss": 1.6161,
"step": 380
},
{
"epoch": 0.27,
"learning_rate": 4.551104972375691e-05,
"loss": 1.6061,
"step": 390
},
{
"epoch": 0.28,
"learning_rate": 4.539594843462247e-05,
"loss": 1.5733,
"step": 400
},
{
"epoch": 0.28,
"learning_rate": 4.528084714548803e-05,
"loss": 1.5949,
"step": 410
},
{
"epoch": 0.29,
"learning_rate": 4.516574585635359e-05,
"loss": 1.6355,
"step": 420
},
{
"epoch": 0.3,
"learning_rate": 4.505064456721915e-05,
"loss": 1.6296,
"step": 430
},
{
"epoch": 0.3,
"learning_rate": 4.493554327808472e-05,
"loss": 1.592,
"step": 440
},
{
"epoch": 0.31,
"learning_rate": 4.4820441988950276e-05,
"loss": 1.5712,
"step": 450
},
{
"epoch": 0.32,
"learning_rate": 4.470534069981584e-05,
"loss": 1.6086,
"step": 460
},
{
"epoch": 0.32,
"learning_rate": 4.45902394106814e-05,
"loss": 1.5757,
"step": 470
},
{
"epoch": 0.33,
"learning_rate": 4.447513812154696e-05,
"loss": 1.5885,
"step": 480
},
{
"epoch": 0.34,
"learning_rate": 4.436003683241253e-05,
"loss": 1.5828,
"step": 490
},
{
"epoch": 0.35,
"learning_rate": 4.4244935543278086e-05,
"loss": 1.5782,
"step": 500
},
{
"epoch": 0.35,
"learning_rate": 4.412983425414365e-05,
"loss": 1.6,
"step": 510
},
{
"epoch": 0.36,
"learning_rate": 4.401473296500921e-05,
"loss": 1.5992,
"step": 520
},
{
"epoch": 0.37,
"learning_rate": 4.389963167587477e-05,
"loss": 1.5709,
"step": 530
},
{
"epoch": 0.37,
"learning_rate": 4.378453038674034e-05,
"loss": 1.5642,
"step": 540
},
{
"epoch": 0.38,
"learning_rate": 4.3669429097605895e-05,
"loss": 1.6253,
"step": 550
},
{
"epoch": 0.39,
"learning_rate": 4.355432780847146e-05,
"loss": 1.5742,
"step": 560
},
{
"epoch": 0.39,
"learning_rate": 4.3439226519337015e-05,
"loss": 1.5912,
"step": 570
},
{
"epoch": 0.4,
"learning_rate": 4.332412523020258e-05,
"loss": 1.587,
"step": 580
},
{
"epoch": 0.41,
"learning_rate": 4.320902394106814e-05,
"loss": 1.5834,
"step": 590
},
{
"epoch": 0.41,
"learning_rate": 4.3093922651933705e-05,
"loss": 1.6063,
"step": 600
},
{
"epoch": 0.42,
"learning_rate": 4.297882136279927e-05,
"loss": 1.5801,
"step": 610
},
{
"epoch": 0.43,
"learning_rate": 4.2863720073664825e-05,
"loss": 1.5947,
"step": 620
},
{
"epoch": 0.44,
"learning_rate": 4.274861878453039e-05,
"loss": 1.6001,
"step": 630
},
{
"epoch": 0.44,
"learning_rate": 4.263351749539595e-05,
"loss": 1.6476,
"step": 640
},
{
"epoch": 0.45,
"learning_rate": 4.2518416206261515e-05,
"loss": 1.6008,
"step": 650
},
{
"epoch": 0.46,
"learning_rate": 4.240331491712707e-05,
"loss": 1.6011,
"step": 660
},
{
"epoch": 0.46,
"learning_rate": 4.2288213627992634e-05,
"loss": 1.603,
"step": 670
},
{
"epoch": 0.47,
"learning_rate": 4.21731123388582e-05,
"loss": 1.5862,
"step": 680
},
{
"epoch": 0.48,
"learning_rate": 4.205801104972376e-05,
"loss": 1.6024,
"step": 690
},
{
"epoch": 0.48,
"learning_rate": 4.1942909760589324e-05,
"loss": 1.6072,
"step": 700
},
{
"epoch": 0.49,
"learning_rate": 4.182780847145488e-05,
"loss": 1.5769,
"step": 710
},
{
"epoch": 0.5,
"learning_rate": 4.1712707182320444e-05,
"loss": 1.5869,
"step": 720
},
{
"epoch": 0.5,
"learning_rate": 4.1597605893186e-05,
"loss": 1.5877,
"step": 730
},
{
"epoch": 0.51,
"learning_rate": 4.148250460405157e-05,
"loss": 1.6401,
"step": 740
},
{
"epoch": 0.52,
"learning_rate": 4.136740331491713e-05,
"loss": 1.6012,
"step": 750
},
{
"epoch": 0.52,
"learning_rate": 4.125230202578269e-05,
"loss": 1.5881,
"step": 760
},
{
"epoch": 0.53,
"learning_rate": 4.1137200736648254e-05,
"loss": 1.6215,
"step": 770
},
{
"epoch": 0.54,
"learning_rate": 4.102209944751381e-05,
"loss": 1.6339,
"step": 780
},
{
"epoch": 0.55,
"learning_rate": 4.090699815837938e-05,
"loss": 1.627,
"step": 790
},
{
"epoch": 0.55,
"learning_rate": 4.079189686924494e-05,
"loss": 1.595,
"step": 800
},
{
"epoch": 0.56,
"learning_rate": 4.06767955801105e-05,
"loss": 1.5975,
"step": 810
},
{
"epoch": 0.57,
"learning_rate": 4.056169429097606e-05,
"loss": 1.581,
"step": 820
},
{
"epoch": 0.57,
"learning_rate": 4.044659300184162e-05,
"loss": 1.6179,
"step": 830
},
{
"epoch": 0.58,
"learning_rate": 4.033149171270719e-05,
"loss": 1.5888,
"step": 840
},
{
"epoch": 0.59,
"learning_rate": 4.0216390423572746e-05,
"loss": 1.5645,
"step": 850
},
{
"epoch": 0.59,
"learning_rate": 4.010128913443831e-05,
"loss": 1.6079,
"step": 860
},
{
"epoch": 0.6,
"learning_rate": 3.9986187845303866e-05,
"loss": 1.5738,
"step": 870
},
{
"epoch": 0.61,
"learning_rate": 3.987108655616943e-05,
"loss": 1.5887,
"step": 880
},
{
"epoch": 0.61,
"learning_rate": 3.975598526703499e-05,
"loss": 1.6049,
"step": 890
},
{
"epoch": 0.62,
"learning_rate": 3.9640883977900556e-05,
"loss": 1.6021,
"step": 900
},
{
"epoch": 0.63,
"learning_rate": 3.952578268876612e-05,
"loss": 1.5762,
"step": 910
},
{
"epoch": 0.64,
"learning_rate": 3.9410681399631676e-05,
"loss": 1.5568,
"step": 920
},
{
"epoch": 0.64,
"learning_rate": 3.929558011049724e-05,
"loss": 1.5439,
"step": 930
},
{
"epoch": 0.65,
"learning_rate": 3.91804788213628e-05,
"loss": 1.5901,
"step": 940
},
{
"epoch": 0.66,
"learning_rate": 3.9065377532228366e-05,
"loss": 1.6208,
"step": 950
},
{
"epoch": 0.66,
"learning_rate": 3.895027624309392e-05,
"loss": 1.6169,
"step": 960
},
{
"epoch": 0.67,
"learning_rate": 3.8835174953959486e-05,
"loss": 1.6367,
"step": 970
},
{
"epoch": 0.68,
"learning_rate": 3.872007366482505e-05,
"loss": 1.6195,
"step": 980
},
{
"epoch": 0.68,
"learning_rate": 3.860497237569061e-05,
"loss": 1.6107,
"step": 990
},
{
"epoch": 0.69,
"learning_rate": 3.8489871086556175e-05,
"loss": 1.607,
"step": 1000
},
{
"epoch": 0.7,
"learning_rate": 3.837476979742173e-05,
"loss": 1.572,
"step": 1010
},
{
"epoch": 0.7,
"learning_rate": 3.8259668508287295e-05,
"loss": 1.5877,
"step": 1020
},
{
"epoch": 0.71,
"learning_rate": 3.814456721915285e-05,
"loss": 1.5893,
"step": 1030
},
{
"epoch": 0.72,
"learning_rate": 3.8029465930018415e-05,
"loss": 1.5979,
"step": 1040
},
{
"epoch": 0.73,
"learning_rate": 3.791436464088398e-05,
"loss": 1.62,
"step": 1050
},
{
"epoch": 0.73,
"learning_rate": 3.779926335174954e-05,
"loss": 1.5397,
"step": 1060
},
{
"epoch": 0.74,
"learning_rate": 3.7684162062615105e-05,
"loss": 1.6262,
"step": 1070
},
{
"epoch": 0.75,
"learning_rate": 3.756906077348066e-05,
"loss": 1.5745,
"step": 1080
},
{
"epoch": 0.75,
"learning_rate": 3.7453959484346225e-05,
"loss": 1.6107,
"step": 1090
},
{
"epoch": 0.76,
"learning_rate": 3.733885819521179e-05,
"loss": 1.6006,
"step": 1100
},
{
"epoch": 0.77,
"learning_rate": 3.722375690607735e-05,
"loss": 1.6287,
"step": 1110
},
{
"epoch": 0.77,
"learning_rate": 3.710865561694291e-05,
"loss": 1.5726,
"step": 1120
},
{
"epoch": 0.78,
"learning_rate": 3.699355432780847e-05,
"loss": 1.5706,
"step": 1130
},
{
"epoch": 0.79,
"learning_rate": 3.6878453038674034e-05,
"loss": 1.542,
"step": 1140
},
{
"epoch": 0.79,
"learning_rate": 3.67633517495396e-05,
"loss": 1.5889,
"step": 1150
},
{
"epoch": 0.8,
"learning_rate": 3.664825046040516e-05,
"loss": 1.594,
"step": 1160
},
{
"epoch": 0.81,
"learning_rate": 3.653314917127072e-05,
"loss": 1.584,
"step": 1170
},
{
"epoch": 0.81,
"learning_rate": 3.641804788213628e-05,
"loss": 1.5925,
"step": 1180
},
{
"epoch": 0.82,
"learning_rate": 3.6302946593001844e-05,
"loss": 1.6085,
"step": 1190
},
{
"epoch": 0.83,
"learning_rate": 3.618784530386741e-05,
"loss": 1.5677,
"step": 1200
},
{
"epoch": 0.84,
"learning_rate": 3.607274401473297e-05,
"loss": 1.6185,
"step": 1210
},
{
"epoch": 0.84,
"learning_rate": 3.595764272559853e-05,
"loss": 1.584,
"step": 1220
},
{
"epoch": 0.85,
"learning_rate": 3.584254143646409e-05,
"loss": 1.6028,
"step": 1230
},
{
"epoch": 0.86,
"learning_rate": 3.572744014732965e-05,
"loss": 1.5581,
"step": 1240
},
{
"epoch": 0.86,
"learning_rate": 3.561233885819522e-05,
"loss": 1.5907,
"step": 1250
},
{
"epoch": 0.87,
"learning_rate": 3.5497237569060774e-05,
"loss": 1.5602,
"step": 1260
},
{
"epoch": 0.88,
"learning_rate": 3.538213627992634e-05,
"loss": 1.5887,
"step": 1270
},
{
"epoch": 0.88,
"learning_rate": 3.52670349907919e-05,
"loss": 1.5724,
"step": 1280
},
{
"epoch": 0.89,
"learning_rate": 3.515193370165746e-05,
"loss": 1.58,
"step": 1290
},
{
"epoch": 0.9,
"learning_rate": 3.503683241252303e-05,
"loss": 1.6222,
"step": 1300
},
{
"epoch": 0.9,
"learning_rate": 3.492173112338858e-05,
"loss": 1.5889,
"step": 1310
},
{
"epoch": 0.91,
"learning_rate": 3.4806629834254147e-05,
"loss": 1.5776,
"step": 1320
},
{
"epoch": 0.92,
"learning_rate": 3.46915285451197e-05,
"loss": 1.5561,
"step": 1330
},
{
"epoch": 0.93,
"learning_rate": 3.4576427255985266e-05,
"loss": 1.6076,
"step": 1340
},
{
"epoch": 0.93,
"learning_rate": 3.446132596685083e-05,
"loss": 1.586,
"step": 1350
},
{
"epoch": 0.94,
"learning_rate": 3.434622467771639e-05,
"loss": 1.5443,
"step": 1360
},
{
"epoch": 0.95,
"learning_rate": 3.4231123388581956e-05,
"loss": 1.6226,
"step": 1370
},
{
"epoch": 0.95,
"learning_rate": 3.411602209944751e-05,
"loss": 1.5835,
"step": 1380
},
{
"epoch": 0.96,
"learning_rate": 3.4000920810313076e-05,
"loss": 1.5367,
"step": 1390
},
{
"epoch": 0.97,
"learning_rate": 3.388581952117864e-05,
"loss": 1.5967,
"step": 1400
},
{
"epoch": 0.97,
"learning_rate": 3.37707182320442e-05,
"loss": 1.559,
"step": 1410
},
{
"epoch": 0.98,
"learning_rate": 3.365561694290976e-05,
"loss": 1.6235,
"step": 1420
},
{
"epoch": 0.99,
"learning_rate": 3.354051565377532e-05,
"loss": 1.5517,
"step": 1430
},
{
"epoch": 0.99,
"learning_rate": 3.3425414364640886e-05,
"loss": 1.5651,
"step": 1440
},
{
"epoch": 1.0,
"learning_rate": 3.331031307550645e-05,
"loss": 1.5994,
"step": 1450
},
{
"epoch": 1.01,
"learning_rate": 3.319521178637201e-05,
"loss": 1.6072,
"step": 1460
},
{
"epoch": 1.02,
"learning_rate": 3.308011049723757e-05,
"loss": 1.5715,
"step": 1470
},
{
"epoch": 1.02,
"learning_rate": 3.296500920810313e-05,
"loss": 1.5736,
"step": 1480
},
{
"epoch": 1.03,
"learning_rate": 3.2849907918968695e-05,
"loss": 1.5581,
"step": 1490
},
{
"epoch": 1.04,
"learning_rate": 3.273480662983426e-05,
"loss": 1.5204,
"step": 1500
},
{
"epoch": 1.04,
"learning_rate": 3.261970534069982e-05,
"loss": 1.5835,
"step": 1510
},
{
"epoch": 1.05,
"learning_rate": 3.250460405156538e-05,
"loss": 1.5892,
"step": 1520
},
{
"epoch": 1.06,
"learning_rate": 3.238950276243094e-05,
"loss": 1.5769,
"step": 1530
},
{
"epoch": 1.06,
"learning_rate": 3.22744014732965e-05,
"loss": 1.5565,
"step": 1540
},
{
"epoch": 1.07,
"learning_rate": 3.215930018416207e-05,
"loss": 1.585,
"step": 1550
},
{
"epoch": 1.08,
"learning_rate": 3.2044198895027625e-05,
"loss": 1.5896,
"step": 1560
},
{
"epoch": 1.08,
"learning_rate": 3.192909760589319e-05,
"loss": 1.5741,
"step": 1570
},
{
"epoch": 1.09,
"learning_rate": 3.181399631675875e-05,
"loss": 1.5669,
"step": 1580
},
{
"epoch": 1.1,
"learning_rate": 3.169889502762431e-05,
"loss": 1.6052,
"step": 1590
},
{
"epoch": 1.1,
"learning_rate": 3.158379373848988e-05,
"loss": 1.6087,
"step": 1600
},
{
"epoch": 1.11,
"learning_rate": 3.1468692449355434e-05,
"loss": 1.6186,
"step": 1610
},
{
"epoch": 1.12,
"learning_rate": 3.1353591160221e-05,
"loss": 1.5654,
"step": 1620
},
{
"epoch": 1.13,
"learning_rate": 3.1238489871086554e-05,
"loss": 1.5747,
"step": 1630
},
{
"epoch": 1.13,
"learning_rate": 3.112338858195212e-05,
"loss": 1.6006,
"step": 1640
},
{
"epoch": 1.14,
"learning_rate": 3.100828729281768e-05,
"loss": 1.5931,
"step": 1650
},
{
"epoch": 1.15,
"learning_rate": 3.0893186003683244e-05,
"loss": 1.5518,
"step": 1660
},
{
"epoch": 1.15,
"learning_rate": 3.077808471454881e-05,
"loss": 1.5872,
"step": 1670
},
{
"epoch": 1.16,
"learning_rate": 3.0662983425414364e-05,
"loss": 1.6109,
"step": 1680
},
{
"epoch": 1.17,
"learning_rate": 3.054788213627993e-05,
"loss": 1.5978,
"step": 1690
},
{
"epoch": 1.17,
"learning_rate": 3.0432780847145487e-05,
"loss": 1.6005,
"step": 1700
},
{
"epoch": 1.18,
"learning_rate": 3.0317679558011054e-05,
"loss": 1.5629,
"step": 1710
},
{
"epoch": 1.19,
"learning_rate": 3.0202578268876614e-05,
"loss": 1.5958,
"step": 1720
},
{
"epoch": 1.19,
"learning_rate": 3.0087476979742174e-05,
"loss": 1.5918,
"step": 1730
},
{
"epoch": 1.2,
"learning_rate": 2.9972375690607734e-05,
"loss": 1.6137,
"step": 1740
},
{
"epoch": 1.21,
"learning_rate": 2.9857274401473297e-05,
"loss": 1.5628,
"step": 1750
},
{
"epoch": 1.22,
"learning_rate": 2.974217311233886e-05,
"loss": 1.5806,
"step": 1760
},
{
"epoch": 1.22,
"learning_rate": 2.9627071823204423e-05,
"loss": 1.5868,
"step": 1770
},
{
"epoch": 1.23,
"learning_rate": 2.9511970534069983e-05,
"loss": 1.5842,
"step": 1780
},
{
"epoch": 1.24,
"learning_rate": 2.9396869244935543e-05,
"loss": 1.5898,
"step": 1790
},
{
"epoch": 1.24,
"learning_rate": 2.9281767955801103e-05,
"loss": 1.5812,
"step": 1800
},
{
"epoch": 1.25,
"learning_rate": 2.916666666666667e-05,
"loss": 1.5812,
"step": 1810
},
{
"epoch": 1.26,
"learning_rate": 2.905156537753223e-05,
"loss": 1.6343,
"step": 1820
},
{
"epoch": 1.26,
"learning_rate": 2.893646408839779e-05,
"loss": 1.5993,
"step": 1830
},
{
"epoch": 1.27,
"learning_rate": 2.8821362799263353e-05,
"loss": 1.5398,
"step": 1840
},
{
"epoch": 1.28,
"learning_rate": 2.8706261510128913e-05,
"loss": 1.585,
"step": 1850
},
{
"epoch": 1.28,
"learning_rate": 2.859116022099448e-05,
"loss": 1.5969,
"step": 1860
},
{
"epoch": 1.29,
"learning_rate": 2.847605893186004e-05,
"loss": 1.582,
"step": 1870
},
{
"epoch": 1.3,
"learning_rate": 2.83609576427256e-05,
"loss": 1.5646,
"step": 1880
},
{
"epoch": 1.31,
"learning_rate": 2.824585635359116e-05,
"loss": 1.5525,
"step": 1890
},
{
"epoch": 1.31,
"learning_rate": 2.8130755064456722e-05,
"loss": 1.5957,
"step": 1900
},
{
"epoch": 1.32,
"learning_rate": 2.8015653775322286e-05,
"loss": 1.5797,
"step": 1910
},
{
"epoch": 1.33,
"learning_rate": 2.790055248618785e-05,
"loss": 1.5693,
"step": 1920
},
{
"epoch": 1.33,
"learning_rate": 2.778545119705341e-05,
"loss": 1.5788,
"step": 1930
},
{
"epoch": 1.34,
"learning_rate": 2.767034990791897e-05,
"loss": 1.5635,
"step": 1940
},
{
"epoch": 1.35,
"learning_rate": 2.755524861878453e-05,
"loss": 1.5764,
"step": 1950
},
{
"epoch": 1.35,
"learning_rate": 2.7440147329650095e-05,
"loss": 1.5704,
"step": 1960
},
{
"epoch": 1.36,
"learning_rate": 2.7325046040515655e-05,
"loss": 1.5623,
"step": 1970
},
{
"epoch": 1.37,
"learning_rate": 2.7209944751381215e-05,
"loss": 1.6165,
"step": 1980
},
{
"epoch": 1.37,
"learning_rate": 2.709484346224678e-05,
"loss": 1.5834,
"step": 1990
},
{
"epoch": 1.38,
"learning_rate": 2.697974217311234e-05,
"loss": 1.5569,
"step": 2000
},
{
"epoch": 1.39,
"learning_rate": 2.6864640883977905e-05,
"loss": 1.5973,
"step": 2010
},
{
"epoch": 1.4,
"learning_rate": 2.6749539594843465e-05,
"loss": 1.611,
"step": 2020
},
{
"epoch": 1.4,
"learning_rate": 2.6634438305709025e-05,
"loss": 1.5728,
"step": 2030
},
{
"epoch": 1.41,
"learning_rate": 2.6519337016574585e-05,
"loss": 1.5953,
"step": 2040
},
{
"epoch": 1.42,
"learning_rate": 2.6404235727440148e-05,
"loss": 1.5468,
"step": 2050
},
{
"epoch": 1.42,
"learning_rate": 2.628913443830571e-05,
"loss": 1.5737,
"step": 2060
},
{
"epoch": 1.43,
"learning_rate": 2.6174033149171275e-05,
"loss": 1.559,
"step": 2070
},
{
"epoch": 1.44,
"learning_rate": 2.6058931860036835e-05,
"loss": 1.595,
"step": 2080
},
{
"epoch": 1.44,
"learning_rate": 2.5943830570902394e-05,
"loss": 1.5688,
"step": 2090
},
{
"epoch": 1.45,
"learning_rate": 2.5828729281767954e-05,
"loss": 1.5566,
"step": 2100
},
{
"epoch": 1.46,
"learning_rate": 2.571362799263352e-05,
"loss": 1.6499,
"step": 2110
},
{
"epoch": 1.46,
"learning_rate": 2.559852670349908e-05,
"loss": 1.6233,
"step": 2120
},
{
"epoch": 1.47,
"learning_rate": 2.548342541436464e-05,
"loss": 1.5406,
"step": 2130
},
{
"epoch": 1.48,
"learning_rate": 2.5368324125230204e-05,
"loss": 1.5989,
"step": 2140
},
{
"epoch": 1.48,
"learning_rate": 2.5253222836095764e-05,
"loss": 1.5964,
"step": 2150
},
{
"epoch": 1.49,
"learning_rate": 2.513812154696133e-05,
"loss": 1.5715,
"step": 2160
},
{
"epoch": 1.5,
"learning_rate": 2.502302025782689e-05,
"loss": 1.5662,
"step": 2170
},
{
"epoch": 1.51,
"learning_rate": 2.490791896869245e-05,
"loss": 1.6269,
"step": 2180
},
{
"epoch": 1.51,
"learning_rate": 2.479281767955801e-05,
"loss": 1.5549,
"step": 2190
},
{
"epoch": 1.52,
"learning_rate": 2.4677716390423574e-05,
"loss": 1.6521,
"step": 2200
},
{
"epoch": 1.53,
"learning_rate": 2.4562615101289137e-05,
"loss": 1.6164,
"step": 2210
},
{
"epoch": 1.53,
"learning_rate": 2.4447513812154697e-05,
"loss": 1.5851,
"step": 2220
},
{
"epoch": 1.54,
"learning_rate": 2.433241252302026e-05,
"loss": 1.6146,
"step": 2230
},
{
"epoch": 1.55,
"learning_rate": 2.421731123388582e-05,
"loss": 1.5704,
"step": 2240
},
{
"epoch": 1.55,
"learning_rate": 2.4102209944751383e-05,
"loss": 1.6094,
"step": 2250
},
{
"epoch": 1.56,
"learning_rate": 2.3987108655616943e-05,
"loss": 1.5886,
"step": 2260
},
{
"epoch": 1.57,
"learning_rate": 2.3872007366482503e-05,
"loss": 1.5399,
"step": 2270
},
{
"epoch": 1.57,
"learning_rate": 2.3756906077348066e-05,
"loss": 1.5285,
"step": 2280
},
{
"epoch": 1.58,
"learning_rate": 2.364180478821363e-05,
"loss": 1.6065,
"step": 2290
},
{
"epoch": 1.59,
"learning_rate": 2.3526703499079193e-05,
"loss": 1.5909,
"step": 2300
},
{
"epoch": 1.6,
"learning_rate": 2.3411602209944753e-05,
"loss": 1.5671,
"step": 2310
},
{
"epoch": 1.6,
"learning_rate": 2.3296500920810313e-05,
"loss": 1.6306,
"step": 2320
},
{
"epoch": 1.61,
"learning_rate": 2.3181399631675876e-05,
"loss": 1.5684,
"step": 2330
},
{
"epoch": 1.62,
"learning_rate": 2.3066298342541436e-05,
"loss": 1.5746,
"step": 2340
},
{
"epoch": 1.62,
"learning_rate": 2.2951197053407e-05,
"loss": 1.5796,
"step": 2350
},
{
"epoch": 1.63,
"learning_rate": 2.2836095764272563e-05,
"loss": 1.5653,
"step": 2360
},
{
"epoch": 1.64,
"learning_rate": 2.2720994475138122e-05,
"loss": 1.6186,
"step": 2370
},
{
"epoch": 1.64,
"learning_rate": 2.2605893186003686e-05,
"loss": 1.5848,
"step": 2380
},
{
"epoch": 1.65,
"learning_rate": 2.2490791896869246e-05,
"loss": 1.5488,
"step": 2390
},
{
"epoch": 1.66,
"learning_rate": 2.237569060773481e-05,
"loss": 1.5522,
"step": 2400
},
{
"epoch": 1.66,
"learning_rate": 2.226058931860037e-05,
"loss": 1.5386,
"step": 2410
},
{
"epoch": 1.67,
"learning_rate": 2.214548802946593e-05,
"loss": 1.5603,
"step": 2420
},
{
"epoch": 1.68,
"learning_rate": 2.2030386740331492e-05,
"loss": 1.5723,
"step": 2430
},
{
"epoch": 1.69,
"learning_rate": 2.1915285451197055e-05,
"loss": 1.5712,
"step": 2440
},
{
"epoch": 1.69,
"learning_rate": 2.180018416206262e-05,
"loss": 1.5656,
"step": 2450
},
{
"epoch": 1.7,
"learning_rate": 2.168508287292818e-05,
"loss": 1.6042,
"step": 2460
},
{
"epoch": 1.71,
"learning_rate": 2.156998158379374e-05,
"loss": 1.619,
"step": 2470
},
{
"epoch": 1.71,
"learning_rate": 2.1454880294659302e-05,
"loss": 1.6388,
"step": 2480
},
{
"epoch": 1.72,
"learning_rate": 2.133977900552486e-05,
"loss": 1.5726,
"step": 2490
},
{
"epoch": 1.73,
"learning_rate": 2.1224677716390425e-05,
"loss": 1.5748,
"step": 2500
}
],
"logging_steps": 10,
"max_steps": 4344,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 1.579588036193157e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}