{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.903225806451613,
"eval_steps": 500,
"global_step": 190,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.025806451612903226,
"grad_norm": 611.8341064453125,
"learning_rate": 5.000000000000001e-07,
"loss": 13.7821,
"num_input_tokens_seen": 6416,
"step": 1
},
{
"epoch": 0.05161290322580645,
"grad_norm": 617.9733276367188,
"learning_rate": 1.0000000000000002e-06,
"loss": 13.6363,
"num_input_tokens_seen": 12992,
"step": 2
},
{
"epoch": 0.07741935483870968,
"grad_norm": 608.6278686523438,
"learning_rate": 1.5e-06,
"loss": 13.6033,
"num_input_tokens_seen": 19472,
"step": 3
},
{
"epoch": 0.1032258064516129,
"grad_norm": 618.8984985351562,
"learning_rate": 2.0000000000000003e-06,
"loss": 12.5696,
"num_input_tokens_seen": 25936,
"step": 4
},
{
"epoch": 0.12903225806451613,
"grad_norm": 481.14508056640625,
"learning_rate": 2.5e-06,
"loss": 9.3589,
"num_input_tokens_seen": 32272,
"step": 5
},
{
"epoch": 0.15483870967741936,
"grad_norm": 592.4844970703125,
"learning_rate": 3e-06,
"loss": 6.7715,
"num_input_tokens_seen": 38640,
"step": 6
},
{
"epoch": 0.18064516129032257,
"grad_norm": 468.1033935546875,
"learning_rate": 3.5e-06,
"loss": 5.3541,
"num_input_tokens_seen": 44992,
"step": 7
},
{
"epoch": 0.2064516129032258,
"grad_norm": 226.9576416015625,
"learning_rate": 4.000000000000001e-06,
"loss": 1.9295,
"num_input_tokens_seen": 51328,
"step": 8
},
{
"epoch": 0.23225806451612904,
"grad_norm": 106.9604721069336,
"learning_rate": 4.5e-06,
"loss": 0.6328,
"num_input_tokens_seen": 57408,
"step": 9
},
{
"epoch": 0.25806451612903225,
"grad_norm": 426.4488830566406,
"learning_rate": 5e-06,
"loss": 3.3225,
"num_input_tokens_seen": 63696,
"step": 10
},
{
"epoch": 0.2838709677419355,
"grad_norm": 28.350229263305664,
"learning_rate": 4.9996192378909785e-06,
"loss": 0.2598,
"num_input_tokens_seen": 69984,
"step": 11
},
{
"epoch": 0.3096774193548387,
"grad_norm": 198.60235595703125,
"learning_rate": 4.99847706754774e-06,
"loss": 0.6874,
"num_input_tokens_seen": 76384,
"step": 12
},
{
"epoch": 0.33548387096774196,
"grad_norm": 286.51763916015625,
"learning_rate": 4.9965738368864345e-06,
"loss": 2.0329,
"num_input_tokens_seen": 82704,
"step": 13
},
{
"epoch": 0.36129032258064514,
"grad_norm": 76.4217300415039,
"learning_rate": 4.993910125649561e-06,
"loss": 0.4942,
"num_input_tokens_seen": 88976,
"step": 14
},
{
"epoch": 0.3870967741935484,
"grad_norm": 150.88587951660156,
"learning_rate": 4.990486745229364e-06,
"loss": 1.1786,
"num_input_tokens_seen": 95472,
"step": 15
},
{
"epoch": 0.4129032258064516,
"grad_norm": 60.549949645996094,
"learning_rate": 4.986304738420684e-06,
"loss": 0.4424,
"num_input_tokens_seen": 101904,
"step": 16
},
{
"epoch": 0.43870967741935485,
"grad_norm": 56.29042053222656,
"learning_rate": 4.981365379103306e-06,
"loss": 0.3336,
"num_input_tokens_seen": 108272,
"step": 17
},
{
"epoch": 0.4645161290322581,
"grad_norm": 27.45728874206543,
"learning_rate": 4.975670171853926e-06,
"loss": 0.2568,
"num_input_tokens_seen": 114464,
"step": 18
},
{
"epoch": 0.49032258064516127,
"grad_norm": 20.99466323852539,
"learning_rate": 4.9692208514878445e-06,
"loss": 0.1889,
"num_input_tokens_seen": 120816,
"step": 19
},
{
"epoch": 0.5161290322580645,
"grad_norm": 13.661433219909668,
"learning_rate": 4.962019382530521e-06,
"loss": 0.1974,
"num_input_tokens_seen": 127120,
"step": 20
},
{
"epoch": 0.5419354838709678,
"grad_norm": 16.250490188598633,
"learning_rate": 4.9540679586191605e-06,
"loss": 0.1766,
"num_input_tokens_seen": 133712,
"step": 21
},
{
"epoch": 0.567741935483871,
"grad_norm": 10.539461135864258,
"learning_rate": 4.9453690018345144e-06,
"loss": 0.1694,
"num_input_tokens_seen": 139904,
"step": 22
},
{
"epoch": 0.5935483870967742,
"grad_norm": 11.246196746826172,
"learning_rate": 4.935925161963089e-06,
"loss": 0.1374,
"num_input_tokens_seen": 146256,
"step": 23
},
{
"epoch": 0.6193548387096774,
"grad_norm": 16.509199142456055,
"learning_rate": 4.925739315689991e-06,
"loss": 0.1496,
"num_input_tokens_seen": 152784,
"step": 24
},
{
"epoch": 0.6451612903225806,
"grad_norm": 7.252533435821533,
"learning_rate": 4.914814565722671e-06,
"loss": 0.1554,
"num_input_tokens_seen": 158976,
"step": 25
},
{
"epoch": 0.6709677419354839,
"grad_norm": 11.432785034179688,
"learning_rate": 4.903154239845798e-06,
"loss": 0.0918,
"num_input_tokens_seen": 165280,
"step": 26
},
{
"epoch": 0.6967741935483871,
"grad_norm": 8.87109375,
"learning_rate": 4.890761889907589e-06,
"loss": 0.1062,
"num_input_tokens_seen": 171808,
"step": 27
},
{
"epoch": 0.7225806451612903,
"grad_norm": 26.34603500366211,
"learning_rate": 4.8776412907378845e-06,
"loss": 0.1975,
"num_input_tokens_seen": 178112,
"step": 28
},
{
"epoch": 0.7483870967741936,
"grad_norm": 20.092260360717773,
"learning_rate": 4.863796438998293e-06,
"loss": 0.1389,
"num_input_tokens_seen": 184448,
"step": 29
},
{
"epoch": 0.7741935483870968,
"grad_norm": 16.30493927001953,
"learning_rate": 4.849231551964771e-06,
"loss": 0.1382,
"num_input_tokens_seen": 190896,
"step": 30
},
{
"epoch": 0.8,
"grad_norm": 20.723533630371094,
"learning_rate": 4.833951066243004e-06,
"loss": 0.1982,
"num_input_tokens_seen": 197024,
"step": 31
},
{
"epoch": 0.8258064516129032,
"grad_norm": 7.432217597961426,
"learning_rate": 4.817959636416969e-06,
"loss": 0.1072,
"num_input_tokens_seen": 203248,
"step": 32
},
{
"epoch": 0.8516129032258064,
"grad_norm": 8.914057731628418,
"learning_rate": 4.801262133631101e-06,
"loss": 0.0757,
"num_input_tokens_seen": 209760,
"step": 33
},
{
"epoch": 0.8774193548387097,
"grad_norm": 12.913470268249512,
"learning_rate": 4.783863644106502e-06,
"loss": 0.0829,
"num_input_tokens_seen": 215968,
"step": 34
},
{
"epoch": 0.9032258064516129,
"grad_norm": 8.646073341369629,
"learning_rate": 4.765769467591626e-06,
"loss": 0.1017,
"num_input_tokens_seen": 222192,
"step": 35
},
{
"epoch": 0.9290322580645162,
"grad_norm": 11.357608795166016,
"learning_rate": 4.746985115747918e-06,
"loss": 0.0957,
"num_input_tokens_seen": 228512,
"step": 36
},
{
"epoch": 0.9548387096774194,
"grad_norm": 6.806307315826416,
"learning_rate": 4.72751631047092e-06,
"loss": 0.0999,
"num_input_tokens_seen": 235120,
"step": 37
},
{
"epoch": 0.9806451612903225,
"grad_norm": 7.694289684295654,
"learning_rate": 4.707368982147318e-06,
"loss": 0.0581,
"num_input_tokens_seen": 241584,
"step": 38
},
{
"epoch": 1.0064516129032257,
"grad_norm": 6.573248386383057,
"learning_rate": 4.68654926784849e-06,
"loss": 0.0923,
"num_input_tokens_seen": 248080,
"step": 39
},
{
"epoch": 1.032258064516129,
"grad_norm": 6.997208118438721,
"learning_rate": 4.665063509461098e-06,
"loss": 0.0506,
"num_input_tokens_seen": 254416,
"step": 40
},
{
"epoch": 1.0580645161290323,
"grad_norm": 8.015267372131348,
"learning_rate": 4.642918251755281e-06,
"loss": 0.0333,
"num_input_tokens_seen": 260640,
"step": 41
},
{
"epoch": 1.0838709677419356,
"grad_norm": 6.756544589996338,
"learning_rate": 4.620120240391065e-06,
"loss": 0.038,
"num_input_tokens_seen": 267072,
"step": 42
},
{
"epoch": 1.1096774193548387,
"grad_norm": 4.104006290435791,
"learning_rate": 4.596676419863561e-06,
"loss": 0.0416,
"num_input_tokens_seen": 273392,
"step": 43
},
{
"epoch": 1.135483870967742,
"grad_norm": 16.92180633544922,
"learning_rate": 4.572593931387604e-06,
"loss": 0.1068,
"num_input_tokens_seen": 279680,
"step": 44
},
{
"epoch": 1.1612903225806452,
"grad_norm": 6.803272247314453,
"learning_rate": 4.54788011072248e-06,
"loss": 0.0369,
"num_input_tokens_seen": 285968,
"step": 45
},
{
"epoch": 1.1870967741935483,
"grad_norm": 10.87576675415039,
"learning_rate": 4.522542485937369e-06,
"loss": 0.1703,
"num_input_tokens_seen": 292304,
"step": 46
},
{
"epoch": 1.2129032258064516,
"grad_norm": 10.078003883361816,
"learning_rate": 4.496588775118232e-06,
"loss": 0.1102,
"num_input_tokens_seen": 298608,
"step": 47
},
{
"epoch": 1.238709677419355,
"grad_norm": 14.269394874572754,
"learning_rate": 4.470026884016805e-06,
"loss": 0.0595,
"num_input_tokens_seen": 304912,
"step": 48
},
{
"epoch": 1.2645161290322582,
"grad_norm": 12.623174667358398,
"learning_rate": 4.442864903642428e-06,
"loss": 0.1009,
"num_input_tokens_seen": 311328,
"step": 49
},
{
"epoch": 1.2903225806451613,
"grad_norm": 6.7360053062438965,
"learning_rate": 4.415111107797445e-06,
"loss": 0.0434,
"num_input_tokens_seen": 317664,
"step": 50
},
{
"epoch": 1.3161290322580645,
"grad_norm": 1.8871471881866455,
"learning_rate": 4.386773950556931e-06,
"loss": 0.0281,
"num_input_tokens_seen": 324128,
"step": 51
},
{
"epoch": 1.3419354838709676,
"grad_norm": 8.304376602172852,
"learning_rate": 4.357862063693486e-06,
"loss": 0.0513,
"num_input_tokens_seen": 330304,
"step": 52
},
{
"epoch": 1.367741935483871,
"grad_norm": 9.218457221984863,
"learning_rate": 4.328384254047927e-06,
"loss": 0.0902,
"num_input_tokens_seen": 336608,
"step": 53
},
{
"epoch": 1.3935483870967742,
"grad_norm": 6.52198600769043,
"learning_rate": 4.2983495008466285e-06,
"loss": 0.0448,
"num_input_tokens_seen": 342944,
"step": 54
},
{
"epoch": 1.4193548387096775,
"grad_norm": 5.9580397605896,
"learning_rate": 4.267766952966369e-06,
"loss": 0.036,
"num_input_tokens_seen": 349504,
"step": 55
},
{
"epoch": 1.4451612903225808,
"grad_norm": 7.217082977294922,
"learning_rate": 4.236645926147493e-06,
"loss": 0.0279,
"num_input_tokens_seen": 355808,
"step": 56
},
{
"epoch": 1.4709677419354839,
"grad_norm": 8.09554386138916,
"learning_rate": 4.204995900156247e-06,
"loss": 0.0527,
"num_input_tokens_seen": 362144,
"step": 57
},
{
"epoch": 1.4967741935483871,
"grad_norm": 9.561200141906738,
"learning_rate": 4.172826515897146e-06,
"loss": 0.0466,
"num_input_tokens_seen": 368800,
"step": 58
},
{
"epoch": 1.5225806451612902,
"grad_norm": 4.221444606781006,
"learning_rate": 4.140147572476269e-06,
"loss": 0.0203,
"num_input_tokens_seen": 375264,
"step": 59
},
{
"epoch": 1.5483870967741935,
"grad_norm": 8.664204597473145,
"learning_rate": 4.106969024216348e-06,
"loss": 0.0693,
"num_input_tokens_seen": 381408,
"step": 60
},
{
"epoch": 1.5741935483870968,
"grad_norm": 3.899017333984375,
"learning_rate": 4.073300977624594e-06,
"loss": 0.0193,
"num_input_tokens_seen": 387552,
"step": 61
},
{
"epoch": 1.6,
"grad_norm": 8.732043266296387,
"learning_rate": 4.039153688314146e-06,
"loss": 0.1155,
"num_input_tokens_seen": 394128,
"step": 62
},
{
"epoch": 1.6258064516129034,
"grad_norm": 5.531858921051025,
"learning_rate": 4.0045375578801216e-06,
"loss": 0.0594,
"num_input_tokens_seen": 400512,
"step": 63
},
{
"epoch": 1.6516129032258065,
"grad_norm": 8.751105308532715,
"learning_rate": 3.969463130731183e-06,
"loss": 0.0391,
"num_input_tokens_seen": 406752,
"step": 64
},
{
"epoch": 1.6774193548387095,
"grad_norm": 5.837942600250244,
"learning_rate": 3.933941090877615e-06,
"loss": 0.0552,
"num_input_tokens_seen": 413040,
"step": 65
},
{
"epoch": 1.7032258064516128,
"grad_norm": 3.627204418182373,
"learning_rate": 3.897982258676867e-06,
"loss": 0.03,
"num_input_tokens_seen": 419408,
"step": 66
},
{
"epoch": 1.729032258064516,
"grad_norm": 7.315670490264893,
"learning_rate": 3.861597587537568e-06,
"loss": 0.0458,
"num_input_tokens_seen": 425920,
"step": 67
},
{
"epoch": 1.7548387096774194,
"grad_norm": 5.958889484405518,
"learning_rate": 3.824798160583012e-06,
"loss": 0.0502,
"num_input_tokens_seen": 432400,
"step": 68
},
{
"epoch": 1.7806451612903227,
"grad_norm": 6.365023136138916,
"learning_rate": 3.787595187275136e-06,
"loss": 0.0513,
"num_input_tokens_seen": 438688,
"step": 69
},
{
"epoch": 1.8064516129032258,
"grad_norm": 3.058981418609619,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0309,
"num_input_tokens_seen": 445280,
"step": 70
},
{
"epoch": 1.832258064516129,
"grad_norm": 4.8149003982543945,
"learning_rate": 3.7120240506158433e-06,
"loss": 0.0889,
"num_input_tokens_seen": 451616,
"step": 71
},
{
"epoch": 1.8580645161290321,
"grad_norm": 7.181830406188965,
"learning_rate": 3.6736789069647273e-06,
"loss": 0.0868,
"num_input_tokens_seen": 457856,
"step": 72
},
{
"epoch": 1.8838709677419354,
"grad_norm": 7.750051975250244,
"learning_rate": 3.634976249348867e-06,
"loss": 0.0516,
"num_input_tokens_seen": 464144,
"step": 73
},
{
"epoch": 1.9096774193548387,
"grad_norm": 5.294251918792725,
"learning_rate": 3.595927866972694e-06,
"loss": 0.059,
"num_input_tokens_seen": 470736,
"step": 74
},
{
"epoch": 1.935483870967742,
"grad_norm": 4.239963531494141,
"learning_rate": 3.556545654351749e-06,
"loss": 0.0475,
"num_input_tokens_seen": 477168,
"step": 75
},
{
"epoch": 1.9612903225806453,
"grad_norm": 5.429728031158447,
"learning_rate": 3.516841607689501e-06,
"loss": 0.0704,
"num_input_tokens_seen": 483536,
"step": 76
},
{
"epoch": 1.9870967741935484,
"grad_norm": 11.018205642700195,
"learning_rate": 3.476827821223184e-06,
"loss": 0.0666,
"num_input_tokens_seen": 489760,
"step": 77
},
{
"epoch": 2.0129032258064514,
"grad_norm": 8.510910987854004,
"learning_rate": 3.436516483539781e-06,
"loss": 0.0275,
"num_input_tokens_seen": 496000,
"step": 78
},
{
"epoch": 2.0387096774193547,
"grad_norm": 4.036402225494385,
"learning_rate": 3.39591987386325e-06,
"loss": 0.0169,
"num_input_tokens_seen": 502384,
"step": 79
},
{
"epoch": 2.064516129032258,
"grad_norm": 1.8402727842330933,
"learning_rate": 3.3550503583141726e-06,
"loss": 0.0056,
"num_input_tokens_seen": 508992,
"step": 80
},
{
"epoch": 2.0903225806451613,
"grad_norm": 3.9755101203918457,
"learning_rate": 3.313920386142892e-06,
"loss": 0.0139,
"num_input_tokens_seen": 515216,
"step": 81
},
{
"epoch": 2.1161290322580646,
"grad_norm": 6.4678168296813965,
"learning_rate": 3.272542485937369e-06,
"loss": 0.0561,
"num_input_tokens_seen": 521792,
"step": 82
},
{
"epoch": 2.141935483870968,
"grad_norm": 4.438370704650879,
"learning_rate": 3.230929261806842e-06,
"loss": 0.0098,
"num_input_tokens_seen": 528176,
"step": 83
},
{
"epoch": 2.167741935483871,
"grad_norm": 1.3302000761032104,
"learning_rate": 3.189093389542498e-06,
"loss": 0.0037,
"num_input_tokens_seen": 534496,
"step": 84
},
{
"epoch": 2.193548387096774,
"grad_norm": 4.607197284698486,
"learning_rate": 3.147047612756302e-06,
"loss": 0.0194,
"num_input_tokens_seen": 541024,
"step": 85
},
{
"epoch": 2.2193548387096773,
"grad_norm": 0.3103199899196625,
"learning_rate": 3.1048047389991693e-06,
"loss": 0.0004,
"num_input_tokens_seen": 547328,
"step": 86
},
{
"epoch": 2.2451612903225806,
"grad_norm": 0.17417627573013306,
"learning_rate": 3.062377635859663e-06,
"loss": 0.0003,
"num_input_tokens_seen": 553760,
"step": 87
},
{
"epoch": 2.270967741935484,
"grad_norm": 21.153165817260742,
"learning_rate": 3.019779227044398e-06,
"loss": 0.0511,
"num_input_tokens_seen": 560048,
"step": 88
},
{
"epoch": 2.296774193548387,
"grad_norm": 1.729843020439148,
"learning_rate": 2.9770224884413625e-06,
"loss": 0.0974,
"num_input_tokens_seen": 566624,
"step": 89
},
{
"epoch": 2.3225806451612905,
"grad_norm": 14.893946647644043,
"learning_rate": 2.9341204441673267e-06,
"loss": 0.0442,
"num_input_tokens_seen": 572864,
"step": 90
},
{
"epoch": 2.3483870967741938,
"grad_norm": 6.81472110748291,
"learning_rate": 2.8910861626005774e-06,
"loss": 0.0802,
"num_input_tokens_seen": 579024,
"step": 91
},
{
"epoch": 2.3741935483870966,
"grad_norm": 8.73901653289795,
"learning_rate": 2.847932752400164e-06,
"loss": 0.0195,
"num_input_tokens_seen": 585536,
"step": 92
},
{
"epoch": 2.4,
"grad_norm": 2.9088733196258545,
"learning_rate": 2.804673358512869e-06,
"loss": 0.055,
"num_input_tokens_seen": 591792,
"step": 93
},
{
"epoch": 2.425806451612903,
"grad_norm": 4.13192081451416,
"learning_rate": 2.761321158169134e-06,
"loss": 0.0268,
"num_input_tokens_seen": 598144,
"step": 94
},
{
"epoch": 2.4516129032258065,
"grad_norm": 3.6112844944000244,
"learning_rate": 2.717889356869146e-06,
"loss": 0.0196,
"num_input_tokens_seen": 604496,
"step": 95
},
{
"epoch": 2.47741935483871,
"grad_norm": 6.083769798278809,
"learning_rate": 2.6743911843603134e-06,
"loss": 0.0363,
"num_input_tokens_seen": 610784,
"step": 96
},
{
"epoch": 2.5032258064516126,
"grad_norm": 1.1240483522415161,
"learning_rate": 2.6308398906073603e-06,
"loss": 0.0046,
"num_input_tokens_seen": 617024,
"step": 97
},
{
"epoch": 2.5290322580645164,
"grad_norm": 7.753751277923584,
"learning_rate": 2.587248741756253e-06,
"loss": 0.0366,
"num_input_tokens_seen": 623312,
"step": 98
},
{
"epoch": 2.554838709677419,
"grad_norm": 1.1425215005874634,
"learning_rate": 2.543631016093209e-06,
"loss": 0.0051,
"num_input_tokens_seen": 629616,
"step": 99
},
{
"epoch": 2.5806451612903225,
"grad_norm": 2.6504504680633545,
"learning_rate": 2.5e-06,
"loss": 0.0226,
"num_input_tokens_seen": 636144,
"step": 100
},
{
"epoch": 2.606451612903226,
"grad_norm": 5.269250869750977,
"learning_rate": 2.4563689839067913e-06,
"loss": 0.0818,
"num_input_tokens_seen": 642496,
"step": 101
},
{
"epoch": 2.632258064516129,
"grad_norm": 6.670433044433594,
"learning_rate": 2.4127512582437486e-06,
"loss": 0.0247,
"num_input_tokens_seen": 648912,
"step": 102
},
{
"epoch": 2.6580645161290324,
"grad_norm": 2.8113598823547363,
"learning_rate": 2.3691601093926406e-06,
"loss": 0.0593,
"num_input_tokens_seen": 655088,
"step": 103
},
{
"epoch": 2.6838709677419352,
"grad_norm": 4.226614475250244,
"learning_rate": 2.325608815639687e-06,
"loss": 0.0073,
"num_input_tokens_seen": 661680,
"step": 104
},
{
"epoch": 2.709677419354839,
"grad_norm": 2.636570453643799,
"learning_rate": 2.2821106431308546e-06,
"loss": 0.0295,
"num_input_tokens_seen": 668016,
"step": 105
},
{
"epoch": 2.735483870967742,
"grad_norm": 7.529627323150635,
"learning_rate": 2.238678841830867e-06,
"loss": 0.0115,
"num_input_tokens_seen": 674176,
"step": 106
},
{
"epoch": 2.761290322580645,
"grad_norm": 2.3682408332824707,
"learning_rate": 2.195326641487132e-06,
"loss": 0.0064,
"num_input_tokens_seen": 680464,
"step": 107
},
{
"epoch": 2.7870967741935484,
"grad_norm": 4.345768451690674,
"learning_rate": 2.1520672475998374e-06,
"loss": 0.0229,
"num_input_tokens_seen": 686688,
"step": 108
},
{
"epoch": 2.8129032258064517,
"grad_norm": 8.903531074523926,
"learning_rate": 2.1089138373994226e-06,
"loss": 0.0605,
"num_input_tokens_seen": 692992,
"step": 109
},
{
"epoch": 2.838709677419355,
"grad_norm": 9.090164184570312,
"learning_rate": 2.0658795558326745e-06,
"loss": 0.05,
"num_input_tokens_seen": 699392,
"step": 110
},
{
"epoch": 2.864516129032258,
"grad_norm": 5.729383945465088,
"learning_rate": 2.022977511558638e-06,
"loss": 0.0544,
"num_input_tokens_seen": 705680,
"step": 111
},
{
"epoch": 2.8903225806451616,
"grad_norm": 2.055316925048828,
"learning_rate": 1.9802207729556023e-06,
"loss": 0.0109,
"num_input_tokens_seen": 711952,
"step": 112
},
{
"epoch": 2.9161290322580644,
"grad_norm": 4.638775825500488,
"learning_rate": 1.937622364140338e-06,
"loss": 0.0242,
"num_input_tokens_seen": 718192,
"step": 113
},
{
"epoch": 2.9419354838709677,
"grad_norm": 6.185389995574951,
"learning_rate": 1.895195261000831e-06,
"loss": 0.0223,
"num_input_tokens_seen": 724832,
"step": 114
},
{
"epoch": 2.967741935483871,
"grad_norm": 3.262885332107544,
"learning_rate": 1.852952387243698e-06,
"loss": 0.0263,
"num_input_tokens_seen": 731312,
"step": 115
},
{
"epoch": 2.9935483870967743,
"grad_norm": 0.28118520975112915,
"learning_rate": 1.8109066104575023e-06,
"loss": 0.0014,
"num_input_tokens_seen": 737488,
"step": 116
},
{
"epoch": 3.0193548387096776,
"grad_norm": 0.9135170578956604,
"learning_rate": 1.7690707381931585e-06,
"loss": 0.0061,
"num_input_tokens_seen": 743760,
"step": 117
},
{
"epoch": 3.0451612903225804,
"grad_norm": 2.3123724460601807,
"learning_rate": 1.7274575140626318e-06,
"loss": 0.0296,
"num_input_tokens_seen": 750048,
"step": 118
},
{
"epoch": 3.0709677419354837,
"grad_norm": 0.6837524771690369,
"learning_rate": 1.686079613857109e-06,
"loss": 0.0186,
"num_input_tokens_seen": 756400,
"step": 119
},
{
"epoch": 3.096774193548387,
"grad_norm": 1.3550941944122314,
"learning_rate": 1.6449496416858285e-06,
"loss": 0.0038,
"num_input_tokens_seen": 762432,
"step": 120
},
{
"epoch": 3.1225806451612903,
"grad_norm": 1.2603812217712402,
"learning_rate": 1.6040801261367494e-06,
"loss": 0.0033,
"num_input_tokens_seen": 768688,
"step": 121
},
{
"epoch": 3.1483870967741936,
"grad_norm": 2.1692001819610596,
"learning_rate": 1.56348351646022e-06,
"loss": 0.0091,
"num_input_tokens_seen": 775024,
"step": 122
},
{
"epoch": 3.174193548387097,
"grad_norm": 0.3110519349575043,
"learning_rate": 1.5231721787768162e-06,
"loss": 0.0012,
"num_input_tokens_seen": 781360,
"step": 123
},
{
"epoch": 3.2,
"grad_norm": 1.8163748979568481,
"learning_rate": 1.4831583923105e-06,
"loss": 0.0223,
"num_input_tokens_seen": 787888,
"step": 124
},
{
"epoch": 3.225806451612903,
"grad_norm": 3.2385294437408447,
"learning_rate": 1.443454345648252e-06,
"loss": 0.0131,
"num_input_tokens_seen": 794112,
"step": 125
},
{
"epoch": 3.2516129032258063,
"grad_norm": 0.23872306942939758,
"learning_rate": 1.4040721330273063e-06,
"loss": 0.0008,
"num_input_tokens_seen": 800384,
"step": 126
},
{
"epoch": 3.2774193548387096,
"grad_norm": 2.089226722717285,
"learning_rate": 1.3650237506511333e-06,
"loss": 0.0058,
"num_input_tokens_seen": 806848,
"step": 127
},
{
"epoch": 3.303225806451613,
"grad_norm": 4.151247501373291,
"learning_rate": 1.3263210930352737e-06,
"loss": 0.0065,
"num_input_tokens_seen": 813136,
"step": 128
},
{
"epoch": 3.329032258064516,
"grad_norm": 3.59883189201355,
"learning_rate": 1.2879759493841577e-06,
"loss": 0.0398,
"num_input_tokens_seen": 819504,
"step": 129
},
{
"epoch": 3.3548387096774195,
"grad_norm": 0.13131457567214966,
"learning_rate": 1.2500000000000007e-06,
"loss": 0.0005,
"num_input_tokens_seen": 825936,
"step": 130
},
{
"epoch": 3.3806451612903228,
"grad_norm": 0.9149203896522522,
"learning_rate": 1.2124048127248644e-06,
"loss": 0.0049,
"num_input_tokens_seen": 832496,
"step": 131
},
{
"epoch": 3.4064516129032256,
"grad_norm": 1.6843948364257812,
"learning_rate": 1.1752018394169882e-06,
"loss": 0.0061,
"num_input_tokens_seen": 838864,
"step": 132
},
{
"epoch": 3.432258064516129,
"grad_norm": 1.2536215782165527,
"learning_rate": 1.1384024124624324e-06,
"loss": 0.0111,
"num_input_tokens_seen": 845504,
"step": 133
},
{
"epoch": 3.458064516129032,
"grad_norm": 1.0523045063018799,
"learning_rate": 1.1020177413231334e-06,
"loss": 0.0049,
"num_input_tokens_seen": 851888,
"step": 134
},
{
"epoch": 3.4838709677419355,
"grad_norm": 0.698330819606781,
"learning_rate": 1.0660589091223854e-06,
"loss": 0.0012,
"num_input_tokens_seen": 858144,
"step": 135
},
{
"epoch": 3.509677419354839,
"grad_norm": 0.35953453183174133,
"learning_rate": 1.0305368692688175e-06,
"loss": 0.0004,
"num_input_tokens_seen": 864496,
"step": 136
},
{
"epoch": 3.535483870967742,
"grad_norm": 0.17629964649677277,
"learning_rate": 9.95462442119879e-07,
"loss": 0.0006,
"num_input_tokens_seen": 870672,
"step": 137
},
{
"epoch": 3.5612903225806454,
"grad_norm": 0.060391154140233994,
"learning_rate": 9.608463116858544e-07,
"loss": 0.0003,
"num_input_tokens_seen": 876944,
"step": 138
},
{
"epoch": 3.587096774193548,
"grad_norm": 0.08453088998794556,
"learning_rate": 9.266990223754069e-07,
"loss": 0.0004,
"num_input_tokens_seen": 883488,
"step": 139
},
{
"epoch": 3.6129032258064515,
"grad_norm": 0.6224934458732605,
"learning_rate": 8.930309757836517e-07,
"loss": 0.0016,
"num_input_tokens_seen": 889824,
"step": 140
},
{
"epoch": 3.638709677419355,
"grad_norm": 2.8432862758636475,
"learning_rate": 8.598524275237321e-07,
"loss": 0.0268,
"num_input_tokens_seen": 896176,
"step": 141
},
{
"epoch": 3.664516129032258,
"grad_norm": 0.6191421747207642,
"learning_rate": 8.271734841028553e-07,
"loss": 0.0018,
"num_input_tokens_seen": 902272,
"step": 142
},
{
"epoch": 3.6903225806451614,
"grad_norm": 1.3128641843795776,
"learning_rate": 7.950040998437541e-07,
"loss": 0.01,
"num_input_tokens_seen": 908512,
"step": 143
},
{
"epoch": 3.7161290322580647,
"grad_norm": 1.9438813924789429,
"learning_rate": 7.633540738525066e-07,
"loss": 0.0209,
"num_input_tokens_seen": 915152,
"step": 144
},
{
"epoch": 3.741935483870968,
"grad_norm": 1.1845624446868896,
"learning_rate": 7.322330470336314e-07,
"loss": 0.0076,
"num_input_tokens_seen": 921552,
"step": 145
},
{
"epoch": 3.767741935483871,
"grad_norm": 4.526978492736816,
"learning_rate": 7.016504991533727e-07,
"loss": 0.0227,
"num_input_tokens_seen": 928048,
"step": 146
},
{
"epoch": 3.793548387096774,
"grad_norm": 0.03266080841422081,
"learning_rate": 6.716157459520739e-07,
"loss": 0.0002,
"num_input_tokens_seen": 934512,
"step": 147
},
{
"epoch": 3.8193548387096774,
"grad_norm": 5.138983249664307,
"learning_rate": 6.421379363065142e-07,
"loss": 0.0296,
"num_input_tokens_seen": 940816,
"step": 148
},
{
"epoch": 3.8451612903225807,
"grad_norm": 0.3178328275680542,
"learning_rate": 6.1322604944307e-07,
"loss": 0.0006,
"num_input_tokens_seen": 946992,
"step": 149
},
{
"epoch": 3.870967741935484,
"grad_norm": 1.1145384311676025,
"learning_rate": 5.848888922025553e-07,
"loss": 0.0012,
"num_input_tokens_seen": 953424,
"step": 150
},
{
"epoch": 3.896774193548387,
"grad_norm": 0.23422469198703766,
"learning_rate": 5.571350963575728e-07,
"loss": 0.0007,
"num_input_tokens_seen": 959616,
"step": 151
},
{
"epoch": 3.9225806451612906,
"grad_norm": 0.09516251087188721,
"learning_rate": 5.299731159831953e-07,
"loss": 0.0003,
"num_input_tokens_seen": 966096,
"step": 152
},
{
"epoch": 3.9483870967741934,
"grad_norm": 0.3407827317714691,
"learning_rate": 5.034112248817685e-07,
"loss": 0.0005,
"num_input_tokens_seen": 972368,
"step": 153
},
{
"epoch": 3.9741935483870967,
"grad_norm": 0.2334306687116623,
"learning_rate": 4.774575140626317e-07,
"loss": 0.0008,
"num_input_tokens_seen": 978848,
"step": 154
},
{
"epoch": 4.0,
"grad_norm": 0.10143516212701797,
"learning_rate": 4.5211988927752026e-07,
"loss": 0.0003,
"num_input_tokens_seen": 985520,
"step": 155
},
{
"epoch": 4.025806451612903,
"grad_norm": 0.25746700167655945,
"learning_rate": 4.27406068612396e-07,
"loss": 0.0015,
"num_input_tokens_seen": 991904,
"step": 156
},
{
"epoch": 4.051612903225807,
"grad_norm": 0.22239096462726593,
"learning_rate": 4.033235801364402e-07,
"loss": 0.0007,
"num_input_tokens_seen": 998288,
"step": 157
},
{
"epoch": 4.077419354838709,
"grad_norm": 0.05284583568572998,
"learning_rate": 3.798797596089351e-07,
"loss": 0.0002,
"num_input_tokens_seen": 1004432,
"step": 158
},
{
"epoch": 4.103225806451613,
"grad_norm": 1.299528956413269,
"learning_rate": 3.5708174824471947e-07,
"loss": 0.0052,
"num_input_tokens_seen": 1010608,
"step": 159
},
{
"epoch": 4.129032258064516,
"grad_norm": 1.0830060243606567,
"learning_rate": 3.3493649053890325e-07,
"loss": 0.004,
"num_input_tokens_seen": 1016848,
"step": 160
},
{
"epoch": 4.15483870967742,
"grad_norm": 0.16024000942707062,
"learning_rate": 3.134507321515107e-07,
"loss": 0.0004,
"num_input_tokens_seen": 1023360,
"step": 161
},
{
"epoch": 4.180645161290323,
"grad_norm": 0.6330615878105164,
"learning_rate": 2.9263101785268253e-07,
"loss": 0.002,
"num_input_tokens_seen": 1029808,
"step": 162
},
{
"epoch": 4.2064516129032254,
"grad_norm": 0.03194880485534668,
"learning_rate": 2.7248368952908055e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1036080,
"step": 163
},
{
"epoch": 4.232258064516129,
"grad_norm": 0.02812141925096512,
"learning_rate": 2.53014884252083e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1042240,
"step": 164
},
{
"epoch": 4.258064516129032,
"grad_norm": 0.029427967965602875,
"learning_rate": 2.3423053240837518e-07,
"loss": 0.0002,
"num_input_tokens_seen": 1048672,
"step": 165
},
{
"epoch": 4.283870967741936,
"grad_norm": 2.0345771312713623,
"learning_rate": 2.1613635589349756e-07,
"loss": 0.0076,
"num_input_tokens_seen": 1054832,
"step": 166
},
{
"epoch": 4.309677419354839,
"grad_norm": 0.02240203320980072,
"learning_rate": 1.9873786636889908e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1061312,
"step": 167
},
{
"epoch": 4.335483870967742,
"grad_norm": 0.08536162972450256,
"learning_rate": 1.8204036358303173e-07,
"loss": 0.0002,
"num_input_tokens_seen": 1067488,
"step": 168
},
{
"epoch": 4.361290322580645,
"grad_norm": 0.0193481482565403,
"learning_rate": 1.6604893375699594e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1073648,
"step": 169
},
{
"epoch": 4.387096774193548,
"grad_norm": 0.021560601890087128,
"learning_rate": 1.507684480352292e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1080160,
"step": 170
},
{
"epoch": 4.412903225806452,
"grad_norm": 0.03216614946722984,
"learning_rate": 1.362035610017079e-07,
"loss": 0.0002,
"num_input_tokens_seen": 1086832,
"step": 171
},
{
"epoch": 4.438709677419355,
"grad_norm": 0.04035123065114021,
"learning_rate": 1.223587092621162e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1093184,
"step": 172
},
{
"epoch": 4.464516129032258,
"grad_norm": 0.17049850523471832,
"learning_rate": 1.0923811009241142e-07,
"loss": 0.0005,
"num_input_tokens_seen": 1099728,
"step": 173
},
{
"epoch": 4.490322580645161,
"grad_norm": 0.02470760978758335,
"learning_rate": 9.684576015420277e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1106032,
"step": 174
},
{
"epoch": 4.516129032258064,
"grad_norm": 0.009991397149860859,
"learning_rate": 8.518543427732951e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1112496,
"step": 175
},
{
"epoch": 4.541935483870968,
"grad_norm": 1.3486413955688477,
"learning_rate": 7.426068431000883e-08,
"loss": 0.0081,
"num_input_tokens_seen": 1119152,
"step": 176
},
{
"epoch": 4.567741935483871,
"grad_norm": 0.04406896233558655,
"learning_rate": 6.407483803691216e-08,
"loss": 0.0002,
"num_input_tokens_seen": 1125360,
"step": 177
},
{
"epoch": 4.593548387096774,
"grad_norm": 0.07763337343931198,
"learning_rate": 5.463099816548578e-08,
"loss": 0.0003,
"num_input_tokens_seen": 1131824,
"step": 178
},
{
"epoch": 4.619354838709677,
"grad_norm": 0.028237691149115562,
"learning_rate": 4.593204138084006e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1138224,
"step": 179
},
{
"epoch": 4.645161290322581,
"grad_norm": 0.12385120242834091,
"learning_rate": 3.798061746947995e-08,
"loss": 0.0005,
"num_input_tokens_seen": 1144528,
"step": 180
},
{
"epoch": 4.670967741935484,
"grad_norm": 0.033041104674339294,
"learning_rate": 3.077914851215585e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1150880,
"step": 181
},
{
"epoch": 4.6967741935483875,
"grad_norm": 0.025356203317642212,
"learning_rate": 2.4329828146074096e-08,
"loss": 0.0002,
"num_input_tokens_seen": 1157184,
"step": 182
},
{
"epoch": 4.72258064516129,
"grad_norm": 0.021917220205068588,
"learning_rate": 1.8634620896695044e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1163536,
"step": 183
},
{
"epoch": 4.748387096774193,
"grad_norm": 0.05064333602786064,
"learning_rate": 1.3695261579316776e-08,
"loss": 0.0002,
"num_input_tokens_seen": 1169888,
"step": 184
},
{
"epoch": 4.774193548387097,
"grad_norm": 0.07646457105875015,
"learning_rate": 9.513254770636138e-09,
"loss": 0.0002,
"num_input_tokens_seen": 1176400,
"step": 185
},
{
"epoch": 4.8,
"grad_norm": 0.02895214594900608,
"learning_rate": 6.089874350439507e-09,
"loss": 0.0002,
"num_input_tokens_seen": 1182608,
"step": 186
},
{
"epoch": 4.825806451612904,
"grad_norm": 0.015325246378779411,
"learning_rate": 3.4261631135654174e-09,
"loss": 0.0001,
"num_input_tokens_seen": 1189008,
"step": 187
},
{
"epoch": 4.851612903225806,
"grad_norm": 0.159898042678833,
"learning_rate": 1.5229324522605949e-09,
"loss": 0.0004,
"num_input_tokens_seen": 1195280,
"step": 188
},
{
"epoch": 4.877419354838709,
"grad_norm": 0.4591263234615326,
"learning_rate": 3.8076210902182607e-10,
"loss": 0.0013,
"num_input_tokens_seen": 1201456,
"step": 189
},
{
"epoch": 4.903225806451613,
"grad_norm": 0.39459678530693054,
"learning_rate": 0.0,
"loss": 0.0008,
"num_input_tokens_seen": 1207760,
"step": 190
},
{
"epoch": 4.903225806451613,
"num_input_tokens_seen": 1207760,
"step": 190,
"total_flos": 5.438488809413018e+16,
"train_loss": 0.49016160156086125,
"train_runtime": 2575.226,
"train_samples_per_second": 9.626,
"train_steps_per_second": 0.074
}
],
"logging_steps": 1,
"max_steps": 190,
"num_input_tokens_seen": 1207760,
"num_train_epochs": 5,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.438488809413018e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}