{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 152,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013157894736842105,
"grad_norm": 13.235902064816667,
"learning_rate": 6.25e-07,
"loss": 0.3431,
"step": 1
},
{
"epoch": 0.02631578947368421,
"grad_norm": 13.138228566029483,
"learning_rate": 1.25e-06,
"loss": 0.3429,
"step": 2
},
{
"epoch": 0.039473684210526314,
"grad_norm": 11.866560617710881,
"learning_rate": 1.8750000000000003e-06,
"loss": 0.2846,
"step": 3
},
{
"epoch": 0.05263157894736842,
"grad_norm": 11.872638763731226,
"learning_rate": 2.5e-06,
"loss": 0.3455,
"step": 4
},
{
"epoch": 0.06578947368421052,
"grad_norm": 7.262421698320824,
"learning_rate": 3.125e-06,
"loss": 0.2313,
"step": 5
},
{
"epoch": 0.07894736842105263,
"grad_norm": 5.162108989306741,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.1935,
"step": 6
},
{
"epoch": 0.09210526315789473,
"grad_norm": 4.989963293072479,
"learning_rate": 4.3750000000000005e-06,
"loss": 0.1607,
"step": 7
},
{
"epoch": 0.10526315789473684,
"grad_norm": 3.2196850103315575,
"learning_rate": 5e-06,
"loss": 0.1746,
"step": 8
},
{
"epoch": 0.11842105263157894,
"grad_norm": 9.267917781353754,
"learning_rate": 5.625e-06,
"loss": 0.2141,
"step": 9
},
{
"epoch": 0.13157894736842105,
"grad_norm": 2.9550126149670968,
"learning_rate": 6.25e-06,
"loss": 0.1466,
"step": 10
},
{
"epoch": 0.14473684210526316,
"grad_norm": 3.2305434151019665,
"learning_rate": 6.875e-06,
"loss": 0.1559,
"step": 11
},
{
"epoch": 0.15789473684210525,
"grad_norm": 2.303728369668543,
"learning_rate": 7.500000000000001e-06,
"loss": 0.1459,
"step": 12
},
{
"epoch": 0.17105263157894737,
"grad_norm": 2.1650046636974225,
"learning_rate": 8.125000000000001e-06,
"loss": 0.1158,
"step": 13
},
{
"epoch": 0.18421052631578946,
"grad_norm": 2.43429839604823,
"learning_rate": 8.750000000000001e-06,
"loss": 0.1429,
"step": 14
},
{
"epoch": 0.19736842105263158,
"grad_norm": 2.256199461941204,
"learning_rate": 9.375000000000001e-06,
"loss": 0.1569,
"step": 15
},
{
"epoch": 0.21052631578947367,
"grad_norm": 2.540675164259285,
"learning_rate": 1e-05,
"loss": 0.1389,
"step": 16
},
{
"epoch": 0.2236842105263158,
"grad_norm": 3.0603124517729934,
"learning_rate": 9.998666040558187e-06,
"loss": 0.1481,
"step": 17
},
{
"epoch": 0.23684210526315788,
"grad_norm": 3.15679704599401,
"learning_rate": 9.994664874011864e-06,
"loss": 0.1543,
"step": 18
},
{
"epoch": 0.25,
"grad_norm": 1.2073031998301942,
"learning_rate": 9.987998635318586e-06,
"loss": 0.0912,
"step": 19
},
{
"epoch": 0.2631578947368421,
"grad_norm": 1.5485358071033657,
"learning_rate": 9.978670881475173e-06,
"loss": 0.1454,
"step": 20
},
{
"epoch": 0.27631578947368424,
"grad_norm": 1.837098464284506,
"learning_rate": 9.96668658961975e-06,
"loss": 0.1263,
"step": 21
},
{
"epoch": 0.2894736842105263,
"grad_norm": 2.195521777946691,
"learning_rate": 9.952052154376027e-06,
"loss": 0.1347,
"step": 22
},
{
"epoch": 0.3026315789473684,
"grad_norm": 1.4759607642496113,
"learning_rate": 9.93477538444123e-06,
"loss": 0.1036,
"step": 23
},
{
"epoch": 0.3157894736842105,
"grad_norm": 3.4617468292311653,
"learning_rate": 9.91486549841951e-06,
"loss": 0.1401,
"step": 24
},
{
"epoch": 0.32894736842105265,
"grad_norm": 1.8290234660376485,
"learning_rate": 9.892333119903045e-06,
"loss": 0.1191,
"step": 25
},
{
"epoch": 0.34210526315789475,
"grad_norm": 1.9446124807007885,
"learning_rate": 9.867190271803466e-06,
"loss": 0.1156,
"step": 26
},
{
"epoch": 0.35526315789473684,
"grad_norm": 1.924481905745065,
"learning_rate": 9.839450369936615e-06,
"loss": 0.1203,
"step": 27
},
{
"epoch": 0.3684210526315789,
"grad_norm": 0.9472794741039898,
"learning_rate": 9.809128215864096e-06,
"loss": 0.084,
"step": 28
},
{
"epoch": 0.3815789473684211,
"grad_norm": 1.8614087289828243,
"learning_rate": 9.776239988995401e-06,
"loss": 0.1237,
"step": 29
},
{
"epoch": 0.39473684210526316,
"grad_norm": 1.8725601311672047,
"learning_rate": 9.74080323795483e-06,
"loss": 0.109,
"step": 30
},
{
"epoch": 0.40789473684210525,
"grad_norm": 1.6989866208387698,
"learning_rate": 9.702836871217838e-06,
"loss": 0.1078,
"step": 31
},
{
"epoch": 0.42105263157894735,
"grad_norm": 1.4620493708990816,
"learning_rate": 9.66236114702178e-06,
"loss": 0.0993,
"step": 32
},
{
"epoch": 0.4342105263157895,
"grad_norm": 1.4349318310245505,
"learning_rate": 9.619397662556434e-06,
"loss": 0.1168,
"step": 33
},
{
"epoch": 0.4473684210526316,
"grad_norm": 1.2971799391770196,
"learning_rate": 9.573969342440107e-06,
"loss": 0.0989,
"step": 34
},
{
"epoch": 0.4605263157894737,
"grad_norm": 1.3284875209748295,
"learning_rate": 9.52610042648741e-06,
"loss": 0.1052,
"step": 35
},
{
"epoch": 0.47368421052631576,
"grad_norm": 1.517294107060111,
"learning_rate": 9.475816456775313e-06,
"loss": 0.1095,
"step": 36
},
{
"epoch": 0.4868421052631579,
"grad_norm": 1.8550537994393244,
"learning_rate": 9.423144264014278e-06,
"loss": 0.0975,
"step": 37
},
{
"epoch": 0.5,
"grad_norm": 2.2490574894196733,
"learning_rate": 9.368111953231849e-06,
"loss": 0.1171,
"step": 38
},
{
"epoch": 0.5131578947368421,
"grad_norm": 1.839240097339726,
"learning_rate": 9.310748888776254e-06,
"loss": 0.0916,
"step": 39
},
{
"epoch": 0.5263157894736842,
"grad_norm": 1.0701426736883897,
"learning_rate": 9.251085678648072e-06,
"loss": 0.1004,
"step": 40
},
{
"epoch": 0.5394736842105263,
"grad_norm": 1.3170647678764655,
"learning_rate": 9.189154158168293e-06,
"loss": 0.0873,
"step": 41
},
{
"epoch": 0.5526315789473685,
"grad_norm": 1.2646530838963848,
"learning_rate": 9.124987372991512e-06,
"loss": 0.1062,
"step": 42
},
{
"epoch": 0.5657894736842105,
"grad_norm": 2.0376323047774947,
"learning_rate": 9.058619561473308e-06,
"loss": 0.1446,
"step": 43
},
{
"epoch": 0.5789473684210527,
"grad_norm": 1.4525149631147896,
"learning_rate": 8.990086136401199e-06,
"loss": 0.1063,
"step": 44
},
{
"epoch": 0.5921052631578947,
"grad_norm": 1.7227367203376753,
"learning_rate": 8.91942366609897e-06,
"loss": 0.1288,
"step": 45
},
{
"epoch": 0.6052631578947368,
"grad_norm": 1.6374368378542756,
"learning_rate": 8.846669854914395e-06,
"loss": 0.1101,
"step": 46
},
{
"epoch": 0.618421052631579,
"grad_norm": 1.776661892531926,
"learning_rate": 8.771863523100821e-06,
"loss": 0.0863,
"step": 47
},
{
"epoch": 0.631578947368421,
"grad_norm": 2.101189002973872,
"learning_rate": 8.695044586103297e-06,
"loss": 0.1155,
"step": 48
},
{
"epoch": 0.6447368421052632,
"grad_norm": 1.4990143401140024,
"learning_rate": 8.616254033260351e-06,
"loss": 0.1233,
"step": 49
},
{
"epoch": 0.6578947368421053,
"grad_norm": 1.300205397968391,
"learning_rate": 8.535533905932739e-06,
"loss": 0.0722,
"step": 50
},
{
"epoch": 0.6710526315789473,
"grad_norm": 1.2976123144962795,
"learning_rate": 8.452927275070858e-06,
"loss": 0.0955,
"step": 51
},
{
"epoch": 0.6842105263157895,
"grad_norm": 3.1993999412027807,
"learning_rate": 8.368478218232787e-06,
"loss": 0.0992,
"step": 52
},
{
"epoch": 0.6973684210526315,
"grad_norm": 1.511040367087983,
"learning_rate": 8.282231796065215e-06,
"loss": 0.1239,
"step": 53
},
{
"epoch": 0.7105263157894737,
"grad_norm": 1.735863282573467,
"learning_rate": 8.194234028259806e-06,
"loss": 0.1114,
"step": 54
},
{
"epoch": 0.7236842105263158,
"grad_norm": 1.062538506538223,
"learning_rate": 8.104531868997858e-06,
"loss": 0.0833,
"step": 55
},
{
"epoch": 0.7368421052631579,
"grad_norm": 1.4057184361489892,
"learning_rate": 8.013173181896283e-06,
"loss": 0.1348,
"step": 56
},
{
"epoch": 0.75,
"grad_norm": 1.0217071440957173,
"learning_rate": 7.920206714468383e-06,
"loss": 0.0869,
"step": 57
},
{
"epoch": 0.7631578947368421,
"grad_norm": 0.9647935123156808,
"learning_rate": 7.82568207211296e-06,
"loss": 0.0774,
"step": 58
},
{
"epoch": 0.7763157894736842,
"grad_norm": 1.2119765768642377,
"learning_rate": 7.729649691645673e-06,
"loss": 0.0867,
"step": 59
},
{
"epoch": 0.7894736842105263,
"grad_norm": 0.684404887958961,
"learning_rate": 7.63216081438678e-06,
"loss": 0.077,
"step": 60
},
{
"epoch": 0.8026315789473685,
"grad_norm": 1.202460612915356,
"learning_rate": 7.533267458819597e-06,
"loss": 0.0823,
"step": 61
},
{
"epoch": 0.8157894736842105,
"grad_norm": 1.307993207575058,
"learning_rate": 7.4330223928342814e-06,
"loss": 0.0902,
"step": 62
},
{
"epoch": 0.8289473684210527,
"grad_norm": 1.6640855566750032,
"learning_rate": 7.33147910557174e-06,
"loss": 0.141,
"step": 63
},
{
"epoch": 0.8421052631578947,
"grad_norm": 1.3895288949997349,
"learning_rate": 7.2286917788826926e-06,
"loss": 0.0992,
"step": 64
},
{
"epoch": 0.8552631578947368,
"grad_norm": 0.86233230188121,
"learning_rate": 7.124715258417111e-06,
"loss": 0.0663,
"step": 65
},
{
"epoch": 0.868421052631579,
"grad_norm": 1.3734677592094817,
"learning_rate": 7.019605024359475e-06,
"loss": 0.0848,
"step": 66
},
{
"epoch": 0.881578947368421,
"grad_norm": 1.2016915391705267,
"learning_rate": 6.913417161825449e-06,
"loss": 0.1137,
"step": 67
},
{
"epoch": 0.8947368421052632,
"grad_norm": 1.1962233631272159,
"learning_rate": 6.806208330935766e-06,
"loss": 0.0866,
"step": 68
},
{
"epoch": 0.9078947368421053,
"grad_norm": 1.3774507325980982,
"learning_rate": 6.698035736583307e-06,
"loss": 0.0975,
"step": 69
},
{
"epoch": 0.9210526315789473,
"grad_norm": 1.0910714190292217,
"learning_rate": 6.588957097909509e-06,
"loss": 0.0804,
"step": 70
},
{
"epoch": 0.9342105263157895,
"grad_norm": 0.7559368152288192,
"learning_rate": 6.4790306175063535e-06,
"loss": 0.0748,
"step": 71
},
{
"epoch": 0.9473684210526315,
"grad_norm": 1.320278904610911,
"learning_rate": 6.368314950360416e-06,
"loss": 0.1092,
"step": 72
},
{
"epoch": 0.9605263157894737,
"grad_norm": 0.9978117274759357,
"learning_rate": 6.2568691725555144e-06,
"loss": 0.0669,
"step": 73
},
{
"epoch": 0.9736842105263158,
"grad_norm": 1.184512537053634,
"learning_rate": 6.144752749750671e-06,
"loss": 0.0791,
"step": 74
},
{
"epoch": 0.9868421052631579,
"grad_norm": 1.3888364419849502,
"learning_rate": 6.0320255054501985e-06,
"loss": 0.0791,
"step": 75
},
{
"epoch": 1.0,
"grad_norm": 1.0696792385978997,
"learning_rate": 5.918747589082853e-06,
"loss": 0.1352,
"step": 76
},
{
"epoch": 1.0,
"eval_loss": 0.09702859073877335,
"eval_runtime": 143.4285,
"eval_samples_per_second": 35.593,
"eval_steps_per_second": 1.116,
"step": 76
},
{
"epoch": 1.013157894736842,
"grad_norm": 1.130795788902584,
"learning_rate": 5.804979443907065e-06,
"loss": 0.0565,
"step": 77
},
{
"epoch": 1.0263157894736843,
"grad_norm": 0.9742675703939438,
"learning_rate": 5.690781774759412e-06,
"loss": 0.0826,
"step": 78
},
{
"epoch": 1.0394736842105263,
"grad_norm": 1.4529332894640457,
"learning_rate": 5.576215515663489e-06,
"loss": 0.109,
"step": 79
},
{
"epoch": 1.0526315789473684,
"grad_norm": 0.8745980190537407,
"learning_rate": 5.46134179731651e-06,
"loss": 0.0815,
"step": 80
},
{
"epoch": 1.0657894736842106,
"grad_norm": 0.965352409023328,
"learning_rate": 5.346221914470959e-06,
"loss": 0.0753,
"step": 81
},
{
"epoch": 1.0789473684210527,
"grad_norm": 0.9281912350813449,
"learning_rate": 5.230917293228699e-06,
"loss": 0.0604,
"step": 82
},
{
"epoch": 1.0921052631578947,
"grad_norm": 0.7117517424840752,
"learning_rate": 5.115489458265006e-06,
"loss": 0.0548,
"step": 83
},
{
"epoch": 1.1052631578947367,
"grad_norm": 0.8174226361772433,
"learning_rate": 5e-06,
"loss": 0.0798,
"step": 84
},
{
"epoch": 1.118421052631579,
"grad_norm": 1.091499890906096,
"learning_rate": 4.8845105417349955e-06,
"loss": 0.0669,
"step": 85
},
{
"epoch": 1.131578947368421,
"grad_norm": 0.8489893677105097,
"learning_rate": 4.7690827067713035e-06,
"loss": 0.0799,
"step": 86
},
{
"epoch": 1.1447368421052633,
"grad_norm": 1.0103580405572923,
"learning_rate": 4.653778085529043e-06,
"loss": 0.0697,
"step": 87
},
{
"epoch": 1.1578947368421053,
"grad_norm": 0.8915773944210619,
"learning_rate": 4.53865820268349e-06,
"loss": 0.0786,
"step": 88
},
{
"epoch": 1.1710526315789473,
"grad_norm": 0.9795278903306394,
"learning_rate": 4.4237844843365126e-06,
"loss": 0.0948,
"step": 89
},
{
"epoch": 1.1842105263157894,
"grad_norm": 0.6718237305799917,
"learning_rate": 4.309218225240591e-06,
"loss": 0.0751,
"step": 90
},
{
"epoch": 1.1973684210526316,
"grad_norm": 0.9809953018950844,
"learning_rate": 4.195020556092935e-06,
"loss": 0.0717,
"step": 91
},
{
"epoch": 1.2105263157894737,
"grad_norm": 1.3739884608587827,
"learning_rate": 4.081252410917148e-06,
"loss": 0.0834,
"step": 92
},
{
"epoch": 1.2236842105263157,
"grad_norm": 0.7354951934416454,
"learning_rate": 3.967974494549803e-06,
"loss": 0.0497,
"step": 93
},
{
"epoch": 1.236842105263158,
"grad_norm": 0.9321611181418619,
"learning_rate": 3.855247250249331e-06,
"loss": 0.1091,
"step": 94
},
{
"epoch": 1.25,
"grad_norm": 0.8153028496714768,
"learning_rate": 3.743130827444487e-06,
"loss": 0.0719,
"step": 95
},
{
"epoch": 1.263157894736842,
"grad_norm": 1.7763781332034168,
"learning_rate": 3.6316850496395863e-06,
"loss": 0.0995,
"step": 96
},
{
"epoch": 1.2763157894736843,
"grad_norm": 1.0403921146171855,
"learning_rate": 3.5209693824936486e-06,
"loss": 0.1035,
"step": 97
},
{
"epoch": 1.2894736842105263,
"grad_norm": 0.9375966336686881,
"learning_rate": 3.4110429020904924e-06,
"loss": 0.0854,
"step": 98
},
{
"epoch": 1.3026315789473684,
"grad_norm": 0.8044967231814447,
"learning_rate": 3.301964263416693e-06,
"loss": 0.0545,
"step": 99
},
{
"epoch": 1.3157894736842106,
"grad_norm": 1.057615646119616,
"learning_rate": 3.1937916690642356e-06,
"loss": 0.0721,
"step": 100
},
{
"epoch": 1.3289473684210527,
"grad_norm": 0.8460218568489392,
"learning_rate": 3.0865828381745515e-06,
"loss": 0.0756,
"step": 101
},
{
"epoch": 1.3421052631578947,
"grad_norm": 0.7973010176418315,
"learning_rate": 2.980394975640526e-06,
"loss": 0.0701,
"step": 102
},
{
"epoch": 1.3552631578947367,
"grad_norm": 0.43625526231871226,
"learning_rate": 2.8752847415828923e-06,
"loss": 0.0439,
"step": 103
},
{
"epoch": 1.368421052631579,
"grad_norm": 0.7846073292481729,
"learning_rate": 2.771308221117309e-06,
"loss": 0.068,
"step": 104
},
{
"epoch": 1.381578947368421,
"grad_norm": 0.7894886291152677,
"learning_rate": 2.668520894428259e-06,
"loss": 0.0521,
"step": 105
},
{
"epoch": 1.3947368421052633,
"grad_norm": 0.8898713056015292,
"learning_rate": 2.5669776071657194e-06,
"loss": 0.0832,
"step": 106
},
{
"epoch": 1.4078947368421053,
"grad_norm": 0.7239448528037634,
"learning_rate": 2.466732541180404e-06,
"loss": 0.0651,
"step": 107
},
{
"epoch": 1.4210526315789473,
"grad_norm": 1.1924698653865555,
"learning_rate": 2.3678391856132203e-06,
"loss": 0.0871,
"step": 108
},
{
"epoch": 1.4342105263157894,
"grad_norm": 0.796957419192979,
"learning_rate": 2.2703503083543288e-06,
"loss": 0.0632,
"step": 109
},
{
"epoch": 1.4473684210526316,
"grad_norm": 0.8317972188965687,
"learning_rate": 2.174317927887041e-06,
"loss": 0.0627,
"step": 110
},
{
"epoch": 1.4605263157894737,
"grad_norm": 0.6383766307808258,
"learning_rate": 2.0797932855316183e-06,
"loss": 0.0685,
"step": 111
},
{
"epoch": 1.4736842105263157,
"grad_norm": 0.8167329309674938,
"learning_rate": 1.9868268181037186e-06,
"loss": 0.0674,
"step": 112
},
{
"epoch": 1.486842105263158,
"grad_norm": 0.7471966235227357,
"learning_rate": 1.8954681310021434e-06,
"loss": 0.0729,
"step": 113
},
{
"epoch": 1.5,
"grad_norm": 0.6587846660979715,
"learning_rate": 1.8057659717401948e-06,
"loss": 0.0481,
"step": 114
},
{
"epoch": 1.513157894736842,
"grad_norm": 0.8623712101197185,
"learning_rate": 1.7177682039347875e-06,
"loss": 0.0751,
"step": 115
},
{
"epoch": 1.526315789473684,
"grad_norm": 0.7600610777742871,
"learning_rate": 1.6315217817672142e-06,
"loss": 0.0754,
"step": 116
},
{
"epoch": 1.5394736842105263,
"grad_norm": 0.7028388956696653,
"learning_rate": 1.5470727249291423e-06,
"loss": 0.0591,
"step": 117
},
{
"epoch": 1.5526315789473686,
"grad_norm": 0.9080662531327958,
"learning_rate": 1.4644660940672628e-06,
"loss": 0.0665,
"step": 118
},
{
"epoch": 1.5657894736842106,
"grad_norm": 0.8002081473664321,
"learning_rate": 1.383745966739652e-06,
"loss": 0.0567,
"step": 119
},
{
"epoch": 1.5789473684210527,
"grad_norm": 0.8070376846944848,
"learning_rate": 1.3049554138967052e-06,
"loss": 0.074,
"step": 120
},
{
"epoch": 1.5921052631578947,
"grad_norm": 0.7133857988769062,
"learning_rate": 1.2281364768991804e-06,
"loss": 0.082,
"step": 121
},
{
"epoch": 1.6052631578947367,
"grad_norm": 0.7063230781754961,
"learning_rate": 1.1533301450856054e-06,
"loss": 0.0793,
"step": 122
},
{
"epoch": 1.618421052631579,
"grad_norm": 0.5118303753159236,
"learning_rate": 1.0805763339010329e-06,
"loss": 0.0606,
"step": 123
},
{
"epoch": 1.631578947368421,
"grad_norm": 0.6255028097581671,
"learning_rate": 1.0099138635988026e-06,
"loss": 0.0624,
"step": 124
},
{
"epoch": 1.6447368421052633,
"grad_norm": 1.2565545443199133,
"learning_rate": 9.41380438526694e-07,
"loss": 0.0834,
"step": 125
},
{
"epoch": 1.6578947368421053,
"grad_norm": 0.6949398501709794,
"learning_rate": 8.750126270084891e-07,
"loss": 0.0554,
"step": 126
},
{
"epoch": 1.6710526315789473,
"grad_norm": 0.6169370398639232,
"learning_rate": 8.108458418317089e-07,
"loss": 0.0756,
"step": 127
},
{
"epoch": 1.6842105263157894,
"grad_norm": 0.5783403647508548,
"learning_rate": 7.489143213519301e-07,
"loss": 0.0484,
"step": 128
},
{
"epoch": 1.6973684210526314,
"grad_norm": 0.613206478877195,
"learning_rate": 6.892511112237472e-07,
"loss": 0.0674,
"step": 129
},
{
"epoch": 1.7105263157894737,
"grad_norm": 0.7610207582376373,
"learning_rate": 6.318880467681527e-07,
"loss": 0.0802,
"step": 130
},
{
"epoch": 1.723684210526316,
"grad_norm": 0.6484932637313764,
"learning_rate": 5.768557359857241e-07,
"loss": 0.0799,
"step": 131
},
{
"epoch": 1.736842105263158,
"grad_norm": 0.801045963046033,
"learning_rate": 5.241835432246888e-07,
"loss": 0.0631,
"step": 132
},
{
"epoch": 1.75,
"grad_norm": 0.5006567755599417,
"learning_rate": 4.738995735125895e-07,
"loss": 0.0602,
"step": 133
},
{
"epoch": 1.763157894736842,
"grad_norm": 0.7321825355306667,
"learning_rate": 4.2603065755989493e-07,
"loss": 0.0574,
"step": 134
},
{
"epoch": 1.776315789473684,
"grad_norm": 0.5455281345073466,
"learning_rate": 3.8060233744356634e-07,
"loss": 0.0585,
"step": 135
},
{
"epoch": 1.7894736842105263,
"grad_norm": 0.6941675384816356,
"learning_rate": 3.3763885297822153e-07,
"loss": 0.0593,
"step": 136
},
{
"epoch": 1.8026315789473686,
"grad_norm": 0.607277109667664,
"learning_rate": 2.9716312878216194e-07,
"loss": 0.0596,
"step": 137
},
{
"epoch": 1.8157894736842106,
"grad_norm": 0.5526534850437881,
"learning_rate": 2.5919676204517073e-07,
"loss": 0.0574,
"step": 138
},
{
"epoch": 1.8289473684210527,
"grad_norm": 0.7027887496493381,
"learning_rate": 2.237600110046001e-07,
"loss": 0.0883,
"step": 139
},
{
"epoch": 1.8421052631578947,
"grad_norm": 0.5658475866263035,
"learning_rate": 1.908717841359048e-07,
"loss": 0.0593,
"step": 140
},
{
"epoch": 1.8552631578947367,
"grad_norm": 0.6440458312241749,
"learning_rate": 1.6054963006338742e-07,
"loss": 0.0645,
"step": 141
},
{
"epoch": 1.868421052631579,
"grad_norm": 0.8251777255090132,
"learning_rate": 1.328097281965357e-07,
"loss": 0.091,
"step": 142
},
{
"epoch": 1.881578947368421,
"grad_norm": 0.584859727969167,
"learning_rate": 1.0766688009695548e-07,
"loss": 0.0624,
"step": 143
},
{
"epoch": 1.8947368421052633,
"grad_norm": 0.6478070234261503,
"learning_rate": 8.513450158049109e-08,
"loss": 0.0662,
"step": 144
},
{
"epoch": 1.9078947368421053,
"grad_norm": 0.5481010295457636,
"learning_rate": 6.522461555877213e-08,
"loss": 0.0717,
"step": 145
},
{
"epoch": 1.9210526315789473,
"grad_norm": 0.5866004853500736,
"learning_rate": 4.794784562397459e-08,
"loss": 0.0729,
"step": 146
},
{
"epoch": 1.9342105263157894,
"grad_norm": 0.7549384445552164,
"learning_rate": 3.3313410380250157e-08,
"loss": 0.0936,
"step": 147
},
{
"epoch": 1.9473684210526314,
"grad_norm": 0.8887389895072535,
"learning_rate": 2.1329118524827662e-08,
"loss": 0.1033,
"step": 148
},
{
"epoch": 1.9605263157894737,
"grad_norm": 0.6459019124164774,
"learning_rate": 1.200136468141544e-08,
"loss": 0.0951,
"step": 149
},
{
"epoch": 1.973684210526316,
"grad_norm": 0.5993524993653697,
"learning_rate": 5.3351259881379016e-09,
"loss": 0.063,
"step": 150
},
{
"epoch": 1.986842105263158,
"grad_norm": 0.6183094577100168,
"learning_rate": 1.3339594418138036e-09,
"loss": 0.0715,
"step": 151
},
{
"epoch": 2.0,
"grad_norm": 0.47166327201031033,
"learning_rate": 0.0,
"loss": 0.051,
"step": 152
},
{
"epoch": 2.0,
"eval_loss": 0.08257210999727249,
"eval_runtime": 143.687,
"eval_samples_per_second": 35.529,
"eval_steps_per_second": 1.114,
"step": 152
},
{
"epoch": 2.0,
"step": 152,
"total_flos": 4.516827336435302e+16,
"train_loss": 0.09894711428665016,
"train_runtime": 2051.5561,
"train_samples_per_second": 9.454,
"train_steps_per_second": 0.074
}
],
"logging_steps": 1,
"max_steps": 152,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.516827336435302e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}