{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 672,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004464285714285714,
"grad_norm": 118.73274230957031,
"learning_rate": 0.0,
"loss": 3.3178,
"step": 1
},
{
"epoch": 0.008928571428571428,
"grad_norm": 120.67337036132812,
"learning_rate": 4.6275642631951835e-06,
"loss": 3.3568,
"step": 2
},
{
"epoch": 0.013392857142857142,
"grad_norm": 115.98211669921875,
"learning_rate": 7.3345158268416935e-06,
"loss": 3.2911,
"step": 3
},
{
"epoch": 0.017857142857142856,
"grad_norm": 19.323978424072266,
"learning_rate": 9.255128526390367e-06,
"loss": 2.589,
"step": 4
},
{
"epoch": 0.022321428571428572,
"grad_norm": 11.930215835571289,
"learning_rate": 1.0744871473609633e-05,
"loss": 2.1897,
"step": 5
},
{
"epoch": 0.026785714285714284,
"grad_norm": 11.268843650817871,
"learning_rate": 1.1962080090036879e-05,
"loss": 1.6538,
"step": 6
},
{
"epoch": 0.03125,
"grad_norm": 17.312570571899414,
"learning_rate": 1.299121531141887e-05,
"loss": 1.1579,
"step": 7
},
{
"epoch": 0.03571428571428571,
"grad_norm": 11.755829811096191,
"learning_rate": 1.388269278958555e-05,
"loss": 0.6901,
"step": 8
},
{
"epoch": 0.04017857142857143,
"grad_norm": 9.42724609375,
"learning_rate": 1.4669031653683387e-05,
"loss": 0.442,
"step": 9
},
{
"epoch": 0.044642857142857144,
"grad_norm": 7.048767566680908,
"learning_rate": 1.537243573680482e-05,
"loss": 0.2483,
"step": 10
},
{
"epoch": 0.049107142857142856,
"grad_norm": 11.449494361877441,
"learning_rate": 1.600874212937343e-05,
"loss": 0.184,
"step": 11
},
{
"epoch": 0.05357142857142857,
"grad_norm": 4.351762771606445,
"learning_rate": 1.6589644353232063e-05,
"loss": 0.0867,
"step": 12
},
{
"epoch": 0.05803571428571429,
"grad_norm": 8.21219253540039,
"learning_rate": 1.712402259777778e-05,
"loss": 0.079,
"step": 13
},
{
"epoch": 0.0625,
"grad_norm": 2.505552053451538,
"learning_rate": 1.7618779574614054e-05,
"loss": 0.1795,
"step": 14
},
{
"epoch": 0.06696428571428571,
"grad_norm": 1.9123979806900024,
"learning_rate": 1.8079387300451327e-05,
"loss": 0.0686,
"step": 15
},
{
"epoch": 0.07142857142857142,
"grad_norm": 1.5148472785949707,
"learning_rate": 1.8510257052780734e-05,
"loss": 0.0645,
"step": 16
},
{
"epoch": 0.07589285714285714,
"grad_norm": 1.7530207633972168,
"learning_rate": 1.891499697130832e-05,
"loss": 0.0598,
"step": 17
},
{
"epoch": 0.08035714285714286,
"grad_norm": 1.9798786640167236,
"learning_rate": 1.929659591687857e-05,
"loss": 0.0519,
"step": 18
},
{
"epoch": 0.08482142857142858,
"grad_norm": 1.4390745162963867,
"learning_rate": 1.9657557553855117e-05,
"loss": 0.0359,
"step": 19
},
{
"epoch": 0.08928571428571429,
"grad_norm": 0.8786221742630005,
"learning_rate": 2e-05,
"loss": 0.0746,
"step": 20
},
{
"epoch": 0.09375,
"grad_norm": 0.8981115818023682,
"learning_rate": 2e-05,
"loss": 0.0331,
"step": 21
},
{
"epoch": 0.09821428571428571,
"grad_norm": 2.092228889465332,
"learning_rate": 2e-05,
"loss": 0.0339,
"step": 22
},
{
"epoch": 0.10267857142857142,
"grad_norm": 0.6631488800048828,
"learning_rate": 2e-05,
"loss": 0.0339,
"step": 23
},
{
"epoch": 0.10714285714285714,
"grad_norm": 0.7038796544075012,
"learning_rate": 2e-05,
"loss": 0.0381,
"step": 24
},
{
"epoch": 0.11160714285714286,
"grad_norm": 1.3783732652664185,
"learning_rate": 2e-05,
"loss": 0.0934,
"step": 25
},
{
"epoch": 0.11607142857142858,
"grad_norm": 2.0931007862091064,
"learning_rate": 2e-05,
"loss": 0.0409,
"step": 26
},
{
"epoch": 0.12053571428571429,
"grad_norm": 3.667280435562134,
"learning_rate": 2e-05,
"loss": 0.0325,
"step": 27
},
{
"epoch": 0.125,
"grad_norm": 1.4841816425323486,
"learning_rate": 2e-05,
"loss": 0.0242,
"step": 28
},
{
"epoch": 0.12946428571428573,
"grad_norm": 1.6250470876693726,
"learning_rate": 2e-05,
"loss": 0.0337,
"step": 29
},
{
"epoch": 0.13392857142857142,
"grad_norm": 0.6866446137428284,
"learning_rate": 2e-05,
"loss": 0.031,
"step": 30
},
{
"epoch": 0.13839285714285715,
"grad_norm": 1.0550711154937744,
"learning_rate": 2e-05,
"loss": 0.05,
"step": 31
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.4552890360355377,
"learning_rate": 2e-05,
"loss": 0.0329,
"step": 32
},
{
"epoch": 0.14732142857142858,
"grad_norm": 1.899696946144104,
"learning_rate": 2e-05,
"loss": 0.0191,
"step": 33
},
{
"epoch": 0.15178571428571427,
"grad_norm": 31.991363525390625,
"learning_rate": 2e-05,
"loss": 0.0252,
"step": 34
},
{
"epoch": 0.15625,
"grad_norm": 7.586791038513184,
"learning_rate": 2e-05,
"loss": 0.0318,
"step": 35
},
{
"epoch": 0.16071428571428573,
"grad_norm": 0.9316713809967041,
"learning_rate": 2e-05,
"loss": 0.0275,
"step": 36
},
{
"epoch": 0.16517857142857142,
"grad_norm": 0.8239340782165527,
"learning_rate": 2e-05,
"loss": 0.0175,
"step": 37
},
{
"epoch": 0.16964285714285715,
"grad_norm": 0.2483951449394226,
"learning_rate": 2e-05,
"loss": 0.0102,
"step": 38
},
{
"epoch": 0.17410714285714285,
"grad_norm": 0.41918331384658813,
"learning_rate": 2e-05,
"loss": 0.0235,
"step": 39
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.5702436566352844,
"learning_rate": 2e-05,
"loss": 0.0344,
"step": 40
},
{
"epoch": 0.18303571428571427,
"grad_norm": 0.31215420365333557,
"learning_rate": 2e-05,
"loss": 0.018,
"step": 41
},
{
"epoch": 0.1875,
"grad_norm": 0.36833837628364563,
"learning_rate": 2e-05,
"loss": 0.024,
"step": 42
},
{
"epoch": 0.19196428571428573,
"grad_norm": 0.14042280614376068,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 43
},
{
"epoch": 0.19642857142857142,
"grad_norm": 0.25796693563461304,
"learning_rate": 2e-05,
"loss": 0.016,
"step": 44
},
{
"epoch": 0.20089285714285715,
"grad_norm": 0.32930517196655273,
"learning_rate": 2e-05,
"loss": 0.0217,
"step": 45
},
{
"epoch": 0.20535714285714285,
"grad_norm": 0.5433669090270996,
"learning_rate": 2e-05,
"loss": 0.0204,
"step": 46
},
{
"epoch": 0.20982142857142858,
"grad_norm": 0.2668748199939728,
"learning_rate": 2e-05,
"loss": 0.012,
"step": 47
},
{
"epoch": 0.21428571428571427,
"grad_norm": 0.12396270036697388,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 48
},
{
"epoch": 0.21875,
"grad_norm": 0.3738039433956146,
"learning_rate": 2e-05,
"loss": 0.0172,
"step": 49
},
{
"epoch": 0.22321428571428573,
"grad_norm": 0.2019537091255188,
"learning_rate": 2e-05,
"loss": 0.009,
"step": 50
},
{
"epoch": 0.22767857142857142,
"grad_norm": 0.2405676245689392,
"learning_rate": 2e-05,
"loss": 0.0124,
"step": 51
},
{
"epoch": 0.23214285714285715,
"grad_norm": 0.3147808611392975,
"learning_rate": 2e-05,
"loss": 0.0106,
"step": 52
},
{
"epoch": 0.23660714285714285,
"grad_norm": 0.19005292654037476,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 53
},
{
"epoch": 0.24107142857142858,
"grad_norm": 0.5401691198348999,
"learning_rate": 2e-05,
"loss": 0.0202,
"step": 54
},
{
"epoch": 0.24553571428571427,
"grad_norm": 0.10109489411115646,
"learning_rate": 2e-05,
"loss": 0.0061,
"step": 55
},
{
"epoch": 0.25,
"grad_norm": 0.5185668468475342,
"learning_rate": 2e-05,
"loss": 0.0275,
"step": 56
},
{
"epoch": 0.2544642857142857,
"grad_norm": 0.3303714990615845,
"learning_rate": 2e-05,
"loss": 0.0154,
"step": 57
},
{
"epoch": 0.25892857142857145,
"grad_norm": 0.31375738978385925,
"learning_rate": 2e-05,
"loss": 0.0169,
"step": 58
},
{
"epoch": 0.26339285714285715,
"grad_norm": 0.33531704545021057,
"learning_rate": 2e-05,
"loss": 0.0173,
"step": 59
},
{
"epoch": 0.26785714285714285,
"grad_norm": 0.34593161940574646,
"learning_rate": 2e-05,
"loss": 0.0152,
"step": 60
},
{
"epoch": 0.27232142857142855,
"grad_norm": 0.339955598115921,
"learning_rate": 2e-05,
"loss": 0.0179,
"step": 61
},
{
"epoch": 0.2767857142857143,
"grad_norm": 0.40512269735336304,
"learning_rate": 2e-05,
"loss": 0.0181,
"step": 62
},
{
"epoch": 0.28125,
"grad_norm": 0.39926639199256897,
"learning_rate": 2e-05,
"loss": 0.0229,
"step": 63
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.49879220128059387,
"learning_rate": 2e-05,
"loss": 0.0182,
"step": 64
},
{
"epoch": 0.29017857142857145,
"grad_norm": 0.6592809557914734,
"learning_rate": 2e-05,
"loss": 0.0791,
"step": 65
},
{
"epoch": 0.29464285714285715,
"grad_norm": 1.4562678337097168,
"learning_rate": 2e-05,
"loss": 0.0391,
"step": 66
},
{
"epoch": 0.29910714285714285,
"grad_norm": 0.2671646773815155,
"learning_rate": 2e-05,
"loss": 0.0122,
"step": 67
},
{
"epoch": 0.30357142857142855,
"grad_norm": 0.398265540599823,
"learning_rate": 2e-05,
"loss": 0.0113,
"step": 68
},
{
"epoch": 0.3080357142857143,
"grad_norm": 0.4379926323890686,
"learning_rate": 2e-05,
"loss": 0.0557,
"step": 69
},
{
"epoch": 0.3125,
"grad_norm": 0.18577145040035248,
"learning_rate": 2e-05,
"loss": 0.0105,
"step": 70
},
{
"epoch": 0.3169642857142857,
"grad_norm": 0.27026066184043884,
"learning_rate": 2e-05,
"loss": 0.0097,
"step": 71
},
{
"epoch": 0.32142857142857145,
"grad_norm": 0.35076332092285156,
"learning_rate": 2e-05,
"loss": 0.0122,
"step": 72
},
{
"epoch": 0.32589285714285715,
"grad_norm": 0.16470995545387268,
"learning_rate": 2e-05,
"loss": 0.0094,
"step": 73
},
{
"epoch": 0.33035714285714285,
"grad_norm": 0.2735673785209656,
"learning_rate": 2e-05,
"loss": 0.0131,
"step": 74
},
{
"epoch": 0.33482142857142855,
"grad_norm": 0.4593258798122406,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 75
},
{
"epoch": 0.3392857142857143,
"grad_norm": 0.285243421792984,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 76
},
{
"epoch": 0.34375,
"grad_norm": 0.10123898833990097,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 77
},
{
"epoch": 0.3482142857142857,
"grad_norm": 0.17487011849880219,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 78
},
{
"epoch": 0.35267857142857145,
"grad_norm": 0.3148305416107178,
"learning_rate": 2e-05,
"loss": 0.0318,
"step": 79
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.4343680143356323,
"learning_rate": 2e-05,
"loss": 0.0109,
"step": 80
},
{
"epoch": 0.36160714285714285,
"grad_norm": 0.24763138592243195,
"learning_rate": 2e-05,
"loss": 0.0096,
"step": 81
},
{
"epoch": 0.36607142857142855,
"grad_norm": 0.12427603453397751,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 82
},
{
"epoch": 0.3705357142857143,
"grad_norm": 0.37291279435157776,
"learning_rate": 2e-05,
"loss": 0.0322,
"step": 83
},
{
"epoch": 0.375,
"grad_norm": 0.1884593665599823,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 84
},
{
"epoch": 0.3794642857142857,
"grad_norm": 0.22058643400669098,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 85
},
{
"epoch": 0.38392857142857145,
"grad_norm": 0.209506556391716,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 86
},
{
"epoch": 0.38839285714285715,
"grad_norm": 0.15874722599983215,
"learning_rate": 2e-05,
"loss": 0.0096,
"step": 87
},
{
"epoch": 0.39285714285714285,
"grad_norm": 0.24815426766872406,
"learning_rate": 2e-05,
"loss": 0.0126,
"step": 88
},
{
"epoch": 0.39732142857142855,
"grad_norm": 0.17406296730041504,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 89
},
{
"epoch": 0.4017857142857143,
"grad_norm": 0.2863009572029114,
"learning_rate": 2e-05,
"loss": 0.0168,
"step": 90
},
{
"epoch": 0.40625,
"grad_norm": 0.3468080759048462,
"learning_rate": 2e-05,
"loss": 0.0317,
"step": 91
},
{
"epoch": 0.4107142857142857,
"grad_norm": 0.3998833894729614,
"learning_rate": 2e-05,
"loss": 0.0398,
"step": 92
},
{
"epoch": 0.41517857142857145,
"grad_norm": 0.16211910545825958,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 93
},
{
"epoch": 0.41964285714285715,
"grad_norm": 0.27588722109794617,
"learning_rate": 2e-05,
"loss": 0.0119,
"step": 94
},
{
"epoch": 0.42410714285714285,
"grad_norm": 0.17133073508739471,
"learning_rate": 2e-05,
"loss": 0.0108,
"step": 95
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.22069743275642395,
"learning_rate": 2e-05,
"loss": 0.0113,
"step": 96
},
{
"epoch": 0.4330357142857143,
"grad_norm": 0.14985281229019165,
"learning_rate": 2e-05,
"loss": 0.0056,
"step": 97
},
{
"epoch": 0.4375,
"grad_norm": 0.2660472095012665,
"learning_rate": 2e-05,
"loss": 0.0092,
"step": 98
},
{
"epoch": 0.4419642857142857,
"grad_norm": 0.15048441290855408,
"learning_rate": 2e-05,
"loss": 0.0107,
"step": 99
},
{
"epoch": 0.44642857142857145,
"grad_norm": 0.09269213676452637,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 100
},
{
"epoch": 0.45089285714285715,
"grad_norm": 0.08095856755971909,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 101
},
{
"epoch": 0.45535714285714285,
"grad_norm": 0.22052565217018127,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 102
},
{
"epoch": 0.45982142857142855,
"grad_norm": 0.15201696753501892,
"learning_rate": 2e-05,
"loss": 0.011,
"step": 103
},
{
"epoch": 0.4642857142857143,
"grad_norm": 0.2241186946630478,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 104
},
{
"epoch": 0.46875,
"grad_norm": 0.2035360485315323,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 105
},
{
"epoch": 0.4732142857142857,
"grad_norm": 0.16850705444812775,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 106
},
{
"epoch": 0.47767857142857145,
"grad_norm": 0.265540212392807,
"learning_rate": 2e-05,
"loss": 0.0116,
"step": 107
},
{
"epoch": 0.48214285714285715,
"grad_norm": 0.15132322907447815,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 108
},
{
"epoch": 0.48660714285714285,
"grad_norm": 0.23885183036327362,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 109
},
{
"epoch": 0.49107142857142855,
"grad_norm": 0.16768532991409302,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 110
},
{
"epoch": 0.4955357142857143,
"grad_norm": 0.156480073928833,
"learning_rate": 2e-05,
"loss": 0.0048,
"step": 111
},
{
"epoch": 0.5,
"grad_norm": 0.31494247913360596,
"learning_rate": 2e-05,
"loss": 0.0171,
"step": 112
},
{
"epoch": 0.5044642857142857,
"grad_norm": 0.3249429166316986,
"learning_rate": 2e-05,
"loss": 0.0136,
"step": 113
},
{
"epoch": 0.5089285714285714,
"grad_norm": 0.18467825651168823,
"learning_rate": 2e-05,
"loss": 0.0113,
"step": 114
},
{
"epoch": 0.5133928571428571,
"grad_norm": 0.11450862139463425,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 115
},
{
"epoch": 0.5178571428571429,
"grad_norm": 0.12011968344449997,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 116
},
{
"epoch": 0.5223214285714286,
"grad_norm": 0.4256938397884369,
"learning_rate": 2e-05,
"loss": 0.03,
"step": 117
},
{
"epoch": 0.5267857142857143,
"grad_norm": 0.12585486471652985,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 118
},
{
"epoch": 0.53125,
"grad_norm": 0.13013215363025665,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 119
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.44324806332588196,
"learning_rate": 2e-05,
"loss": 0.0299,
"step": 120
},
{
"epoch": 0.5401785714285714,
"grad_norm": 0.09240884333848953,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 121
},
{
"epoch": 0.5446428571428571,
"grad_norm": 0.11633799970149994,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 122
},
{
"epoch": 0.5491071428571429,
"grad_norm": 0.13074152171611786,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 123
},
{
"epoch": 0.5535714285714286,
"grad_norm": 0.08824117481708527,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 124
},
{
"epoch": 0.5580357142857143,
"grad_norm": 0.1764586865901947,
"learning_rate": 2e-05,
"loss": 0.0115,
"step": 125
},
{
"epoch": 0.5625,
"grad_norm": 0.28081825375556946,
"learning_rate": 2e-05,
"loss": 0.0157,
"step": 126
},
{
"epoch": 0.5669642857142857,
"grad_norm": 0.14706389605998993,
"learning_rate": 2e-05,
"loss": 0.0097,
"step": 127
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.13169962167739868,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 128
},
{
"epoch": 0.5758928571428571,
"grad_norm": 0.24221737682819366,
"learning_rate": 2e-05,
"loss": 0.0094,
"step": 129
},
{
"epoch": 0.5803571428571429,
"grad_norm": 0.2343599945306778,
"learning_rate": 2e-05,
"loss": 0.0125,
"step": 130
},
{
"epoch": 0.5848214285714286,
"grad_norm": 0.24817775189876556,
"learning_rate": 2e-05,
"loss": 0.0169,
"step": 131
},
{
"epoch": 0.5892857142857143,
"grad_norm": 0.1439865231513977,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 132
},
{
"epoch": 0.59375,
"grad_norm": 0.5774053335189819,
"learning_rate": 2e-05,
"loss": 0.0115,
"step": 133
},
{
"epoch": 0.5982142857142857,
"grad_norm": 0.27249982953071594,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 134
},
{
"epoch": 0.6026785714285714,
"grad_norm": 0.21357686817646027,
"learning_rate": 2e-05,
"loss": 0.0052,
"step": 135
},
{
"epoch": 0.6071428571428571,
"grad_norm": 0.139152392745018,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 136
},
{
"epoch": 0.6116071428571429,
"grad_norm": 0.3676150143146515,
"learning_rate": 2e-05,
"loss": 0.0181,
"step": 137
},
{
"epoch": 0.6160714285714286,
"grad_norm": 0.1653941422700882,
"learning_rate": 2e-05,
"loss": 0.0103,
"step": 138
},
{
"epoch": 0.6205357142857143,
"grad_norm": 0.13131186366081238,
"learning_rate": 2e-05,
"loss": 0.009,
"step": 139
},
{
"epoch": 0.625,
"grad_norm": 0.1705312728881836,
"learning_rate": 2e-05,
"loss": 0.0127,
"step": 140
},
{
"epoch": 0.6294642857142857,
"grad_norm": 0.20293530821800232,
"learning_rate": 2e-05,
"loss": 0.0106,
"step": 141
},
{
"epoch": 0.6339285714285714,
"grad_norm": 0.19262683391571045,
"learning_rate": 2e-05,
"loss": 0.011,
"step": 142
},
{
"epoch": 0.6383928571428571,
"grad_norm": 0.13301971554756165,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 143
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.13495324552059174,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 144
},
{
"epoch": 0.6473214285714286,
"grad_norm": 0.16713371872901917,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 145
},
{
"epoch": 0.6517857142857143,
"grad_norm": 0.153954416513443,
"learning_rate": 2e-05,
"loss": 0.0092,
"step": 146
},
{
"epoch": 0.65625,
"grad_norm": 0.13245932757854462,
"learning_rate": 2e-05,
"loss": 0.0098,
"step": 147
},
{
"epoch": 0.6607142857142857,
"grad_norm": 0.10969486832618713,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 148
},
{
"epoch": 0.6651785714285714,
"grad_norm": 0.3466426432132721,
"learning_rate": 2e-05,
"loss": 0.0155,
"step": 149
},
{
"epoch": 0.6696428571428571,
"grad_norm": 0.17744660377502441,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 150
},
{
"epoch": 0.6741071428571429,
"grad_norm": 0.505264163017273,
"learning_rate": 2e-05,
"loss": 0.0188,
"step": 151
},
{
"epoch": 0.6785714285714286,
"grad_norm": 0.225211963057518,
"learning_rate": 2e-05,
"loss": 0.0124,
"step": 152
},
{
"epoch": 0.6830357142857143,
"grad_norm": 0.14465954899787903,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 153
},
{
"epoch": 0.6875,
"grad_norm": 0.13549940288066864,
"learning_rate": 2e-05,
"loss": 0.0092,
"step": 154
},
{
"epoch": 0.6919642857142857,
"grad_norm": 0.17627279460430145,
"learning_rate": 2e-05,
"loss": 0.0107,
"step": 155
},
{
"epoch": 0.6964285714285714,
"grad_norm": 0.1911449432373047,
"learning_rate": 2e-05,
"loss": 0.0111,
"step": 156
},
{
"epoch": 0.7008928571428571,
"grad_norm": 0.18086941540241241,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 157
},
{
"epoch": 0.7053571428571429,
"grad_norm": 0.280097633600235,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 158
},
{
"epoch": 0.7098214285714286,
"grad_norm": 0.1896984875202179,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 159
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.16626662015914917,
"learning_rate": 2e-05,
"loss": 0.005,
"step": 160
},
{
"epoch": 0.71875,
"grad_norm": 0.18548309803009033,
"learning_rate": 2e-05,
"loss": 0.012,
"step": 161
},
{
"epoch": 0.7232142857142857,
"grad_norm": 0.19919419288635254,
"learning_rate": 2e-05,
"loss": 0.0099,
"step": 162
},
{
"epoch": 0.7276785714285714,
"grad_norm": 0.2509930729866028,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 163
},
{
"epoch": 0.7321428571428571,
"grad_norm": 0.21043901145458221,
"learning_rate": 2e-05,
"loss": 0.0145,
"step": 164
},
{
"epoch": 0.7366071428571429,
"grad_norm": 0.11943473666906357,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 165
},
{
"epoch": 0.7410714285714286,
"grad_norm": 0.2109983116388321,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 166
},
{
"epoch": 0.7455357142857143,
"grad_norm": 0.26444128155708313,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 167
},
{
"epoch": 0.75,
"grad_norm": 0.2525187134742737,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 168
},
{
"epoch": 0.7544642857142857,
"grad_norm": 0.26021668314933777,
"learning_rate": 2e-05,
"loss": 0.0135,
"step": 169
},
{
"epoch": 0.7589285714285714,
"grad_norm": 0.25887635350227356,
"learning_rate": 2e-05,
"loss": 0.0134,
"step": 170
},
{
"epoch": 0.7633928571428571,
"grad_norm": 0.4230816960334778,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 171
},
{
"epoch": 0.7678571428571429,
"grad_norm": 0.4137205183506012,
"learning_rate": 2e-05,
"loss": 0.0108,
"step": 172
},
{
"epoch": 0.7723214285714286,
"grad_norm": 0.12480466812849045,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 173
},
{
"epoch": 0.7767857142857143,
"grad_norm": 0.2187943309545517,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 174
},
{
"epoch": 0.78125,
"grad_norm": 0.09233838319778442,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 175
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.1769775003194809,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 176
},
{
"epoch": 0.7901785714285714,
"grad_norm": 0.12783905863761902,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 177
},
{
"epoch": 0.7946428571428571,
"grad_norm": 0.3727477788925171,
"learning_rate": 2e-05,
"loss": 0.0103,
"step": 178
},
{
"epoch": 0.7991071428571429,
"grad_norm": 0.17474934458732605,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 179
},
{
"epoch": 0.8035714285714286,
"grad_norm": 0.10964759439229965,
"learning_rate": 2e-05,
"loss": 0.009,
"step": 180
},
{
"epoch": 0.8080357142857143,
"grad_norm": 0.386709988117218,
"learning_rate": 2e-05,
"loss": 0.0098,
"step": 181
},
{
"epoch": 0.8125,
"grad_norm": 0.20667214691638947,
"learning_rate": 2e-05,
"loss": 0.0107,
"step": 182
},
{
"epoch": 0.8169642857142857,
"grad_norm": 0.13784578442573547,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 183
},
{
"epoch": 0.8214285714285714,
"grad_norm": 0.11419745534658432,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 184
},
{
"epoch": 0.8258928571428571,
"grad_norm": 0.1253211796283722,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 185
},
{
"epoch": 0.8303571428571429,
"grad_norm": 0.16259260475635529,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 186
},
{
"epoch": 0.8348214285714286,
"grad_norm": 0.22637289762496948,
"learning_rate": 2e-05,
"loss": 0.0125,
"step": 187
},
{
"epoch": 0.8392857142857143,
"grad_norm": 0.11347130686044693,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 188
},
{
"epoch": 0.84375,
"grad_norm": 0.23896394670009613,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 189
},
{
"epoch": 0.8482142857142857,
"grad_norm": 0.12304235994815826,
"learning_rate": 2e-05,
"loss": 0.006,
"step": 190
},
{
"epoch": 0.8526785714285714,
"grad_norm": 0.14501796662807465,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 191
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.22769401967525482,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 192
},
{
"epoch": 0.8616071428571429,
"grad_norm": 0.20632179081439972,
"learning_rate": 2e-05,
"loss": 0.0109,
"step": 193
},
{
"epoch": 0.8660714285714286,
"grad_norm": 0.09508738666772842,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 194
},
{
"epoch": 0.8705357142857143,
"grad_norm": 0.16956081986427307,
"learning_rate": 2e-05,
"loss": 0.0056,
"step": 195
},
{
"epoch": 0.875,
"grad_norm": 0.19381490349769592,
"learning_rate": 2e-05,
"loss": 0.0142,
"step": 196
},
{
"epoch": 0.8794642857142857,
"grad_norm": 0.28475290536880493,
"learning_rate": 2e-05,
"loss": 0.0123,
"step": 197
},
{
"epoch": 0.8839285714285714,
"grad_norm": 0.13424070179462433,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 198
},
{
"epoch": 0.8883928571428571,
"grad_norm": 0.20577555894851685,
"learning_rate": 2e-05,
"loss": 0.0098,
"step": 199
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.34151265025138855,
"learning_rate": 2e-05,
"loss": 0.0142,
"step": 200
},
{
"epoch": 0.8973214285714286,
"grad_norm": 0.211790069937706,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 201
},
{
"epoch": 0.9017857142857143,
"grad_norm": 0.17058762907981873,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 202
},
{
"epoch": 0.90625,
"grad_norm": 0.3145332634449005,
"learning_rate": 2e-05,
"loss": 0.0212,
"step": 203
},
{
"epoch": 0.9107142857142857,
"grad_norm": 0.1913270354270935,
"learning_rate": 2e-05,
"loss": 0.0119,
"step": 204
},
{
"epoch": 0.9151785714285714,
"grad_norm": 0.17364099621772766,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 205
},
{
"epoch": 0.9196428571428571,
"grad_norm": 0.20001767575740814,
"learning_rate": 2e-05,
"loss": 0.0101,
"step": 206
},
{
"epoch": 0.9241071428571429,
"grad_norm": 0.13925513625144958,
"learning_rate": 2e-05,
"loss": 0.0061,
"step": 207
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.13868604600429535,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 208
},
{
"epoch": 0.9330357142857143,
"grad_norm": 0.27624329924583435,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 209
},
{
"epoch": 0.9375,
"grad_norm": 0.1608811616897583,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 210
},
{
"epoch": 0.9419642857142857,
"grad_norm": 0.0851345956325531,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 211
},
{
"epoch": 0.9464285714285714,
"grad_norm": 0.34213757514953613,
"learning_rate": 2e-05,
"loss": 0.0141,
"step": 212
},
{
"epoch": 0.9508928571428571,
"grad_norm": 0.09006419777870178,
"learning_rate": 2e-05,
"loss": 0.0053,
"step": 213
},
{
"epoch": 0.9553571428571429,
"grad_norm": 0.1785372495651245,
"learning_rate": 2e-05,
"loss": 0.0094,
"step": 214
},
{
"epoch": 0.9598214285714286,
"grad_norm": 0.1560213714838028,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 215
},
{
"epoch": 0.9642857142857143,
"grad_norm": 0.11964119970798492,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 216
},
{
"epoch": 0.96875,
"grad_norm": 0.11234603822231293,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 217
},
{
"epoch": 0.9732142857142857,
"grad_norm": 0.11584550887346268,
"learning_rate": 2e-05,
"loss": 0.0095,
"step": 218
},
{
"epoch": 0.9776785714285714,
"grad_norm": 0.04019077867269516,
"learning_rate": 2e-05,
"loss": 0.0048,
"step": 219
},
{
"epoch": 0.9821428571428571,
"grad_norm": 0.20271597802639008,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 220
},
{
"epoch": 0.9866071428571429,
"grad_norm": 0.11852418631315231,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 221
},
{
"epoch": 0.9910714285714286,
"grad_norm": 0.08768610656261444,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 222
},
{
"epoch": 0.9955357142857143,
"grad_norm": 0.2536848485469818,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 223
},
{
"epoch": 1.0,
"grad_norm": 0.19689269363880157,
"learning_rate": 2e-05,
"loss": 0.0095,
"step": 224
},
{
"epoch": 1.0044642857142858,
"grad_norm": 0.10952406376600266,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 225
},
{
"epoch": 1.0089285714285714,
"grad_norm": 0.19677336513996124,
"learning_rate": 2e-05,
"loss": 0.005,
"step": 226
},
{
"epoch": 1.0133928571428572,
"grad_norm": 0.12416896969079971,
"learning_rate": 2e-05,
"loss": 0.0057,
"step": 227
},
{
"epoch": 1.0178571428571428,
"grad_norm": 0.14256438612937927,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 228
},
{
"epoch": 1.0223214285714286,
"grad_norm": 0.17529906332492828,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 229
},
{
"epoch": 1.0267857142857142,
"grad_norm": 0.22035321593284607,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 230
},
{
"epoch": 1.03125,
"grad_norm": 0.21250584721565247,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 231
},
{
"epoch": 1.0357142857142858,
"grad_norm": 0.18901652097702026,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 232
},
{
"epoch": 1.0401785714285714,
"grad_norm": 0.19155669212341309,
"learning_rate": 2e-05,
"loss": 0.0117,
"step": 233
},
{
"epoch": 1.0446428571428572,
"grad_norm": 0.1865350306034088,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 234
},
{
"epoch": 1.0491071428571428,
"grad_norm": 0.15170250833034515,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 235
},
{
"epoch": 1.0535714285714286,
"grad_norm": 0.10518006980419159,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 236
},
{
"epoch": 1.0580357142857142,
"grad_norm": 0.2728783190250397,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 237
},
{
"epoch": 1.0625,
"grad_norm": 0.21080271899700165,
"learning_rate": 2e-05,
"loss": 0.0102,
"step": 238
},
{
"epoch": 1.0669642857142858,
"grad_norm": 0.13723178207874298,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 239
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.19935685396194458,
"learning_rate": 2e-05,
"loss": 0.0057,
"step": 240
},
{
"epoch": 1.0758928571428572,
"grad_norm": 0.12787075340747833,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 241
},
{
"epoch": 1.0803571428571428,
"grad_norm": 0.3154740035533905,
"learning_rate": 2e-05,
"loss": 0.0106,
"step": 242
},
{
"epoch": 1.0848214285714286,
"grad_norm": 0.1168612465262413,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 243
},
{
"epoch": 1.0892857142857142,
"grad_norm": 0.10894633084535599,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 244
},
{
"epoch": 1.09375,
"grad_norm": 0.1844814121723175,
"learning_rate": 2e-05,
"loss": 0.0103,
"step": 245
},
{
"epoch": 1.0982142857142858,
"grad_norm": 0.5986953377723694,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 246
},
{
"epoch": 1.1026785714285714,
"grad_norm": 0.23711133003234863,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 247
},
{
"epoch": 1.1071428571428572,
"grad_norm": 0.28520116209983826,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 248
},
{
"epoch": 1.1116071428571428,
"grad_norm": 0.2094752937555313,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 249
},
{
"epoch": 1.1160714285714286,
"grad_norm": 0.17041270434856415,
"learning_rate": 2e-05,
"loss": 0.0085,
"step": 250
},
{
"epoch": 1.1205357142857142,
"grad_norm": 0.26283618807792664,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 251
},
{
"epoch": 1.125,
"grad_norm": 0.40016844868659973,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 252
},
{
"epoch": 1.1294642857142858,
"grad_norm": 0.13586895167827606,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 253
},
{
"epoch": 1.1339285714285714,
"grad_norm": 0.26292648911476135,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 254
},
{
"epoch": 1.1383928571428572,
"grad_norm": 0.6343429088592529,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 255
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.1948457509279251,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 256
},
{
"epoch": 1.1473214285714286,
"grad_norm": 0.18401798605918884,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 257
},
{
"epoch": 1.1517857142857142,
"grad_norm": 0.4655361771583557,
"learning_rate": 2e-05,
"loss": 0.0138,
"step": 258
},
{
"epoch": 1.15625,
"grad_norm": 0.11816074699163437,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 259
},
{
"epoch": 1.1607142857142858,
"grad_norm": 0.11838362365961075,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 260
},
{
"epoch": 1.1651785714285714,
"grad_norm": 0.16618025302886963,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 261
},
{
"epoch": 1.1696428571428572,
"grad_norm": 0.1511249542236328,
"learning_rate": 2e-05,
"loss": 0.0103,
"step": 262
},
{
"epoch": 1.1741071428571428,
"grad_norm": 0.14847303926944733,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 263
},
{
"epoch": 1.1785714285714286,
"grad_norm": 0.26684054732322693,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 264
},
{
"epoch": 1.1830357142857142,
"grad_norm": 0.13314294815063477,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 265
},
{
"epoch": 1.1875,
"grad_norm": 0.16953180730342865,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 266
},
{
"epoch": 1.1919642857142858,
"grad_norm": 0.1619475930929184,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 267
},
{
"epoch": 1.1964285714285714,
"grad_norm": 0.19401992857456207,
"learning_rate": 2e-05,
"loss": 0.0103,
"step": 268
},
{
"epoch": 1.2008928571428572,
"grad_norm": 0.16010133922100067,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 269
},
{
"epoch": 1.2053571428571428,
"grad_norm": 0.20059579610824585,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 270
},
{
"epoch": 1.2098214285714286,
"grad_norm": 0.07786630094051361,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 271
},
{
"epoch": 1.2142857142857142,
"grad_norm": 0.0951201319694519,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 272
},
{
"epoch": 1.21875,
"grad_norm": 0.16951927542686462,
"learning_rate": 2e-05,
"loss": 0.0094,
"step": 273
},
{
"epoch": 1.2232142857142858,
"grad_norm": 0.1350620537996292,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 274
},
{
"epoch": 1.2276785714285714,
"grad_norm": 0.09529034048318863,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 275
},
{
"epoch": 1.2321428571428572,
"grad_norm": 0.18849754333496094,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 276
},
{
"epoch": 1.2366071428571428,
"grad_norm": 0.08065006136894226,
"learning_rate": 2e-05,
"loss": 0.0053,
"step": 277
},
{
"epoch": 1.2410714285714286,
"grad_norm": 0.12004493921995163,
"learning_rate": 2e-05,
"loss": 0.0095,
"step": 278
},
{
"epoch": 1.2455357142857142,
"grad_norm": 0.0798286497592926,
"learning_rate": 2e-05,
"loss": 0.0053,
"step": 279
},
{
"epoch": 1.25,
"grad_norm": 0.08489444851875305,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 280
},
{
"epoch": 1.2544642857142856,
"grad_norm": 0.17156155407428741,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 281
},
{
"epoch": 1.2589285714285714,
"grad_norm": 0.17118088901042938,
"learning_rate": 2e-05,
"loss": 0.0102,
"step": 282
},
{
"epoch": 1.2633928571428572,
"grad_norm": 0.224991574883461,
"learning_rate": 2e-05,
"loss": 0.0097,
"step": 283
},
{
"epoch": 1.2678571428571428,
"grad_norm": 0.11658413708209991,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 284
},
{
"epoch": 1.2723214285714286,
"grad_norm": 0.20273533463478088,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 285
},
{
"epoch": 1.2767857142857144,
"grad_norm": 0.09004299342632294,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 286
},
{
"epoch": 1.28125,
"grad_norm": 0.18204613029956818,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 287
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.049572091549634933,
"learning_rate": 2e-05,
"loss": 0.0049,
"step": 288
},
{
"epoch": 1.2901785714285714,
"grad_norm": 0.1771448701620102,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 289
},
{
"epoch": 1.2946428571428572,
"grad_norm": 0.13888761401176453,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 290
},
{
"epoch": 1.2991071428571428,
"grad_norm": 0.197177454829216,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 291
},
{
"epoch": 1.3035714285714286,
"grad_norm": 0.243514284491539,
"learning_rate": 2e-05,
"loss": 0.013,
"step": 292
},
{
"epoch": 1.3080357142857144,
"grad_norm": 0.09940832108259201,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 293
},
{
"epoch": 1.3125,
"grad_norm": 0.299482136964798,
"learning_rate": 2e-05,
"loss": 0.013,
"step": 294
},
{
"epoch": 1.3169642857142856,
"grad_norm": 0.17354537546634674,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 295
},
{
"epoch": 1.3214285714285714,
"grad_norm": 0.1544426530599594,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 296
},
{
"epoch": 1.3258928571428572,
"grad_norm": 0.15909938514232635,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 297
},
{
"epoch": 1.3303571428571428,
"grad_norm": 0.21977274119853973,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 298
},
{
"epoch": 1.3348214285714286,
"grad_norm": 0.3289298415184021,
"learning_rate": 2e-05,
"loss": 0.005,
"step": 299
},
{
"epoch": 1.3392857142857144,
"grad_norm": 0.22125479578971863,
"learning_rate": 2e-05,
"loss": 0.0101,
"step": 300
},
{
"epoch": 1.34375,
"grad_norm": 0.10905671864748001,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 301
},
{
"epoch": 1.3482142857142856,
"grad_norm": 0.3605917692184448,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 302
},
{
"epoch": 1.3526785714285714,
"grad_norm": 0.18675312399864197,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 303
},
{
"epoch": 1.3571428571428572,
"grad_norm": 0.23379294574260712,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 304
},
{
"epoch": 1.3616071428571428,
"grad_norm": 0.09766809642314911,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 305
},
{
"epoch": 1.3660714285714286,
"grad_norm": 0.11652177572250366,
"learning_rate": 2e-05,
"loss": 0.0101,
"step": 306
},
{
"epoch": 1.3705357142857144,
"grad_norm": 0.11609390377998352,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 307
},
{
"epoch": 1.375,
"grad_norm": 0.3085818290710449,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 308
},
{
"epoch": 1.3794642857142856,
"grad_norm": 0.30840498208999634,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 309
},
{
"epoch": 1.3839285714285714,
"grad_norm": 0.12295576184988022,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 310
},
{
"epoch": 1.3883928571428572,
"grad_norm": 0.2735302746295929,
"learning_rate": 2e-05,
"loss": 0.0094,
"step": 311
},
{
"epoch": 1.3928571428571428,
"grad_norm": 0.22401046752929688,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 312
},
{
"epoch": 1.3973214285714286,
"grad_norm": 0.2643716335296631,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 313
},
{
"epoch": 1.4017857142857144,
"grad_norm": 0.09197328239679337,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 314
},
{
"epoch": 1.40625,
"grad_norm": 0.15055829286575317,
"learning_rate": 2e-05,
"loss": 0.0115,
"step": 315
},
{
"epoch": 1.4107142857142856,
"grad_norm": 0.10229172557592392,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 316
},
{
"epoch": 1.4151785714285714,
"grad_norm": 0.1713918000459671,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 317
},
{
"epoch": 1.4196428571428572,
"grad_norm": 0.17321155965328217,
"learning_rate": 2e-05,
"loss": 0.0128,
"step": 318
},
{
"epoch": 1.4241071428571428,
"grad_norm": 0.14712217450141907,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 319
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.08568955212831497,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 320
},
{
"epoch": 1.4330357142857144,
"grad_norm": 0.10814769566059113,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 321
},
{
"epoch": 1.4375,
"grad_norm": 0.11376317590475082,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 322
},
{
"epoch": 1.4419642857142856,
"grad_norm": 0.09950172901153564,
"learning_rate": 2e-05,
"loss": 0.0055,
"step": 323
},
{
"epoch": 1.4464285714285714,
"grad_norm": 0.10877425968647003,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 324
},
{
"epoch": 1.4508928571428572,
"grad_norm": 0.16534090042114258,
"learning_rate": 2e-05,
"loss": 0.012,
"step": 325
},
{
"epoch": 1.4553571428571428,
"grad_norm": 0.11280371993780136,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 326
},
{
"epoch": 1.4598214285714286,
"grad_norm": 0.09798318147659302,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 327
},
{
"epoch": 1.4642857142857144,
"grad_norm": 0.21448449790477753,
"learning_rate": 2e-05,
"loss": 0.005,
"step": 328
},
{
"epoch": 1.46875,
"grad_norm": 0.11371182650327682,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 329
},
{
"epoch": 1.4732142857142856,
"grad_norm": 0.08604440838098526,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 330
},
{
"epoch": 1.4776785714285714,
"grad_norm": 0.19020172953605652,
"learning_rate": 2e-05,
"loss": 0.0095,
"step": 331
},
{
"epoch": 1.4821428571428572,
"grad_norm": 0.07287605851888657,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 332
},
{
"epoch": 1.4866071428571428,
"grad_norm": 0.23569829761981964,
"learning_rate": 2e-05,
"loss": 0.0165,
"step": 333
},
{
"epoch": 1.4910714285714286,
"grad_norm": 0.13371407985687256,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 334
},
{
"epoch": 1.4955357142857144,
"grad_norm": 0.2280549257993698,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 335
},
{
"epoch": 1.5,
"grad_norm": 0.09851525723934174,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 336
},
{
"epoch": 1.5044642857142856,
"grad_norm": 0.17804968357086182,
"learning_rate": 2e-05,
"loss": 0.0102,
"step": 337
},
{
"epoch": 1.5089285714285714,
"grad_norm": 0.08316322416067123,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 338
},
{
"epoch": 1.5133928571428572,
"grad_norm": 0.09718410670757294,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 339
},
{
"epoch": 1.5178571428571428,
"grad_norm": 0.20578241348266602,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 340
},
{
"epoch": 1.5223214285714286,
"grad_norm": 0.22264248132705688,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 341
},
{
"epoch": 1.5267857142857144,
"grad_norm": 0.13287998735904694,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 342
},
{
"epoch": 1.53125,
"grad_norm": 0.17487944662570953,
"learning_rate": 2e-05,
"loss": 0.0085,
"step": 343
},
{
"epoch": 1.5357142857142856,
"grad_norm": 0.34332460165023804,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 344
},
{
"epoch": 1.5401785714285714,
"grad_norm": 0.12836158275604248,
"learning_rate": 2e-05,
"loss": 0.0097,
"step": 345
},
{
"epoch": 1.5446428571428572,
"grad_norm": 0.21668285131454468,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 346
},
{
"epoch": 1.5491071428571428,
"grad_norm": 0.20517054200172424,
"learning_rate": 2e-05,
"loss": 0.0108,
"step": 347
},
{
"epoch": 1.5535714285714286,
"grad_norm": 0.3114481270313263,
"learning_rate": 2e-05,
"loss": 0.0111,
"step": 348
},
{
"epoch": 1.5580357142857144,
"grad_norm": 0.19148893654346466,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 349
},
{
"epoch": 1.5625,
"grad_norm": 0.07075007259845734,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 350
},
{
"epoch": 1.5669642857142856,
"grad_norm": 0.12846648693084717,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 351
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.1478446125984192,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 352
},
{
"epoch": 1.5758928571428572,
"grad_norm": 0.16469308733940125,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 353
},
{
"epoch": 1.5803571428571428,
"grad_norm": 0.07830236107110977,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 354
},
{
"epoch": 1.5848214285714286,
"grad_norm": 0.1975899487733841,
"learning_rate": 2e-05,
"loss": 0.0097,
"step": 355
},
{
"epoch": 1.5892857142857144,
"grad_norm": 0.0999147966504097,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 356
},
{
"epoch": 1.59375,
"grad_norm": 0.13135221600532532,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 357
},
{
"epoch": 1.5982142857142856,
"grad_norm": 0.15470464527606964,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 358
},
{
"epoch": 1.6026785714285714,
"grad_norm": 0.12623795866966248,
"learning_rate": 2e-05,
"loss": 0.0055,
"step": 359
},
{
"epoch": 1.6071428571428572,
"grad_norm": 0.14909787476062775,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 360
},
{
"epoch": 1.6116071428571428,
"grad_norm": 0.16480869054794312,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 361
},
{
"epoch": 1.6160714285714286,
"grad_norm": 0.2582751512527466,
"learning_rate": 2e-05,
"loss": 0.011,
"step": 362
},
{
"epoch": 1.6205357142857144,
"grad_norm": 0.11021137982606888,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 363
},
{
"epoch": 1.625,
"grad_norm": 0.07676777243614197,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 364
},
{
"epoch": 1.6294642857142856,
"grad_norm": 0.08494916558265686,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 365
},
{
"epoch": 1.6339285714285714,
"grad_norm": 0.10398717224597931,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 366
},
{
"epoch": 1.6383928571428572,
"grad_norm": 0.09390587359666824,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 367
},
{
"epoch": 1.6428571428571428,
"grad_norm": 0.13992364704608917,
"learning_rate": 2e-05,
"loss": 0.0085,
"step": 368
},
{
"epoch": 1.6473214285714286,
"grad_norm": 0.0727076306939125,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 369
},
{
"epoch": 1.6517857142857144,
"grad_norm": 0.0938880518078804,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 370
},
{
"epoch": 1.65625,
"grad_norm": 0.1349678784608841,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 371
},
{
"epoch": 1.6607142857142856,
"grad_norm": 0.13271740078926086,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 372
},
{
"epoch": 1.6651785714285714,
"grad_norm": 0.06232460215687752,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 373
},
{
"epoch": 1.6696428571428572,
"grad_norm": 0.06478999555110931,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 374
},
{
"epoch": 1.6741071428571428,
"grad_norm": 0.16400626301765442,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 375
},
{
"epoch": 1.6785714285714286,
"grad_norm": 0.0681205615401268,
"learning_rate": 2e-05,
"loss": 0.0051,
"step": 376
},
{
"epoch": 1.6830357142857144,
"grad_norm": 0.11032702773809433,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 377
},
{
"epoch": 1.6875,
"grad_norm": 0.14635053277015686,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 378
},
{
"epoch": 1.6919642857142856,
"grad_norm": 0.10891900956630707,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 379
},
{
"epoch": 1.6964285714285714,
"grad_norm": 0.1412774920463562,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 380
},
{
"epoch": 1.7008928571428572,
"grad_norm": 0.14827723801136017,
"learning_rate": 2e-05,
"loss": 0.0053,
"step": 381
},
{
"epoch": 1.7053571428571428,
"grad_norm": 0.07940851151943207,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 382
},
{
"epoch": 1.7098214285714286,
"grad_norm": 0.3140534460544586,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 383
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.09956244379281998,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 384
},
{
"epoch": 1.71875,
"grad_norm": 0.18810197710990906,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 385
},
{
"epoch": 1.7232142857142856,
"grad_norm": 0.2445448487997055,
"learning_rate": 2e-05,
"loss": 0.013,
"step": 386
},
{
"epoch": 1.7276785714285714,
"grad_norm": 0.19565074145793915,
"learning_rate": 2e-05,
"loss": 0.0106,
"step": 387
},
{
"epoch": 1.7321428571428572,
"grad_norm": 0.07408107817173004,
"learning_rate": 2e-05,
"loss": 0.0054,
"step": 388
},
{
"epoch": 1.7366071428571428,
"grad_norm": 0.06907396763563156,
"learning_rate": 2e-05,
"loss": 0.0055,
"step": 389
},
{
"epoch": 1.7410714285714286,
"grad_norm": 0.12801378965377808,
"learning_rate": 2e-05,
"loss": 0.0092,
"step": 390
},
{
"epoch": 1.7455357142857144,
"grad_norm": 0.06491488218307495,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 391
},
{
"epoch": 1.75,
"grad_norm": 0.11618059128522873,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 392
},
{
"epoch": 1.7544642857142856,
"grad_norm": 0.08376041054725647,
"learning_rate": 2e-05,
"loss": 0.0054,
"step": 393
},
{
"epoch": 1.7589285714285714,
"grad_norm": 0.10667941719293594,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 394
},
{
"epoch": 1.7633928571428572,
"grad_norm": 0.13104568421840668,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 395
},
{
"epoch": 1.7678571428571428,
"grad_norm": 0.09212382137775421,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 396
},
{
"epoch": 1.7723214285714286,
"grad_norm": 0.23598161339759827,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 397
},
{
"epoch": 1.7767857142857144,
"grad_norm": 0.0689774751663208,
"learning_rate": 2e-05,
"loss": 0.0054,
"step": 398
},
{
"epoch": 1.78125,
"grad_norm": 0.08331548422574997,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 399
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.08793813735246658,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 400
},
{
"epoch": 1.7901785714285714,
"grad_norm": 0.1980009227991104,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 401
},
{
"epoch": 1.7946428571428572,
"grad_norm": 0.11130912601947784,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 402
},
{
"epoch": 1.7991071428571428,
"grad_norm": 0.1447134166955948,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 403
},
{
"epoch": 1.8035714285714286,
"grad_norm": 0.12303169071674347,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 404
},
{
"epoch": 1.8080357142857144,
"grad_norm": 0.10215016454458237,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 405
},
{
"epoch": 1.8125,
"grad_norm": 0.08570464700460434,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 406
},
{
"epoch": 1.8169642857142856,
"grad_norm": 0.0914364829659462,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 407
},
{
"epoch": 1.8214285714285714,
"grad_norm": 0.22513145208358765,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 408
},
{
"epoch": 1.8258928571428572,
"grad_norm": 0.11378823965787888,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 409
},
{
"epoch": 1.8303571428571428,
"grad_norm": 0.09297342598438263,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 410
},
{
"epoch": 1.8348214285714286,
"grad_norm": 0.19639909267425537,
"learning_rate": 2e-05,
"loss": 0.0113,
"step": 411
},
{
"epoch": 1.8392857142857144,
"grad_norm": 0.19027334451675415,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 412
},
{
"epoch": 1.84375,
"grad_norm": 0.14595958590507507,
"learning_rate": 2e-05,
"loss": 0.0096,
"step": 413
},
{
"epoch": 1.8482142857142856,
"grad_norm": 0.15500158071517944,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 414
},
{
"epoch": 1.8526785714285714,
"grad_norm": 0.1046842709183693,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 415
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.17138102650642395,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 416
},
{
"epoch": 1.8616071428571428,
"grad_norm": 0.05790293961763382,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 417
},
{
"epoch": 1.8660714285714286,
"grad_norm": 0.09619224816560745,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 418
},
{
"epoch": 1.8705357142857144,
"grad_norm": 0.0759076327085495,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 419
},
{
"epoch": 1.875,
"grad_norm": 0.09200000762939453,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 420
},
{
"epoch": 1.8794642857142856,
"grad_norm": 0.10770270973443985,
"learning_rate": 2e-05,
"loss": 0.0057,
"step": 421
},
{
"epoch": 1.8839285714285714,
"grad_norm": 0.12750625610351562,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 422
},
{
"epoch": 1.8883928571428572,
"grad_norm": 0.07994697988033295,
"learning_rate": 2e-05,
"loss": 0.0054,
"step": 423
},
{
"epoch": 1.8928571428571428,
"grad_norm": 0.16716712713241577,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 424
},
{
"epoch": 1.8973214285714286,
"grad_norm": 0.10230574756860733,
"learning_rate": 2e-05,
"loss": 0.0098,
"step": 425
},
{
"epoch": 1.9017857142857144,
"grad_norm": 0.17903654277324677,
"learning_rate": 2e-05,
"loss": 0.0095,
"step": 426
},
{
"epoch": 1.90625,
"grad_norm": 0.12698078155517578,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 427
},
{
"epoch": 1.9107142857142856,
"grad_norm": 0.12924636900424957,
"learning_rate": 2e-05,
"loss": 0.0092,
"step": 428
},
{
"epoch": 1.9151785714285714,
"grad_norm": 0.07155907899141312,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 429
},
{
"epoch": 1.9196428571428572,
"grad_norm": 0.10363934934139252,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 430
},
{
"epoch": 1.9241071428571428,
"grad_norm": 0.11996760219335556,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 431
},
{
"epoch": 1.9285714285714286,
"grad_norm": 0.10494901239871979,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 432
},
{
"epoch": 1.9330357142857144,
"grad_norm": 0.18249598145484924,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 433
},
{
"epoch": 1.9375,
"grad_norm": 0.13870225846767426,
"learning_rate": 2e-05,
"loss": 0.0094,
"step": 434
},
{
"epoch": 1.9419642857142856,
"grad_norm": 0.09797409921884537,
"learning_rate": 2e-05,
"loss": 0.0052,
"step": 435
},
{
"epoch": 1.9464285714285714,
"grad_norm": 0.34259602427482605,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 436
},
{
"epoch": 1.9508928571428572,
"grad_norm": 0.16433046758174896,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 437
},
{
"epoch": 1.9553571428571428,
"grad_norm": 0.1122380718588829,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 438
},
{
"epoch": 1.9598214285714286,
"grad_norm": 0.09637662768363953,
"learning_rate": 2e-05,
"loss": 0.0057,
"step": 439
},
{
"epoch": 1.9642857142857144,
"grad_norm": 0.1375151425600052,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 440
},
{
"epoch": 1.96875,
"grad_norm": 0.1535470336675644,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 441
},
{
"epoch": 1.9732142857142856,
"grad_norm": 0.08874325454235077,
"learning_rate": 2e-05,
"loss": 0.006,
"step": 442
},
{
"epoch": 1.9776785714285714,
"grad_norm": 0.1507561206817627,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 443
},
{
"epoch": 1.9821428571428572,
"grad_norm": 0.10507656633853912,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 444
},
{
"epoch": 1.9866071428571428,
"grad_norm": 0.19875890016555786,
"learning_rate": 2e-05,
"loss": 0.0094,
"step": 445
},
{
"epoch": 1.9910714285714286,
"grad_norm": 0.09340199083089828,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 446
},
{
"epoch": 1.9955357142857144,
"grad_norm": 0.06378448009490967,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 447
},
{
"epoch": 2.0,
"grad_norm": 0.07624057680368423,
"learning_rate": 2e-05,
"loss": 0.006,
"step": 448
},
{
"epoch": 2.0044642857142856,
"grad_norm": 0.11424339562654495,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 449
},
{
"epoch": 2.0089285714285716,
"grad_norm": 0.1132059097290039,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 450
},
{
"epoch": 2.013392857142857,
"grad_norm": 0.06459866464138031,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 451
},
{
"epoch": 2.017857142857143,
"grad_norm": 0.05782695114612579,
"learning_rate": 2e-05,
"loss": 0.0052,
"step": 452
},
{
"epoch": 2.0223214285714284,
"grad_norm": 0.09242033213376999,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 453
},
{
"epoch": 2.0267857142857144,
"grad_norm": 0.07749675959348679,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 454
},
{
"epoch": 2.03125,
"grad_norm": 0.18795126676559448,
"learning_rate": 2e-05,
"loss": 0.0056,
"step": 455
},
{
"epoch": 2.0357142857142856,
"grad_norm": 0.08309225738048553,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 456
},
{
"epoch": 2.0401785714285716,
"grad_norm": 0.07069497555494308,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 457
},
{
"epoch": 2.044642857142857,
"grad_norm": 0.17089472711086273,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 458
},
{
"epoch": 2.049107142857143,
"grad_norm": 0.15188992023468018,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 459
},
{
"epoch": 2.0535714285714284,
"grad_norm": 0.16761493682861328,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 460
},
{
"epoch": 2.0580357142857144,
"grad_norm": 0.06758573651313782,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 461
},
{
"epoch": 2.0625,
"grad_norm": 0.05804857611656189,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 462
},
{
"epoch": 2.0669642857142856,
"grad_norm": 0.0982503667473793,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 463
},
{
"epoch": 2.0714285714285716,
"grad_norm": 0.16003400087356567,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 464
},
{
"epoch": 2.075892857142857,
"grad_norm": 0.0709138959646225,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 465
},
{
"epoch": 2.080357142857143,
"grad_norm": 0.1403375118970871,
"learning_rate": 2e-05,
"loss": 0.009,
"step": 466
},
{
"epoch": 2.0848214285714284,
"grad_norm": 0.103029765188694,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 467
},
{
"epoch": 2.0892857142857144,
"grad_norm": 0.06266972422599792,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 468
},
{
"epoch": 2.09375,
"grad_norm": 0.09076008945703506,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 469
},
{
"epoch": 2.0982142857142856,
"grad_norm": 0.04648640751838684,
"learning_rate": 2e-05,
"loss": 0.0061,
"step": 470
},
{
"epoch": 2.1026785714285716,
"grad_norm": 0.1504824310541153,
"learning_rate": 2e-05,
"loss": 0.0117,
"step": 471
},
{
"epoch": 2.107142857142857,
"grad_norm": 0.0596964955329895,
"learning_rate": 2e-05,
"loss": 0.0053,
"step": 472
},
{
"epoch": 2.111607142857143,
"grad_norm": 0.11864890903234482,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 473
},
{
"epoch": 2.1160714285714284,
"grad_norm": 0.12213343381881714,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 474
},
{
"epoch": 2.1205357142857144,
"grad_norm": 0.11169180274009705,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 475
},
{
"epoch": 2.125,
"grad_norm": 0.1022031381726265,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 476
},
{
"epoch": 2.1294642857142856,
"grad_norm": 0.07049895823001862,
"learning_rate": 2e-05,
"loss": 0.0056,
"step": 477
},
{
"epoch": 2.1339285714285716,
"grad_norm": 0.11264973878860474,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 478
},
{
"epoch": 2.138392857142857,
"grad_norm": 0.07296457141637802,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 479
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.11134887486696243,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 480
},
{
"epoch": 2.1473214285714284,
"grad_norm": 0.06486133486032486,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 481
},
{
"epoch": 2.1517857142857144,
"grad_norm": 0.0773395225405693,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 482
},
{
"epoch": 2.15625,
"grad_norm": 0.13982515037059784,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 483
},
{
"epoch": 2.1607142857142856,
"grad_norm": 0.10648379474878311,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 484
},
{
"epoch": 2.1651785714285716,
"grad_norm": 0.10965216904878616,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 485
},
{
"epoch": 2.169642857142857,
"grad_norm": 0.1464068591594696,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 486
},
{
"epoch": 2.174107142857143,
"grad_norm": 0.1061873659491539,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 487
},
{
"epoch": 2.1785714285714284,
"grad_norm": 0.061869096010923386,
"learning_rate": 2e-05,
"loss": 0.0054,
"step": 488
},
{
"epoch": 2.1830357142857144,
"grad_norm": 0.09782952070236206,
"learning_rate": 2e-05,
"loss": 0.0092,
"step": 489
},
{
"epoch": 2.1875,
"grad_norm": 0.08471392840147018,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 490
},
{
"epoch": 2.1919642857142856,
"grad_norm": 0.10430457442998886,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 491
},
{
"epoch": 2.1964285714285716,
"grad_norm": 0.07273336499929428,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 492
},
{
"epoch": 2.200892857142857,
"grad_norm": 0.11176946014165878,
"learning_rate": 2e-05,
"loss": 0.008,
"step": 493
},
{
"epoch": 2.205357142857143,
"grad_norm": 0.06163870543241501,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 494
},
{
"epoch": 2.2098214285714284,
"grad_norm": 0.10791970789432526,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 495
},
{
"epoch": 2.2142857142857144,
"grad_norm": 0.20584189891815186,
"learning_rate": 2e-05,
"loss": 0.0058,
"step": 496
},
{
"epoch": 2.21875,
"grad_norm": 0.06841476261615753,
"learning_rate": 2e-05,
"loss": 0.0057,
"step": 497
},
{
"epoch": 2.2232142857142856,
"grad_norm": 0.08570678532123566,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 498
},
{
"epoch": 2.2276785714285716,
"grad_norm": 0.056776780635118484,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 499
},
{
"epoch": 2.232142857142857,
"grad_norm": 0.09146109968423843,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 500
},
{
"epoch": 2.236607142857143,
"grad_norm": 0.09654641896486282,
"learning_rate": 2e-05,
"loss": 0.0047,
"step": 501
},
{
"epoch": 2.2410714285714284,
"grad_norm": 0.13811850547790527,
"learning_rate": 2e-05,
"loss": 0.0098,
"step": 502
},
{
"epoch": 2.2455357142857144,
"grad_norm": 0.10810638964176178,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 503
},
{
"epoch": 2.25,
"grad_norm": 0.21392522752285004,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 504
},
{
"epoch": 2.2544642857142856,
"grad_norm": 0.11717508733272552,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 505
},
{
"epoch": 2.2589285714285716,
"grad_norm": 0.05745083466172218,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 506
},
{
"epoch": 2.263392857142857,
"grad_norm": 0.13906385004520416,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 507
},
{
"epoch": 2.267857142857143,
"grad_norm": 0.20656032860279083,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 508
},
{
"epoch": 2.2723214285714284,
"grad_norm": 0.11236809194087982,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 509
},
{
"epoch": 2.2767857142857144,
"grad_norm": 0.08433755487203598,
"learning_rate": 2e-05,
"loss": 0.0049,
"step": 510
},
{
"epoch": 2.28125,
"grad_norm": 0.1257009208202362,
"learning_rate": 2e-05,
"loss": 0.0098,
"step": 511
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.08553122729063034,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 512
},
{
"epoch": 2.2901785714285716,
"grad_norm": 0.13585498929023743,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 513
},
{
"epoch": 2.294642857142857,
"grad_norm": 0.08864466845989227,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 514
},
{
"epoch": 2.299107142857143,
"grad_norm": 0.11119361966848373,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 515
},
{
"epoch": 2.3035714285714284,
"grad_norm": 0.07501739263534546,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 516
},
{
"epoch": 2.3080357142857144,
"grad_norm": 0.1268368661403656,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 517
},
{
"epoch": 2.3125,
"grad_norm": 0.07790718972682953,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 518
},
{
"epoch": 2.3169642857142856,
"grad_norm": 0.11105362325906754,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 519
},
{
"epoch": 2.3214285714285716,
"grad_norm": 0.07915301620960236,
"learning_rate": 2e-05,
"loss": 0.0069,
"step": 520
},
{
"epoch": 2.325892857142857,
"grad_norm": 0.08192173391580582,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 521
},
{
"epoch": 2.330357142857143,
"grad_norm": 0.1169026792049408,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 522
},
{
"epoch": 2.3348214285714284,
"grad_norm": 0.21077628433704376,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 523
},
{
"epoch": 2.3392857142857144,
"grad_norm": 0.07853112369775772,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 524
},
{
"epoch": 2.34375,
"grad_norm": 0.13631939888000488,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 525
},
{
"epoch": 2.3482142857142856,
"grad_norm": 0.08369535207748413,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 526
},
{
"epoch": 2.3526785714285716,
"grad_norm": 0.055638108402490616,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 527
},
{
"epoch": 2.357142857142857,
"grad_norm": 0.06996078044176102,
"learning_rate": 2e-05,
"loss": 0.0062,
"step": 528
},
{
"epoch": 2.361607142857143,
"grad_norm": 0.08065176755189896,
"learning_rate": 2e-05,
"loss": 0.0061,
"step": 529
},
{
"epoch": 2.3660714285714284,
"grad_norm": 0.12515677511692047,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 530
},
{
"epoch": 2.3705357142857144,
"grad_norm": 0.1157045066356659,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 531
},
{
"epoch": 2.375,
"grad_norm": 0.11465250700712204,
"learning_rate": 2e-05,
"loss": 0.0071,
"step": 532
},
{
"epoch": 2.3794642857142856,
"grad_norm": 0.07074079662561417,
"learning_rate": 2e-05,
"loss": 0.0043,
"step": 533
},
{
"epoch": 2.3839285714285716,
"grad_norm": 0.09491592645645142,
"learning_rate": 2e-05,
"loss": 0.0053,
"step": 534
},
{
"epoch": 2.388392857142857,
"grad_norm": 0.09677135199308395,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 535
},
{
"epoch": 2.392857142857143,
"grad_norm": 0.12837085127830505,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 536
},
{
"epoch": 2.3973214285714284,
"grad_norm": 0.1667841225862503,
"learning_rate": 2e-05,
"loss": 0.0082,
"step": 537
},
{
"epoch": 2.4017857142857144,
"grad_norm": 0.1354755461215973,
"learning_rate": 2e-05,
"loss": 0.0096,
"step": 538
},
{
"epoch": 2.40625,
"grad_norm": 0.17251300811767578,
"learning_rate": 2e-05,
"loss": 0.0085,
"step": 539
},
{
"epoch": 2.4107142857142856,
"grad_norm": 0.12392369657754898,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 540
},
{
"epoch": 2.4151785714285716,
"grad_norm": 0.10283850133419037,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 541
},
{
"epoch": 2.419642857142857,
"grad_norm": 0.22526662051677704,
"learning_rate": 2e-05,
"loss": 0.0095,
"step": 542
},
{
"epoch": 2.424107142857143,
"grad_norm": 0.16849340498447418,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 543
},
{
"epoch": 2.4285714285714284,
"grad_norm": 0.1429499238729477,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 544
},
{
"epoch": 2.4330357142857144,
"grad_norm": 0.13862621784210205,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 545
},
{
"epoch": 2.4375,
"grad_norm": 0.1709546595811844,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 546
},
{
"epoch": 2.4419642857142856,
"grad_norm": 0.08103591948747635,
"learning_rate": 2e-05,
"loss": 0.0076,
"step": 547
},
{
"epoch": 2.4464285714285716,
"grad_norm": 0.09880509972572327,
"learning_rate": 2e-05,
"loss": 0.007,
"step": 548
},
{
"epoch": 2.450892857142857,
"grad_norm": 0.11954550445079803,
"learning_rate": 2e-05,
"loss": 0.0065,
"step": 549
},
{
"epoch": 2.455357142857143,
"grad_norm": 0.20056311786174774,
"learning_rate": 2e-05,
"loss": 0.0111,
"step": 550
},
{
"epoch": 2.4598214285714284,
"grad_norm": 0.08780152350664139,
"learning_rate": 2e-05,
"loss": 0.0087,
"step": 551
},
{
"epoch": 2.4642857142857144,
"grad_norm": 0.526067852973938,
"learning_rate": 2e-05,
"loss": 0.0063,
"step": 552
},
{
"epoch": 2.46875,
"grad_norm": 3.777337074279785,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 553
},
{
"epoch": 2.4732142857142856,
"grad_norm": 1.3595633506774902,
"learning_rate": 2e-05,
"loss": 0.0283,
"step": 554
},
{
"epoch": 2.4776785714285716,
"grad_norm": 2.552967071533203,
"learning_rate": 2e-05,
"loss": 0.0405,
"step": 555
},
{
"epoch": 2.482142857142857,
"grad_norm": 1.628051996231079,
"learning_rate": 2e-05,
"loss": 0.0271,
"step": 556
},
{
"epoch": 2.486607142857143,
"grad_norm": 1.537842869758606,
"learning_rate": 2e-05,
"loss": 0.0432,
"step": 557
},
{
"epoch": 2.4910714285714284,
"grad_norm": 1.3012171983718872,
"learning_rate": 2e-05,
"loss": 0.025,
"step": 558
},
{
"epoch": 2.4955357142857144,
"grad_norm": 1.285781741142273,
"learning_rate": 2e-05,
"loss": 0.0254,
"step": 559
},
{
"epoch": 2.5,
"grad_norm": 2.0242481231689453,
"learning_rate": 2e-05,
"loss": 0.0153,
"step": 560
},
{
"epoch": 2.5044642857142856,
"grad_norm": 6.9972357749938965,
"learning_rate": 2e-05,
"loss": 0.1738,
"step": 561
},
{
"epoch": 2.508928571428571,
"grad_norm": 1.1082912683486938,
"learning_rate": 2e-05,
"loss": 0.0221,
"step": 562
},
{
"epoch": 2.513392857142857,
"grad_norm": 3.411374092102051,
"learning_rate": 2e-05,
"loss": 0.0272,
"step": 563
},
{
"epoch": 2.517857142857143,
"grad_norm": 10.052404403686523,
"learning_rate": 2e-05,
"loss": 0.0897,
"step": 564
},
{
"epoch": 2.522321428571429,
"grad_norm": 2.627420663833618,
"learning_rate": 2e-05,
"loss": 0.0292,
"step": 565
},
{
"epoch": 2.5267857142857144,
"grad_norm": 3.180161237716675,
"learning_rate": 2e-05,
"loss": 0.034,
"step": 566
},
{
"epoch": 2.53125,
"grad_norm": 4.870179653167725,
"learning_rate": 2e-05,
"loss": 0.1194,
"step": 567
},
{
"epoch": 2.5357142857142856,
"grad_norm": 4.233872890472412,
"learning_rate": 2e-05,
"loss": 0.1097,
"step": 568
},
{
"epoch": 2.540178571428571,
"grad_norm": 1.2434048652648926,
"learning_rate": 2e-05,
"loss": 0.0258,
"step": 569
},
{
"epoch": 2.544642857142857,
"grad_norm": 2.1450345516204834,
"learning_rate": 2e-05,
"loss": 0.0845,
"step": 570
},
{
"epoch": 2.549107142857143,
"grad_norm": 1.3749841451644897,
"learning_rate": 2e-05,
"loss": 0.0326,
"step": 571
},
{
"epoch": 2.553571428571429,
"grad_norm": 0.5364799499511719,
"learning_rate": 2e-05,
"loss": 0.0141,
"step": 572
},
{
"epoch": 2.5580357142857144,
"grad_norm": 0.25783586502075195,
"learning_rate": 2e-05,
"loss": 0.0102,
"step": 573
},
{
"epoch": 2.5625,
"grad_norm": 0.31643831729888916,
"learning_rate": 2e-05,
"loss": 0.0156,
"step": 574
},
{
"epoch": 2.5669642857142856,
"grad_norm": 0.16987808048725128,
"learning_rate": 2e-05,
"loss": 0.0106,
"step": 575
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.13765154778957367,
"learning_rate": 2e-05,
"loss": 0.0097,
"step": 576
},
{
"epoch": 2.575892857142857,
"grad_norm": 0.1949804574251175,
"learning_rate": 2e-05,
"loss": 0.0122,
"step": 577
},
{
"epoch": 2.580357142857143,
"grad_norm": 0.1344509869813919,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 578
},
{
"epoch": 2.584821428571429,
"grad_norm": 0.16776832938194275,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 579
},
{
"epoch": 2.5892857142857144,
"grad_norm": 0.17442850768566132,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 580
},
{
"epoch": 2.59375,
"grad_norm": 0.30235546827316284,
"learning_rate": 2e-05,
"loss": 0.0195,
"step": 581
},
{
"epoch": 2.5982142857142856,
"grad_norm": 0.14607034623622894,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 582
},
{
"epoch": 2.602678571428571,
"grad_norm": 0.23565685749053955,
"learning_rate": 2e-05,
"loss": 0.0089,
"step": 583
},
{
"epoch": 2.607142857142857,
"grad_norm": 0.15442737936973572,
"learning_rate": 2e-05,
"loss": 0.0056,
"step": 584
},
{
"epoch": 2.611607142857143,
"grad_norm": 0.08780539035797119,
"learning_rate": 2e-05,
"loss": 0.0066,
"step": 585
},
{
"epoch": 2.616071428571429,
"grad_norm": 0.21069885790348053,
"learning_rate": 2e-05,
"loss": 0.0119,
"step": 586
},
{
"epoch": 2.6205357142857144,
"grad_norm": 0.07902006804943085,
"learning_rate": 2e-05,
"loss": 0.0052,
"step": 587
},
{
"epoch": 2.625,
"grad_norm": 0.18614117801189423,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 588
},
{
"epoch": 2.6294642857142856,
"grad_norm": 0.21328520774841309,
"learning_rate": 2e-05,
"loss": 0.009,
"step": 589
},
{
"epoch": 2.633928571428571,
"grad_norm": 0.13880658149719238,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 590
},
{
"epoch": 2.638392857142857,
"grad_norm": 1.2935315370559692,
"learning_rate": 2e-05,
"loss": 0.0163,
"step": 591
},
{
"epoch": 2.642857142857143,
"grad_norm": 0.2837115526199341,
"learning_rate": 2e-05,
"loss": 0.0127,
"step": 592
},
{
"epoch": 2.647321428571429,
"grad_norm": 0.908935010433197,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 593
},
{
"epoch": 2.6517857142857144,
"grad_norm": 1.0788531303405762,
"learning_rate": 2e-05,
"loss": 0.0163,
"step": 594
},
{
"epoch": 2.65625,
"grad_norm": 3.155200242996216,
"learning_rate": 2e-05,
"loss": 0.0355,
"step": 595
},
{
"epoch": 2.6607142857142856,
"grad_norm": 6.767505168914795,
"learning_rate": 2e-05,
"loss": 0.0595,
"step": 596
},
{
"epoch": 2.665178571428571,
"grad_norm": 1.3904411792755127,
"learning_rate": 2e-05,
"loss": 0.0465,
"step": 597
},
{
"epoch": 2.669642857142857,
"grad_norm": 5.910096645355225,
"learning_rate": 2e-05,
"loss": 0.0229,
"step": 598
},
{
"epoch": 2.674107142857143,
"grad_norm": 1.484846591949463,
"learning_rate": 2e-05,
"loss": 0.0302,
"step": 599
},
{
"epoch": 2.678571428571429,
"grad_norm": 0.35779526829719543,
"learning_rate": 2e-05,
"loss": 0.0131,
"step": 600
},
{
"epoch": 2.6830357142857144,
"grad_norm": 0.7565224766731262,
"learning_rate": 2e-05,
"loss": 0.0135,
"step": 601
},
{
"epoch": 2.6875,
"grad_norm": 0.5273329019546509,
"learning_rate": 2e-05,
"loss": 0.021,
"step": 602
},
{
"epoch": 2.6919642857142856,
"grad_norm": 0.23534627258777618,
"learning_rate": 2e-05,
"loss": 0.0127,
"step": 603
},
{
"epoch": 2.696428571428571,
"grad_norm": 4.416169166564941,
"learning_rate": 2e-05,
"loss": 0.0172,
"step": 604
},
{
"epoch": 2.700892857142857,
"grad_norm": 0.26013216376304626,
"learning_rate": 2e-05,
"loss": 0.0107,
"step": 605
},
{
"epoch": 2.705357142857143,
"grad_norm": 4.598071575164795,
"learning_rate": 2e-05,
"loss": 0.0608,
"step": 606
},
{
"epoch": 2.709821428571429,
"grad_norm": 2.953446865081787,
"learning_rate": 2e-05,
"loss": 0.0273,
"step": 607
},
{
"epoch": 2.7142857142857144,
"grad_norm": 1.1341112852096558,
"learning_rate": 2e-05,
"loss": 0.0165,
"step": 608
},
{
"epoch": 2.71875,
"grad_norm": 0.1598537564277649,
"learning_rate": 2e-05,
"loss": 0.011,
"step": 609
},
{
"epoch": 2.7232142857142856,
"grad_norm": 0.24823372066020966,
"learning_rate": 2e-05,
"loss": 0.0096,
"step": 610
},
{
"epoch": 2.727678571428571,
"grad_norm": 0.775355875492096,
"learning_rate": 2e-05,
"loss": 0.0085,
"step": 611
},
{
"epoch": 2.732142857142857,
"grad_norm": 0.08136750012636185,
"learning_rate": 2e-05,
"loss": 0.0088,
"step": 612
},
{
"epoch": 2.736607142857143,
"grad_norm": 9.017096519470215,
"learning_rate": 2e-05,
"loss": 0.0224,
"step": 613
},
{
"epoch": 2.741071428571429,
"grad_norm": 0.416096955537796,
"learning_rate": 2e-05,
"loss": 0.0108,
"step": 614
},
{
"epoch": 2.7455357142857144,
"grad_norm": 10.557942390441895,
"learning_rate": 2e-05,
"loss": 0.0159,
"step": 615
},
{
"epoch": 2.75,
"grad_norm": 1.6279311180114746,
"learning_rate": 2e-05,
"loss": 0.0752,
"step": 616
},
{
"epoch": 2.7544642857142856,
"grad_norm": 1.5116238594055176,
"learning_rate": 2e-05,
"loss": 0.0344,
"step": 617
},
{
"epoch": 2.758928571428571,
"grad_norm": 0.34003084897994995,
"learning_rate": 2e-05,
"loss": 0.0101,
"step": 618
},
{
"epoch": 2.763392857142857,
"grad_norm": 0.8232696056365967,
"learning_rate": 2e-05,
"loss": 0.0162,
"step": 619
},
{
"epoch": 2.767857142857143,
"grad_norm": 0.1500774621963501,
"learning_rate": 2e-05,
"loss": 0.0104,
"step": 620
},
{
"epoch": 2.772321428571429,
"grad_norm": 7.253946781158447,
"learning_rate": 2e-05,
"loss": 0.0323,
"step": 621
},
{
"epoch": 2.7767857142857144,
"grad_norm": 0.3118407428264618,
"learning_rate": 2e-05,
"loss": 0.014,
"step": 622
},
{
"epoch": 2.78125,
"grad_norm": 7.626855373382568,
"learning_rate": 2e-05,
"loss": 0.0127,
"step": 623
},
{
"epoch": 2.7857142857142856,
"grad_norm": 1.0912909507751465,
"learning_rate": 2e-05,
"loss": 0.0254,
"step": 624
},
{
"epoch": 2.790178571428571,
"grad_norm": 0.22835303843021393,
"learning_rate": 2e-05,
"loss": 0.0145,
"step": 625
},
{
"epoch": 2.794642857142857,
"grad_norm": 6.9012908935546875,
"learning_rate": 2e-05,
"loss": 0.0444,
"step": 626
},
{
"epoch": 2.799107142857143,
"grad_norm": 0.27794891595840454,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 627
},
{
"epoch": 2.803571428571429,
"grad_norm": 0.15842191874980927,
"learning_rate": 2e-05,
"loss": 0.0075,
"step": 628
},
{
"epoch": 2.8080357142857144,
"grad_norm": 67.39225769042969,
"learning_rate": 2e-05,
"loss": 0.025,
"step": 629
},
{
"epoch": 2.8125,
"grad_norm": 0.18912476301193237,
"learning_rate": 2e-05,
"loss": 0.011,
"step": 630
},
{
"epoch": 2.8169642857142856,
"grad_norm": 0.36825433373451233,
"learning_rate": 2e-05,
"loss": 0.009,
"step": 631
},
{
"epoch": 2.821428571428571,
"grad_norm": 0.22129683196544647,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 632
},
{
"epoch": 2.825892857142857,
"grad_norm": 0.1257479190826416,
"learning_rate": 2e-05,
"loss": 0.0064,
"step": 633
},
{
"epoch": 2.830357142857143,
"grad_norm": 0.21349452435970306,
"learning_rate": 2e-05,
"loss": 0.0111,
"step": 634
},
{
"epoch": 2.834821428571429,
"grad_norm": 0.09613204002380371,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 635
},
{
"epoch": 2.8392857142857144,
"grad_norm": 0.1867591142654419,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 636
},
{
"epoch": 2.84375,
"grad_norm": 0.3712433874607086,
"learning_rate": 2e-05,
"loss": 0.0151,
"step": 637
},
{
"epoch": 2.8482142857142856,
"grad_norm": 0.15406033396720886,
"learning_rate": 2e-05,
"loss": 0.0077,
"step": 638
},
{
"epoch": 2.852678571428571,
"grad_norm": 0.3927120566368103,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 639
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.1449354887008667,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 640
},
{
"epoch": 2.861607142857143,
"grad_norm": 8.02448558807373,
"learning_rate": 2e-05,
"loss": 0.0109,
"step": 641
},
{
"epoch": 2.866071428571429,
"grad_norm": 0.09204132109880447,
"learning_rate": 2e-05,
"loss": 0.0059,
"step": 642
},
{
"epoch": 2.8705357142857144,
"grad_norm": 0.1463308483362198,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 643
},
{
"epoch": 2.875,
"grad_norm": 0.11871679872274399,
"learning_rate": 2e-05,
"loss": 0.0072,
"step": 644
},
{
"epoch": 2.8794642857142856,
"grad_norm": 0.21967895328998566,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 645
},
{
"epoch": 2.883928571428571,
"grad_norm": 1.2042790651321411,
"learning_rate": 2e-05,
"loss": 0.0138,
"step": 646
},
{
"epoch": 2.888392857142857,
"grad_norm": 0.12121184170246124,
"learning_rate": 2e-05,
"loss": 0.0113,
"step": 647
},
{
"epoch": 2.892857142857143,
"grad_norm": 0.11416534334421158,
"learning_rate": 2e-05,
"loss": 0.0099,
"step": 648
},
{
"epoch": 2.897321428571429,
"grad_norm": 0.11109648644924164,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 649
},
{
"epoch": 2.9017857142857144,
"grad_norm": 0.09570077806711197,
"learning_rate": 2e-05,
"loss": 0.0067,
"step": 650
},
{
"epoch": 2.90625,
"grad_norm": 0.155325785279274,
"learning_rate": 2e-05,
"loss": 0.0079,
"step": 651
},
{
"epoch": 2.9107142857142856,
"grad_norm": 0.11843915283679962,
"learning_rate": 2e-05,
"loss": 0.01,
"step": 652
},
{
"epoch": 2.915178571428571,
"grad_norm": 0.1322368085384369,
"learning_rate": 2e-05,
"loss": 0.0081,
"step": 653
},
{
"epoch": 2.919642857142857,
"grad_norm": 0.11188462376594543,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 654
},
{
"epoch": 2.924107142857143,
"grad_norm": 0.1739046424627304,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 655
},
{
"epoch": 2.928571428571429,
"grad_norm": 0.09202327579259872,
"learning_rate": 2e-05,
"loss": 0.0084,
"step": 656
},
{
"epoch": 2.9330357142857144,
"grad_norm": 0.13602964580059052,
"learning_rate": 2e-05,
"loss": 0.0086,
"step": 657
},
{
"epoch": 2.9375,
"grad_norm": 0.1424887776374817,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 658
},
{
"epoch": 2.9419642857142856,
"grad_norm": 0.11014612764120102,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 659
},
{
"epoch": 2.946428571428571,
"grad_norm": 0.08697912842035294,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 660
},
{
"epoch": 2.950892857142857,
"grad_norm": 0.11514980345964432,
"learning_rate": 2e-05,
"loss": 0.0078,
"step": 661
},
{
"epoch": 2.955357142857143,
"grad_norm": 0.11393480747938156,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 662
},
{
"epoch": 2.959821428571429,
"grad_norm": 0.2073274850845337,
"learning_rate": 2e-05,
"loss": 0.0116,
"step": 663
},
{
"epoch": 2.9642857142857144,
"grad_norm": 0.11760195344686508,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 664
},
{
"epoch": 2.96875,
"grad_norm": 0.12061617523431778,
"learning_rate": 2e-05,
"loss": 0.0074,
"step": 665
},
{
"epoch": 2.9732142857142856,
"grad_norm": 0.11407007277011871,
"learning_rate": 2e-05,
"loss": 0.0073,
"step": 666
},
{
"epoch": 2.977678571428571,
"grad_norm": 0.06950999796390533,
"learning_rate": 2e-05,
"loss": 0.0057,
"step": 667
},
{
"epoch": 2.982142857142857,
"grad_norm": 0.2019437998533249,
"learning_rate": 2e-05,
"loss": 0.0112,
"step": 668
},
{
"epoch": 2.986607142857143,
"grad_norm": 0.10581085830926895,
"learning_rate": 2e-05,
"loss": 0.0056,
"step": 669
},
{
"epoch": 2.991071428571429,
"grad_norm": 0.20939861238002777,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 670
},
{
"epoch": 2.9955357142857144,
"grad_norm": 0.09610752016305923,
"learning_rate": 2e-05,
"loss": 0.0093,
"step": 671
},
{
"epoch": 3.0,
"grad_norm": 0.1326116919517517,
"learning_rate": 2e-05,
"loss": 0.0091,
"step": 672
},
{
"epoch": 3.0,
"step": 672,
"total_flos": 54480040427520.0,
"train_loss": 0.0410676886753035,
"train_runtime": 14462.764,
"train_samples_per_second": 2.968,
"train_steps_per_second": 0.046
}
],
"logging_steps": 1.0,
"max_steps": 672,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 54480040427520.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}