{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"Batch Mean": -3.4444580078125,
"accuracy": 0.40625,
"epoch": 0,
"step": 0
},
{
"epoch": 0.0025,
"grad_norm": 43.31455612182617,
"learning_rate": 1.5000000000000002e-07,
"loss": 0.9077,
"step": 1
},
{
"Batch Mean": -3.43255615234375,
"accuracy": 0.4921875,
"epoch": 0.0025,
"step": 1
},
{
"epoch": 0.005,
"grad_norm": 50.06682205200195,
"learning_rate": 3.0000000000000004e-07,
"loss": 0.8907,
"step": 2
},
{
"Batch Mean": -3.44775390625,
"accuracy": 0.453125,
"epoch": 0.005,
"step": 2
},
{
"epoch": 0.0075,
"grad_norm": 47.389522552490234,
"learning_rate": 4.5e-07,
"loss": 0.8899,
"step": 3
},
{
"Batch Mean": -3.4224853515625,
"accuracy": 0.4375,
"epoch": 0.0075,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 49.24280548095703,
"learning_rate": 6.000000000000001e-07,
"loss": 0.8833,
"step": 4
},
{
"Batch Mean": -3.39697265625,
"accuracy": 0.5234375,
"epoch": 0.01,
"step": 4
},
{
"epoch": 0.0125,
"grad_norm": 44.11930847167969,
"learning_rate": 7.5e-07,
"loss": 0.8661,
"step": 5
},
{
"Batch Mean": -3.38104248046875,
"accuracy": 0.5078125,
"epoch": 0.0125,
"step": 5
},
{
"epoch": 0.015,
"grad_norm": 44.9741325378418,
"learning_rate": 9e-07,
"loss": 0.8805,
"step": 6
},
{
"Batch Mean": -3.19671630859375,
"accuracy": 0.5078125,
"epoch": 0.015,
"step": 6
},
{
"epoch": 0.0175,
"grad_norm": 42.32276153564453,
"learning_rate": 1.05e-06,
"loss": 0.8547,
"step": 7
},
{
"Batch Mean": -3.1279296875,
"accuracy": 0.4921875,
"epoch": 0.0175,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 44.22520065307617,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.8464,
"step": 8
},
{
"Batch Mean": -2.520538330078125,
"accuracy": 0.5,
"epoch": 0.02,
"step": 8
},
{
"epoch": 0.0225,
"grad_norm": 36.41325378417969,
"learning_rate": 1.35e-06,
"loss": 0.7946,
"step": 9
},
{
"Batch Mean": -2.413787841796875,
"accuracy": 0.515625,
"epoch": 0.0225,
"step": 9
},
{
"epoch": 0.025,
"grad_norm": 31.74874496459961,
"learning_rate": 1.5e-06,
"loss": 0.7939,
"step": 10
},
{
"Batch Mean": -2.218231201171875,
"accuracy": 0.4921875,
"epoch": 0.025,
"step": 10
},
{
"epoch": 0.0275,
"grad_norm": 34.12691879272461,
"learning_rate": 1.65e-06,
"loss": 0.7505,
"step": 11
},
{
"Batch Mean": -0.2393415868282318,
"accuracy": 0.53125,
"epoch": 0.0275,
"step": 11
},
{
"epoch": 0.03,
"grad_norm": 10.481990814208984,
"learning_rate": 1.8e-06,
"loss": 0.6779,
"step": 12
},
{
"Batch Mean": 0.2733480930328369,
"accuracy": 0.5703125,
"epoch": 0.03,
"step": 12
},
{
"epoch": 0.0325,
"grad_norm": 8.39962387084961,
"learning_rate": 1.95e-06,
"loss": 0.6728,
"step": 13
},
{
"Batch Mean": 0.5041149854660034,
"accuracy": 0.671875,
"epoch": 0.0325,
"step": 13
},
{
"epoch": 0.035,
"grad_norm": 15.906760215759277,
"learning_rate": 2.1e-06,
"loss": 0.6679,
"step": 14
},
{
"Batch Mean": 0.8821840286254883,
"accuracy": 0.578125,
"epoch": 0.035,
"step": 14
},
{
"epoch": 0.0375,
"grad_norm": 19.03310775756836,
"learning_rate": 2.25e-06,
"loss": 0.7024,
"step": 15
},
{
"Batch Mean": 1.1891746520996094,
"accuracy": 0.5859375,
"epoch": 0.0375,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 20.89366340637207,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.6868,
"step": 16
},
{
"Batch Mean": 2.1533203125,
"accuracy": 0.671875,
"epoch": 0.04,
"step": 16
},
{
"epoch": 0.0425,
"grad_norm": 34.633541107177734,
"learning_rate": 2.55e-06,
"loss": 0.7297,
"step": 17
},
{
"Batch Mean": 2.5652008056640625,
"accuracy": 0.6484375,
"epoch": 0.0425,
"step": 17
},
{
"epoch": 0.045,
"grad_norm": 41.46820068359375,
"learning_rate": 2.7e-06,
"loss": 0.7387,
"step": 18
},
{
"Batch Mean": 2.4411849975585938,
"accuracy": 0.75,
"epoch": 0.045,
"step": 18
},
{
"epoch": 0.0475,
"grad_norm": 42.9193000793457,
"learning_rate": 2.85e-06,
"loss": 0.6813,
"step": 19
},
{
"Batch Mean": 2.1285018920898438,
"accuracy": 0.703125,
"epoch": 0.0475,
"step": 19
},
{
"epoch": 0.05,
"grad_norm": 37.16952133178711,
"learning_rate": 3e-06,
"loss": 0.672,
"step": 20
},
{
"Batch Mean": 1.4230481386184692,
"accuracy": 0.671875,
"epoch": 0.05,
"step": 20
},
{
"epoch": 0.0525,
"grad_norm": 27.2924747467041,
"learning_rate": 2.992105263157895e-06,
"loss": 0.647,
"step": 21
},
{
"Batch Mean": 0.5414033532142639,
"accuracy": 0.6953125,
"epoch": 0.0525,
"step": 21
},
{
"epoch": 0.055,
"grad_norm": 11.927323341369629,
"learning_rate": 2.9842105263157896e-06,
"loss": 0.5981,
"step": 22
},
{
"Batch Mean": -0.36795324087142944,
"accuracy": 0.7265625,
"epoch": 0.055,
"step": 22
},
{
"epoch": 0.0575,
"grad_norm": 13.455949783325195,
"learning_rate": 2.9763157894736843e-06,
"loss": 0.5763,
"step": 23
},
{
"Batch Mean": -1.0874066352844238,
"accuracy": 0.7421875,
"epoch": 0.0575,
"step": 23
},
{
"epoch": 0.06,
"grad_norm": 22.382274627685547,
"learning_rate": 2.968421052631579e-06,
"loss": 0.5763,
"step": 24
},
{
"Batch Mean": -1.103028416633606,
"accuracy": 0.6953125,
"epoch": 0.06,
"step": 24
},
{
"epoch": 0.0625,
"grad_norm": 19.854228973388672,
"learning_rate": 2.960526315789474e-06,
"loss": 0.6065,
"step": 25
},
{
"Batch Mean": -0.8232302069664001,
"accuracy": 0.734375,
"epoch": 0.0625,
"step": 25
},
{
"epoch": 0.065,
"grad_norm": 16.642824172973633,
"learning_rate": 2.9526315789473685e-06,
"loss": 0.5282,
"step": 26
},
{
"Batch Mean": -0.11966800689697266,
"accuracy": 0.6875,
"epoch": 0.065,
"step": 26
},
{
"epoch": 0.0675,
"grad_norm": 8.563494682312012,
"learning_rate": 2.9447368421052633e-06,
"loss": 0.5348,
"step": 27
},
{
"Batch Mean": 0.5674030780792236,
"accuracy": 0.671875,
"epoch": 0.0675,
"step": 27
},
{
"epoch": 0.07,
"grad_norm": 13.211753845214844,
"learning_rate": 2.936842105263158e-06,
"loss": 0.5812,
"step": 28
},
{
"Batch Mean": 1.1245126724243164,
"accuracy": 0.6171875,
"epoch": 0.07,
"step": 28
},
{
"epoch": 0.0725,
"grad_norm": 22.8548583984375,
"learning_rate": 2.9289473684210528e-06,
"loss": 0.7336,
"step": 29
},
{
"Batch Mean": 1.2212319374084473,
"accuracy": 0.6796875,
"epoch": 0.0725,
"step": 29
},
{
"epoch": 0.075,
"grad_norm": 21.96833038330078,
"learning_rate": 2.9210526315789475e-06,
"loss": 0.6167,
"step": 30
},
{
"Batch Mean": 0.7509702444076538,
"accuracy": 0.796875,
"epoch": 0.075,
"step": 30
},
{
"epoch": 0.0775,
"grad_norm": 12.47424030303955,
"learning_rate": 2.9131578947368423e-06,
"loss": 0.463,
"step": 31
},
{
"Batch Mean": 0.2412339150905609,
"accuracy": 0.7578125,
"epoch": 0.0775,
"step": 31
},
{
"epoch": 0.08,
"grad_norm": 9.769195556640625,
"learning_rate": 2.905263157894737e-06,
"loss": 0.5412,
"step": 32
},
{
"Batch Mean": -0.5602660179138184,
"accuracy": 0.7109375,
"epoch": 0.08,
"step": 32
},
{
"epoch": 0.0825,
"grad_norm": 11.323891639709473,
"learning_rate": 2.8973684210526318e-06,
"loss": 0.5601,
"step": 33
},
{
"Batch Mean": -0.8669416904449463,
"accuracy": 0.7734375,
"epoch": 0.0825,
"step": 33
},
{
"epoch": 0.085,
"grad_norm": 15.461758613586426,
"learning_rate": 2.8894736842105265e-06,
"loss": 0.5147,
"step": 34
},
{
"Batch Mean": -0.7635477781295776,
"accuracy": 0.75,
"epoch": 0.085,
"step": 34
},
{
"epoch": 0.0875,
"grad_norm": 14.736496925354004,
"learning_rate": 2.8815789473684213e-06,
"loss": 0.5421,
"step": 35
},
{
"Batch Mean": -0.5088728070259094,
"accuracy": 0.734375,
"epoch": 0.0875,
"step": 35
},
{
"epoch": 0.09,
"grad_norm": 11.982501029968262,
"learning_rate": 2.873684210526316e-06,
"loss": 0.5234,
"step": 36
},
{
"Batch Mean": -0.03705340623855591,
"accuracy": 0.7578125,
"epoch": 0.09,
"step": 36
},
{
"epoch": 0.0925,
"grad_norm": 7.854848861694336,
"learning_rate": 2.8657894736842103e-06,
"loss": 0.4996,
"step": 37
},
{
"Batch Mean": 0.5426892042160034,
"accuracy": 0.734375,
"epoch": 0.0925,
"step": 37
},
{
"epoch": 0.095,
"grad_norm": 10.834790229797363,
"learning_rate": 2.857894736842105e-06,
"loss": 0.5187,
"step": 38
},
{
"Batch Mean": 0.4622654914855957,
"accuracy": 0.7734375,
"epoch": 0.095,
"step": 38
},
{
"epoch": 0.0975,
"grad_norm": 10.764225959777832,
"learning_rate": 2.85e-06,
"loss": 0.4723,
"step": 39
},
{
"Batch Mean": 0.18012571334838867,
"accuracy": 0.7890625,
"epoch": 0.0975,
"step": 39
},
{
"epoch": 0.1,
"grad_norm": 9.500924110412598,
"learning_rate": 2.8421052631578946e-06,
"loss": 0.4545,
"step": 40
},
{
"Batch Mean": -0.05992317199707031,
"accuracy": 0.7890625,
"epoch": 0.1,
"step": 40
},
{
"epoch": 0.1025,
"grad_norm": 8.079994201660156,
"learning_rate": 2.8342105263157897e-06,
"loss": 0.49,
"step": 41
},
{
"Batch Mean": 0.04392993450164795,
"accuracy": 0.8515625,
"epoch": 0.1025,
"step": 41
},
{
"epoch": 0.105,
"grad_norm": 10.184918403625488,
"learning_rate": 2.8263157894736845e-06,
"loss": 0.3789,
"step": 42
},
{
"Batch Mean": -0.2515716552734375,
"accuracy": 0.8046875,
"epoch": 0.105,
"step": 42
},
{
"epoch": 0.1075,
"grad_norm": 9.894248008728027,
"learning_rate": 2.8184210526315792e-06,
"loss": 0.435,
"step": 43
},
{
"Batch Mean": 0.15310335159301758,
"accuracy": 0.7734375,
"epoch": 0.1075,
"step": 43
},
{
"epoch": 0.11,
"grad_norm": 10.671575546264648,
"learning_rate": 2.810526315789474e-06,
"loss": 0.4291,
"step": 44
},
{
"Batch Mean": 0.2563772201538086,
"accuracy": 0.765625,
"epoch": 0.11,
"step": 44
},
{
"epoch": 0.1125,
"grad_norm": 10.690945625305176,
"learning_rate": 2.8026315789473687e-06,
"loss": 0.4812,
"step": 45
},
{
"Batch Mean": 0.046463966369628906,
"accuracy": 0.875,
"epoch": 0.1125,
"step": 45
},
{
"epoch": 0.115,
"grad_norm": 9.616545677185059,
"learning_rate": 2.7947368421052635e-06,
"loss": 0.3082,
"step": 46
},
{
"Batch Mean": 0.11115455627441406,
"accuracy": 0.75,
"epoch": 0.115,
"step": 46
},
{
"epoch": 0.1175,
"grad_norm": 15.726456642150879,
"learning_rate": 2.7868421052631578e-06,
"loss": 0.5576,
"step": 47
},
{
"Batch Mean": 0.2878689765930176,
"accuracy": 0.8125,
"epoch": 0.1175,
"step": 47
},
{
"epoch": 0.12,
"grad_norm": 11.675898551940918,
"learning_rate": 2.7789473684210525e-06,
"loss": 0.472,
"step": 48
},
{
"Batch Mean": -0.25928783416748047,
"accuracy": 0.75,
"epoch": 0.12,
"step": 48
},
{
"epoch": 0.1225,
"grad_norm": 15.394309997558594,
"learning_rate": 2.7710526315789473e-06,
"loss": 0.5011,
"step": 49
},
{
"Batch Mean": -0.2885439097881317,
"accuracy": 0.703125,
"epoch": 0.1225,
"step": 49
},
{
"epoch": 0.125,
"grad_norm": 15.69314956665039,
"learning_rate": 2.763157894736842e-06,
"loss": 0.5974,
"step": 50
},
{
"Batch Mean": -0.8389434814453125,
"accuracy": 0.8046875,
"epoch": 0.125,
"step": 50
},
{
"epoch": 0.1275,
"grad_norm": 16.744606018066406,
"learning_rate": 2.7552631578947368e-06,
"loss": 0.5151,
"step": 51
},
{
"Batch Mean": -0.6685854196548462,
"accuracy": 0.703125,
"epoch": 0.1275,
"step": 51
},
{
"epoch": 0.13,
"grad_norm": 15.530059814453125,
"learning_rate": 2.7473684210526315e-06,
"loss": 0.5502,
"step": 52
},
{
"Batch Mean": -0.5919044017791748,
"accuracy": 0.71875,
"epoch": 0.13,
"step": 52
},
{
"epoch": 0.1325,
"grad_norm": 15.14562702178955,
"learning_rate": 2.7394736842105263e-06,
"loss": 0.5305,
"step": 53
},
{
"Batch Mean": -0.11876988410949707,
"accuracy": 0.75,
"epoch": 0.1325,
"step": 53
},
{
"epoch": 0.135,
"grad_norm": 7.011972427368164,
"learning_rate": 2.7315789473684214e-06,
"loss": 0.4802,
"step": 54
},
{
"Batch Mean": 0.09675300121307373,
"accuracy": 0.7578125,
"epoch": 0.135,
"step": 54
},
{
"epoch": 0.1375,
"grad_norm": 8.37578010559082,
"learning_rate": 2.723684210526316e-06,
"loss": 0.4354,
"step": 55
},
{
"Batch Mean": 0.3877286911010742,
"accuracy": 0.8046875,
"epoch": 0.1375,
"step": 55
},
{
"epoch": 0.14,
"grad_norm": 8.380095481872559,
"learning_rate": 2.715789473684211e-06,
"loss": 0.4114,
"step": 56
},
{
"Batch Mean": 0.7472854852676392,
"accuracy": 0.8046875,
"epoch": 0.14,
"step": 56
},
{
"epoch": 0.1425,
"grad_norm": 12.794017791748047,
"learning_rate": 2.7078947368421052e-06,
"loss": 0.465,
"step": 57
},
{
"Batch Mean": 0.5131087303161621,
"accuracy": 0.8046875,
"epoch": 0.1425,
"step": 57
},
{
"epoch": 0.145,
"grad_norm": 8.367005348205566,
"learning_rate": 2.7e-06,
"loss": 0.4407,
"step": 58
},
{
"Batch Mean": 0.5741372108459473,
"accuracy": 0.8515625,
"epoch": 0.145,
"step": 58
},
{
"epoch": 0.1475,
"grad_norm": 9.066951751708984,
"learning_rate": 2.6921052631578947e-06,
"loss": 0.4468,
"step": 59
},
{
"Batch Mean": 0.169053316116333,
"accuracy": 0.734375,
"epoch": 0.1475,
"step": 59
},
{
"epoch": 0.15,
"grad_norm": 6.888144016265869,
"learning_rate": 2.6842105263157895e-06,
"loss": 0.4849,
"step": 60
},
{
"Batch Mean": -0.18010663986206055,
"accuracy": 0.765625,
"epoch": 0.15,
"step": 60
},
{
"epoch": 0.1525,
"grad_norm": 6.846613883972168,
"learning_rate": 2.6763157894736842e-06,
"loss": 0.4643,
"step": 61
},
{
"Batch Mean": -0.4085197448730469,
"accuracy": 0.8359375,
"epoch": 0.1525,
"step": 61
},
{
"epoch": 0.155,
"grad_norm": 7.677695274353027,
"learning_rate": 2.668421052631579e-06,
"loss": 0.3926,
"step": 62
},
{
"Batch Mean": -0.48792409896850586,
"accuracy": 0.828125,
"epoch": 0.155,
"step": 62
},
{
"epoch": 0.1575,
"grad_norm": 8.656214714050293,
"learning_rate": 2.6605263157894737e-06,
"loss": 0.4116,
"step": 63
},
{
"Batch Mean": -0.19100165367126465,
"accuracy": 0.7734375,
"epoch": 0.1575,
"step": 63
},
{
"epoch": 0.16,
"grad_norm": 9.927976608276367,
"learning_rate": 2.6526315789473685e-06,
"loss": 0.4395,
"step": 64
},
{
"Batch Mean": 0.025106430053710938,
"accuracy": 0.8046875,
"epoch": 0.16,
"step": 64
},
{
"epoch": 0.1625,
"grad_norm": 8.97470474243164,
"learning_rate": 2.644736842105263e-06,
"loss": 0.4632,
"step": 65
},
{
"Batch Mean": 0.4497801661491394,
"accuracy": 0.796875,
"epoch": 0.1625,
"step": 65
},
{
"epoch": 0.165,
"grad_norm": 11.160425186157227,
"learning_rate": 2.636842105263158e-06,
"loss": 0.5073,
"step": 66
},
{
"Batch Mean": 0.6494235992431641,
"accuracy": 0.8203125,
"epoch": 0.165,
"step": 66
},
{
"epoch": 0.1675,
"grad_norm": 11.464187622070312,
"learning_rate": 2.6289473684210527e-06,
"loss": 0.4467,
"step": 67
},
{
"Batch Mean": -0.17097759246826172,
"accuracy": 0.828125,
"epoch": 0.1675,
"step": 67
},
{
"epoch": 0.17,
"grad_norm": 8.820326805114746,
"learning_rate": 2.6210526315789474e-06,
"loss": 0.4543,
"step": 68
},
{
"Batch Mean": -0.1554449200630188,
"accuracy": 0.8046875,
"epoch": 0.17,
"step": 68
},
{
"epoch": 0.1725,
"grad_norm": 9.248348236083984,
"learning_rate": 2.613157894736842e-06,
"loss": 0.3796,
"step": 69
},
{
"Batch Mean": -0.21274375915527344,
"accuracy": 0.75,
"epoch": 0.1725,
"step": 69
},
{
"epoch": 0.175,
"grad_norm": 8.15241813659668,
"learning_rate": 2.605263157894737e-06,
"loss": 0.4445,
"step": 70
},
{
"Batch Mean": -0.06371593475341797,
"accuracy": 0.765625,
"epoch": 0.175,
"step": 70
},
{
"epoch": 0.1775,
"grad_norm": 6.543805122375488,
"learning_rate": 2.5973684210526317e-06,
"loss": 0.4376,
"step": 71
},
{
"Batch Mean": -0.10743597149848938,
"accuracy": 0.7265625,
"epoch": 0.1775,
"step": 71
},
{
"epoch": 0.18,
"grad_norm": 7.594844341278076,
"learning_rate": 2.5894736842105264e-06,
"loss": 0.4363,
"step": 72
},
{
"Batch Mean": 0.08045387268066406,
"accuracy": 0.84375,
"epoch": 0.18,
"step": 72
},
{
"epoch": 0.1825,
"grad_norm": 7.977687835693359,
"learning_rate": 2.581578947368421e-06,
"loss": 0.3616,
"step": 73
},
{
"Batch Mean": -0.11765336990356445,
"accuracy": 0.796875,
"epoch": 0.1825,
"step": 73
},
{
"epoch": 0.185,
"grad_norm": 6.521571636199951,
"learning_rate": 2.573684210526316e-06,
"loss": 0.4001,
"step": 74
},
{
"Batch Mean": 0.0607762336730957,
"accuracy": 0.703125,
"epoch": 0.185,
"step": 74
},
{
"epoch": 0.1875,
"grad_norm": 8.15623950958252,
"learning_rate": 2.5657894736842107e-06,
"loss": 0.4941,
"step": 75
},
{
"Batch Mean": 0.4910193681716919,
"accuracy": 0.8046875,
"epoch": 0.1875,
"step": 75
},
{
"epoch": 0.19,
"grad_norm": 9.602405548095703,
"learning_rate": 2.5578947368421054e-06,
"loss": 0.4224,
"step": 76
},
{
"Batch Mean": 0.06563162803649902,
"accuracy": 0.8046875,
"epoch": 0.19,
"step": 76
},
{
"epoch": 0.1925,
"grad_norm": 6.577213764190674,
"learning_rate": 2.55e-06,
"loss": 0.4047,
"step": 77
},
{
"Batch Mean": -0.08942317962646484,
"accuracy": 0.8046875,
"epoch": 0.1925,
"step": 77
},
{
"epoch": 0.195,
"grad_norm": 16.202590942382812,
"learning_rate": 2.542105263157895e-06,
"loss": 0.4418,
"step": 78
},
{
"Batch Mean": -0.37958288192749023,
"accuracy": 0.8125,
"epoch": 0.195,
"step": 78
},
{
"epoch": 0.1975,
"grad_norm": 23.45298194885254,
"learning_rate": 2.5342105263157892e-06,
"loss": 0.391,
"step": 79
},
{
"Batch Mean": -0.5664916038513184,
"accuracy": 0.8125,
"epoch": 0.1975,
"step": 79
},
{
"epoch": 0.2,
"grad_norm": 10.679176330566406,
"learning_rate": 2.526315789473684e-06,
"loss": 0.4402,
"step": 80
},
{
"Batch Mean": -0.31339550018310547,
"accuracy": 0.8359375,
"epoch": 0.2,
"step": 80
},
{
"epoch": 0.2025,
"grad_norm": 16.459426879882812,
"learning_rate": 2.5184210526315787e-06,
"loss": 0.3765,
"step": 81
},
{
"Batch Mean": 0.20376300811767578,
"accuracy": 0.78125,
"epoch": 0.2025,
"step": 81
},
{
"epoch": 0.205,
"grad_norm": 8.887246131896973,
"learning_rate": 2.510526315789474e-06,
"loss": 0.4509,
"step": 82
},
{
"Batch Mean": -0.054105013608932495,
"accuracy": 0.796875,
"epoch": 0.205,
"step": 82
},
{
"epoch": 0.2075,
"grad_norm": 8.116570472717285,
"learning_rate": 2.5026315789473686e-06,
"loss": 0.3906,
"step": 83
},
{
"Batch Mean": 0.4383058547973633,
"accuracy": 0.875,
"epoch": 0.2075,
"step": 83
},
{
"epoch": 0.21,
"grad_norm": 9.300606727600098,
"learning_rate": 2.4947368421052634e-06,
"loss": 0.3099,
"step": 84
},
{
"Batch Mean": 0.4242062568664551,
"accuracy": 0.7890625,
"epoch": 0.21,
"step": 84
},
{
"epoch": 0.2125,
"grad_norm": 8.7870512008667,
"learning_rate": 2.486842105263158e-06,
"loss": 0.3996,
"step": 85
},
{
"Batch Mean": 0.22194945812225342,
"accuracy": 0.78125,
"epoch": 0.2125,
"step": 85
},
{
"epoch": 0.215,
"grad_norm": 8.691889762878418,
"learning_rate": 2.478947368421053e-06,
"loss": 0.4512,
"step": 86
},
{
"Batch Mean": -0.14107918739318848,
"accuracy": 0.8359375,
"epoch": 0.215,
"step": 86
},
{
"epoch": 0.2175,
"grad_norm": 7.0445146560668945,
"learning_rate": 2.4710526315789476e-06,
"loss": 0.3771,
"step": 87
},
{
"Batch Mean": -0.35906583070755005,
"accuracy": 0.765625,
"epoch": 0.2175,
"step": 87
},
{
"epoch": 0.22,
"grad_norm": 9.609421730041504,
"learning_rate": 2.4631578947368424e-06,
"loss": 0.4558,
"step": 88
},
{
"Batch Mean": -0.45313501358032227,
"accuracy": 0.828125,
"epoch": 0.22,
"step": 88
},
{
"epoch": 0.2225,
"grad_norm": 9.091865539550781,
"learning_rate": 2.4552631578947367e-06,
"loss": 0.4292,
"step": 89
},
{
"Batch Mean": -0.35767626762390137,
"accuracy": 0.8359375,
"epoch": 0.2225,
"step": 89
},
{
"epoch": 0.225,
"grad_norm": 7.811399459838867,
"learning_rate": 2.4473684210526314e-06,
"loss": 0.3823,
"step": 90
},
{
"Batch Mean": 0.0022423267364501953,
"accuracy": 0.8046875,
"epoch": 0.225,
"step": 90
},
{
"epoch": 0.2275,
"grad_norm": 7.804200172424316,
"learning_rate": 2.439473684210526e-06,
"loss": 0.4079,
"step": 91
},
{
"Batch Mean": 0.17678523063659668,
"accuracy": 0.8046875,
"epoch": 0.2275,
"step": 91
},
{
"epoch": 0.23,
"grad_norm": 7.715360641479492,
"learning_rate": 2.431578947368421e-06,
"loss": 0.391,
"step": 92
},
{
"Batch Mean": -0.22452354431152344,
"accuracy": 0.8203125,
"epoch": 0.23,
"step": 92
},
{
"epoch": 0.2325,
"grad_norm": 7.197687149047852,
"learning_rate": 2.4236842105263157e-06,
"loss": 0.3864,
"step": 93
},
{
"Batch Mean": 0.30264759063720703,
"accuracy": 0.84375,
"epoch": 0.2325,
"step": 93
},
{
"epoch": 0.235,
"grad_norm": 7.754411697387695,
"learning_rate": 2.4157894736842104e-06,
"loss": 0.3939,
"step": 94
},
{
"Batch Mean": -0.2516747713088989,
"accuracy": 0.7890625,
"epoch": 0.235,
"step": 94
},
{
"epoch": 0.2375,
"grad_norm": 8.55639362335205,
"learning_rate": 2.4078947368421056e-06,
"loss": 0.4671,
"step": 95
},
{
"Batch Mean": -0.15928316116333008,
"accuracy": 0.8046875,
"epoch": 0.2375,
"step": 95
},
{
"epoch": 0.24,
"grad_norm": 7.463867664337158,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.471,
"step": 96
},
{
"Batch Mean": -0.0638132095336914,
"accuracy": 0.8125,
"epoch": 0.24,
"step": 96
},
{
"epoch": 0.2425,
"grad_norm": 6.797260761260986,
"learning_rate": 2.392105263157895e-06,
"loss": 0.3892,
"step": 97
},
{
"Batch Mean": -0.08082282543182373,
"accuracy": 0.8203125,
"epoch": 0.2425,
"step": 97
},
{
"epoch": 0.245,
"grad_norm": 7.096107482910156,
"learning_rate": 2.38421052631579e-06,
"loss": 0.3942,
"step": 98
},
{
"Batch Mean": 0.3185189962387085,
"accuracy": 0.796875,
"epoch": 0.245,
"step": 98
},
{
"epoch": 0.2475,
"grad_norm": 9.447708129882812,
"learning_rate": 2.376315789473684e-06,
"loss": 0.5037,
"step": 99
},
{
"Batch Mean": 0.26651012897491455,
"accuracy": 0.890625,
"epoch": 0.2475,
"step": 99
},
{
"epoch": 0.25,
"grad_norm": 9.391190528869629,
"learning_rate": 2.368421052631579e-06,
"loss": 0.3352,
"step": 100
},
{
"Batch Mean": -0.0040132105350494385,
"accuracy": 0.7578125,
"epoch": 0.25,
"step": 100
},
{
"epoch": 0.2525,
"grad_norm": 7.999260425567627,
"learning_rate": 2.3605263157894736e-06,
"loss": 0.4583,
"step": 101
},
{
"Batch Mean": 0.13753342628479004,
"accuracy": 0.8125,
"epoch": 0.2525,
"step": 101
},
{
"epoch": 0.255,
"grad_norm": 7.398046493530273,
"learning_rate": 2.3526315789473684e-06,
"loss": 0.3847,
"step": 102
},
{
"Batch Mean": 0.15256333351135254,
"accuracy": 0.8671875,
"epoch": 0.255,
"step": 102
},
{
"epoch": 0.2575,
"grad_norm": 7.636544227600098,
"learning_rate": 2.344736842105263e-06,
"loss": 0.332,
"step": 103
},
{
"Batch Mean": -0.2140512466430664,
"accuracy": 0.75,
"epoch": 0.2575,
"step": 103
},
{
"epoch": 0.26,
"grad_norm": 8.101052284240723,
"learning_rate": 2.336842105263158e-06,
"loss": 0.4434,
"step": 104
},
{
"Batch Mean": -0.2993035316467285,
"accuracy": 0.8359375,
"epoch": 0.26,
"step": 104
},
{
"epoch": 0.2625,
"grad_norm": 7.990828514099121,
"learning_rate": 2.3289473684210526e-06,
"loss": 0.3701,
"step": 105
},
{
"Batch Mean": -0.0038167238235473633,
"accuracy": 0.796875,
"epoch": 0.2625,
"step": 105
},
{
"epoch": 0.265,
"grad_norm": 6.86246919631958,
"learning_rate": 2.3210526315789473e-06,
"loss": 0.3546,
"step": 106
},
{
"Batch Mean": 0.4233437776565552,
"accuracy": 0.796875,
"epoch": 0.265,
"step": 106
},
{
"epoch": 0.2675,
"grad_norm": 9.27191162109375,
"learning_rate": 2.313157894736842e-06,
"loss": 0.414,
"step": 107
},
{
"Batch Mean": 0.5853117108345032,
"accuracy": 0.8046875,
"epoch": 0.2675,
"step": 107
},
{
"epoch": 0.27,
"grad_norm": 12.894753456115723,
"learning_rate": 2.305263157894737e-06,
"loss": 0.4258,
"step": 108
},
{
"Batch Mean": 0.07786870002746582,
"accuracy": 0.8203125,
"epoch": 0.27,
"step": 108
},
{
"epoch": 0.2725,
"grad_norm": 10.551774024963379,
"learning_rate": 2.2973684210526316e-06,
"loss": 0.455,
"step": 109
},
{
"Batch Mean": 0.003017425537109375,
"accuracy": 0.8515625,
"epoch": 0.2725,
"step": 109
},
{
"epoch": 0.275,
"grad_norm": 8.059015274047852,
"learning_rate": 2.2894736842105263e-06,
"loss": 0.3559,
"step": 110
},
{
"Batch Mean": 0.08839225769042969,
"accuracy": 0.859375,
"epoch": 0.275,
"step": 110
},
{
"epoch": 0.2775,
"grad_norm": 7.2062530517578125,
"learning_rate": 2.281578947368421e-06,
"loss": 0.2765,
"step": 111
},
{
"Batch Mean": -0.4452195167541504,
"accuracy": 0.890625,
"epoch": 0.2775,
"step": 111
},
{
"epoch": 0.28,
"grad_norm": 9.328097343444824,
"learning_rate": 2.273684210526316e-06,
"loss": 0.2535,
"step": 112
},
{
"Batch Mean": -0.2987861633300781,
"accuracy": 0.828125,
"epoch": 0.28,
"step": 112
},
{
"epoch": 0.2825,
"grad_norm": 10.32376766204834,
"learning_rate": 2.2657894736842106e-06,
"loss": 0.4152,
"step": 113
},
{
"Batch Mean": -0.05189800262451172,
"accuracy": 0.875,
"epoch": 0.2825,
"step": 113
},
{
"epoch": 0.285,
"grad_norm": 7.734317779541016,
"learning_rate": 2.2578947368421053e-06,
"loss": 0.2874,
"step": 114
},
{
"Batch Mean": 0.8154248595237732,
"accuracy": 0.7578125,
"epoch": 0.285,
"step": 114
},
{
"epoch": 0.2875,
"grad_norm": 16.42246437072754,
"learning_rate": 2.25e-06,
"loss": 0.517,
"step": 115
},
{
"Batch Mean": 0.2764568328857422,
"accuracy": 0.8203125,
"epoch": 0.2875,
"step": 115
},
{
"epoch": 0.29,
"grad_norm": 10.174060821533203,
"learning_rate": 2.242105263157895e-06,
"loss": 0.3843,
"step": 116
},
{
"Batch Mean": -0.10035037994384766,
"accuracy": 0.84375,
"epoch": 0.29,
"step": 116
},
{
"epoch": 0.2925,
"grad_norm": 7.6288161277771,
"learning_rate": 2.2342105263157895e-06,
"loss": 0.3509,
"step": 117
},
{
"Batch Mean": -0.22602462768554688,
"accuracy": 0.84375,
"epoch": 0.2925,
"step": 117
},
{
"epoch": 0.295,
"grad_norm": 8.613261222839355,
"learning_rate": 2.2263157894736843e-06,
"loss": 0.3731,
"step": 118
},
{
"Batch Mean": -0.09805738925933838,
"accuracy": 0.7265625,
"epoch": 0.295,
"step": 118
},
{
"epoch": 0.2975,
"grad_norm": 10.688722610473633,
"learning_rate": 2.218421052631579e-06,
"loss": 0.4856,
"step": 119
},
{
"Batch Mean": -0.21628332138061523,
"accuracy": 0.8125,
"epoch": 0.2975,
"step": 119
},
{
"epoch": 0.3,
"grad_norm": 7.811151027679443,
"learning_rate": 2.2105263157894738e-06,
"loss": 0.3613,
"step": 120
},
{
"Batch Mean": -0.4057881832122803,
"accuracy": 0.796875,
"epoch": 0.3,
"step": 120
},
{
"epoch": 0.3025,
"grad_norm": 8.88042163848877,
"learning_rate": 2.2026315789473685e-06,
"loss": 0.3821,
"step": 121
},
{
"Batch Mean": -0.3958141803741455,
"accuracy": 0.84375,
"epoch": 0.3025,
"step": 121
},
{
"epoch": 0.305,
"grad_norm": 8.513867378234863,
"learning_rate": 2.1947368421052633e-06,
"loss": 0.3592,
"step": 122
},
{
"Batch Mean": 0.11790943145751953,
"accuracy": 0.8671875,
"epoch": 0.305,
"step": 122
},
{
"epoch": 0.3075,
"grad_norm": 6.011488437652588,
"learning_rate": 2.186842105263158e-06,
"loss": 0.3249,
"step": 123
},
{
"Batch Mean": 0.7885537147521973,
"accuracy": 0.84375,
"epoch": 0.3075,
"step": 123
},
{
"epoch": 0.31,
"grad_norm": 13.359249114990234,
"learning_rate": 2.1789473684210528e-06,
"loss": 0.3776,
"step": 124
},
{
"Batch Mean": 0.906917929649353,
"accuracy": 0.8125,
"epoch": 0.31,
"step": 124
},
{
"epoch": 0.3125,
"grad_norm": 13.69727897644043,
"learning_rate": 2.1710526315789475e-06,
"loss": 0.4525,
"step": 125
},
{
"Batch Mean": 0.7685656547546387,
"accuracy": 0.8046875,
"epoch": 0.3125,
"step": 125
},
{
"epoch": 0.315,
"grad_norm": 13.353426933288574,
"learning_rate": 2.1631578947368423e-06,
"loss": 0.3873,
"step": 126
},
{
"Batch Mean": 0.31209874153137207,
"accuracy": 0.8515625,
"epoch": 0.315,
"step": 126
},
{
"epoch": 0.3175,
"grad_norm": 8.501708030700684,
"learning_rate": 2.155263157894737e-06,
"loss": 0.3643,
"step": 127
},
{
"Batch Mean": -0.0774112343788147,
"accuracy": 0.8515625,
"epoch": 0.3175,
"step": 127
},
{
"epoch": 0.32,
"grad_norm": 6.79693078994751,
"learning_rate": 2.1473684210526317e-06,
"loss": 0.3361,
"step": 128
},
{
"Batch Mean": -1.1531860828399658,
"accuracy": 0.875,
"epoch": 0.32,
"step": 128
},
{
"epoch": 0.3225,
"grad_norm": 18.78434181213379,
"learning_rate": 2.1394736842105265e-06,
"loss": 0.381,
"step": 129
},
{
"Batch Mean": -0.9209874868392944,
"accuracy": 0.828125,
"epoch": 0.3225,
"step": 129
},
{
"epoch": 0.325,
"grad_norm": 15.549153327941895,
"learning_rate": 2.1315789473684212e-06,
"loss": 0.352,
"step": 130
},
{
"Batch Mean": -0.5993038415908813,
"accuracy": 0.7734375,
"epoch": 0.325,
"step": 130
},
{
"epoch": 0.3275,
"grad_norm": 11.017681121826172,
"learning_rate": 2.123684210526316e-06,
"loss": 0.4731,
"step": 131
},
{
"Batch Mean": -0.5436639785766602,
"accuracy": 0.859375,
"epoch": 0.3275,
"step": 131
},
{
"epoch": 0.33,
"grad_norm": 9.474353790283203,
"learning_rate": 2.1157894736842103e-06,
"loss": 0.3224,
"step": 132
},
{
"Batch Mean": 0.03529167175292969,
"accuracy": 0.7890625,
"epoch": 0.33,
"step": 132
},
{
"epoch": 0.3325,
"grad_norm": 9.17381763458252,
"learning_rate": 2.107894736842105e-06,
"loss": 0.4091,
"step": 133
},
{
"Batch Mean": 0.37068402767181396,
"accuracy": 0.7890625,
"epoch": 0.3325,
"step": 133
},
{
"epoch": 0.335,
"grad_norm": 8.768671035766602,
"learning_rate": 2.1e-06,
"loss": 0.4094,
"step": 134
},
{
"Batch Mean": 0.8418741226196289,
"accuracy": 0.8203125,
"epoch": 0.335,
"step": 134
},
{
"epoch": 0.3375,
"grad_norm": 13.19222354888916,
"learning_rate": 2.0921052631578945e-06,
"loss": 0.4361,
"step": 135
},
{
"Batch Mean": 0.38561856746673584,
"accuracy": 0.828125,
"epoch": 0.3375,
"step": 135
},
{
"epoch": 0.34,
"grad_norm": 7.658647537231445,
"learning_rate": 2.0842105263157897e-06,
"loss": 0.3619,
"step": 136
},
{
"Batch Mean": 0.24846503138542175,
"accuracy": 0.8203125,
"epoch": 0.34,
"step": 136
},
{
"epoch": 0.3425,
"grad_norm": 6.886051654815674,
"learning_rate": 2.0763157894736845e-06,
"loss": 0.3778,
"step": 137
},
{
"Batch Mean": -0.3311891555786133,
"accuracy": 0.8203125,
"epoch": 0.3425,
"step": 137
},
{
"epoch": 0.345,
"grad_norm": 7.944254398345947,
"learning_rate": 2.068421052631579e-06,
"loss": 0.3948,
"step": 138
},
{
"Batch Mean": -0.568354606628418,
"accuracy": 0.84375,
"epoch": 0.345,
"step": 138
},
{
"epoch": 0.3475,
"grad_norm": 9.763813018798828,
"learning_rate": 2.060526315789474e-06,
"loss": 0.3355,
"step": 139
},
{
"Batch Mean": -0.39496636390686035,
"accuracy": 0.8671875,
"epoch": 0.3475,
"step": 139
},
{
"epoch": 0.35,
"grad_norm": 7.516551971435547,
"learning_rate": 2.0526315789473687e-06,
"loss": 0.3501,
"step": 140
},
{
"Batch Mean": -0.15581655502319336,
"accuracy": 0.828125,
"epoch": 0.35,
"step": 140
},
{
"epoch": 0.3525,
"grad_norm": 8.155888557434082,
"learning_rate": 2.0447368421052634e-06,
"loss": 0.3773,
"step": 141
},
{
"Batch Mean": 0.44981956481933594,
"accuracy": 0.7578125,
"epoch": 0.3525,
"step": 141
},
{
"epoch": 0.355,
"grad_norm": 9.395730018615723,
"learning_rate": 2.0368421052631578e-06,
"loss": 0.4603,
"step": 142
},
{
"Batch Mean": 0.33932405710220337,
"accuracy": 0.8125,
"epoch": 0.355,
"step": 142
},
{
"epoch": 0.3575,
"grad_norm": 8.501273155212402,
"learning_rate": 2.0289473684210525e-06,
"loss": 0.4167,
"step": 143
},
{
"Batch Mean": 0.31684207916259766,
"accuracy": 0.875,
"epoch": 0.3575,
"step": 143
},
{
"epoch": 0.36,
"grad_norm": 7.186644554138184,
"learning_rate": 2.0210526315789473e-06,
"loss": 0.3359,
"step": 144
},
{
"Batch Mean": 0.14882159233093262,
"accuracy": 0.8359375,
"epoch": 0.36,
"step": 144
},
{
"epoch": 0.3625,
"grad_norm": 7.065666675567627,
"learning_rate": 2.013157894736842e-06,
"loss": 0.3773,
"step": 145
},
{
"Batch Mean": 0.010605573654174805,
"accuracy": 0.8671875,
"epoch": 0.3625,
"step": 145
},
{
"epoch": 0.365,
"grad_norm": 6.343169212341309,
"learning_rate": 2.0052631578947367e-06,
"loss": 0.3441,
"step": 146
},
{
"Batch Mean": 0.13873529434204102,
"accuracy": 0.828125,
"epoch": 0.365,
"step": 146
},
{
"epoch": 0.3675,
"grad_norm": 7.586203098297119,
"learning_rate": 1.9973684210526315e-06,
"loss": 0.3723,
"step": 147
},
{
"Batch Mean": -0.6648737192153931,
"accuracy": 0.7890625,
"epoch": 0.3675,
"step": 147
},
{
"epoch": 0.37,
"grad_norm": 11.366660118103027,
"learning_rate": 1.9894736842105262e-06,
"loss": 0.4372,
"step": 148
},
{
"Batch Mean": -0.3633718490600586,
"accuracy": 0.8046875,
"epoch": 0.37,
"step": 148
},
{
"epoch": 0.3725,
"grad_norm": 7.831110000610352,
"learning_rate": 1.9815789473684214e-06,
"loss": 0.4074,
"step": 149
},
{
"Batch Mean": -0.12644743919372559,
"accuracy": 0.8828125,
"epoch": 0.3725,
"step": 149
},
{
"epoch": 0.375,
"grad_norm": 6.3676371574401855,
"learning_rate": 1.973684210526316e-06,
"loss": 0.304,
"step": 150
},
{
"Batch Mean": 0.36059999465942383,
"accuracy": 0.8125,
"epoch": 0.375,
"step": 150
},
{
"epoch": 0.3775,
"grad_norm": 8.31554889678955,
"learning_rate": 1.965789473684211e-06,
"loss": 0.3811,
"step": 151
},
{
"Batch Mean": 0.3979158401489258,
"accuracy": 0.8125,
"epoch": 0.3775,
"step": 151
},
{
"epoch": 0.38,
"grad_norm": 9.095989227294922,
"learning_rate": 1.9578947368421052e-06,
"loss": 0.4376,
"step": 152
},
{
"Batch Mean": 0.443440318107605,
"accuracy": 0.8359375,
"epoch": 0.38,
"step": 152
},
{
"epoch": 0.3825,
"grad_norm": 8.482221603393555,
"learning_rate": 1.95e-06,
"loss": 0.3351,
"step": 153
},
{
"Batch Mean": 0.3431110382080078,
"accuracy": 0.828125,
"epoch": 0.3825,
"step": 153
},
{
"epoch": 0.385,
"grad_norm": 8.352450370788574,
"learning_rate": 1.9421052631578947e-06,
"loss": 0.3627,
"step": 154
},
{
"Batch Mean": 0.17504891753196716,
"accuracy": 0.8828125,
"epoch": 0.385,
"step": 154
},
{
"epoch": 0.3875,
"grad_norm": 6.995707035064697,
"learning_rate": 1.9342105263157895e-06,
"loss": 0.323,
"step": 155
},
{
"Batch Mean": -0.1753711700439453,
"accuracy": 0.8515625,
"epoch": 0.3875,
"step": 155
},
{
"epoch": 0.39,
"grad_norm": 7.297128677368164,
"learning_rate": 1.926315789473684e-06,
"loss": 0.3426,
"step": 156
},
{
"Batch Mean": -0.17636048793792725,
"accuracy": 0.8515625,
"epoch": 0.39,
"step": 156
},
{
"epoch": 0.3925,
"grad_norm": 6.796414375305176,
"learning_rate": 1.918421052631579e-06,
"loss": 0.3263,
"step": 157
},
{
"Batch Mean": -0.7188338041305542,
"accuracy": 0.875,
"epoch": 0.3925,
"step": 157
},
{
"epoch": 0.395,
"grad_norm": 10.371746063232422,
"learning_rate": 1.9105263157894737e-06,
"loss": 0.3046,
"step": 158
},
{
"Batch Mean": -0.5050258636474609,
"accuracy": 0.8671875,
"epoch": 0.395,
"step": 158
},
{
"epoch": 0.3975,
"grad_norm": 8.59885311126709,
"learning_rate": 1.9026315789473684e-06,
"loss": 0.3257,
"step": 159
},
{
"Batch Mean": 0.15757006406784058,
"accuracy": 0.875,
"epoch": 0.3975,
"step": 159
},
{
"epoch": 0.4,
"grad_norm": 6.61456298828125,
"learning_rate": 1.8947368421052632e-06,
"loss": 0.2748,
"step": 160
},
{
"Batch Mean": 0.4738607406616211,
"accuracy": 0.8359375,
"epoch": 0.4,
"step": 160
},
{
"epoch": 0.4025,
"grad_norm": 9.277711868286133,
"learning_rate": 1.8868421052631577e-06,
"loss": 0.3558,
"step": 161
},
{
"Batch Mean": 0.9098443984985352,
"accuracy": 0.8515625,
"epoch": 0.4025,
"step": 161
},
{
"epoch": 0.405,
"grad_norm": 14.112342834472656,
"learning_rate": 1.8789473684210525e-06,
"loss": 0.3479,
"step": 162
},
{
"Batch Mean": -0.09790003299713135,
"accuracy": 0.8515625,
"epoch": 0.405,
"step": 162
},
{
"epoch": 0.4075,
"grad_norm": 7.988643169403076,
"learning_rate": 1.8710526315789476e-06,
"loss": 0.3329,
"step": 163
},
{
"Batch Mean": -0.3589191436767578,
"accuracy": 0.875,
"epoch": 0.4075,
"step": 163
},
{
"epoch": 0.41,
"grad_norm": 8.374390602111816,
"learning_rate": 1.8631578947368424e-06,
"loss": 0.3404,
"step": 164
},
{
"Batch Mean": -0.2895526885986328,
"accuracy": 0.8671875,
"epoch": 0.41,
"step": 164
},
{
"epoch": 0.4125,
"grad_norm": 10.125029563903809,
"learning_rate": 1.855263157894737e-06,
"loss": 0.3828,
"step": 165
},
{
"Batch Mean": -0.7614960670471191,
"accuracy": 0.8515625,
"epoch": 0.4125,
"step": 165
},
{
"epoch": 0.415,
"grad_norm": 12.611261367797852,
"learning_rate": 1.8473684210526317e-06,
"loss": 0.3182,
"step": 166
},
{
"Batch Mean": 0.047823190689086914,
"accuracy": 0.84375,
"epoch": 0.415,
"step": 166
},
{
"epoch": 0.4175,
"grad_norm": 7.5092644691467285,
"learning_rate": 1.8394736842105264e-06,
"loss": 0.3277,
"step": 167
},
{
"Batch Mean": 0.3487553596496582,
"accuracy": 0.8828125,
"epoch": 0.4175,
"step": 167
},
{
"epoch": 0.42,
"grad_norm": 7.762388229370117,
"learning_rate": 1.8315789473684211e-06,
"loss": 0.3021,
"step": 168
},
{
"Batch Mean": -0.34416115283966064,
"accuracy": 0.7890625,
"epoch": 0.42,
"step": 168
},
{
"epoch": 0.4225,
"grad_norm": 9.338401794433594,
"learning_rate": 1.8236842105263159e-06,
"loss": 0.4031,
"step": 169
},
{
"Batch Mean": 0.013556480407714844,
"accuracy": 0.8515625,
"epoch": 0.4225,
"step": 169
},
{
"epoch": 0.425,
"grad_norm": 8.208139419555664,
"learning_rate": 1.8157894736842106e-06,
"loss": 0.367,
"step": 170
},
{
"Batch Mean": -0.1292668581008911,
"accuracy": 0.8828125,
"epoch": 0.425,
"step": 170
},
{
"epoch": 0.4275,
"grad_norm": 6.575423717498779,
"learning_rate": 1.8078947368421052e-06,
"loss": 0.2778,
"step": 171
},
{
"Batch Mean": -0.5196864604949951,
"accuracy": 0.828125,
"epoch": 0.4275,
"step": 171
},
{
"epoch": 0.43,
"grad_norm": 9.9501953125,
"learning_rate": 1.8e-06,
"loss": 0.388,
"step": 172
},
{
"Batch Mean": -0.10827267169952393,
"accuracy": 0.8515625,
"epoch": 0.43,
"step": 172
},
{
"epoch": 0.4325,
"grad_norm": 6.746122360229492,
"learning_rate": 1.7921052631578947e-06,
"loss": 0.3337,
"step": 173
},
{
"Batch Mean": -0.20117001235485077,
"accuracy": 0.859375,
"epoch": 0.4325,
"step": 173
},
{
"epoch": 0.435,
"grad_norm": 7.591915130615234,
"learning_rate": 1.7842105263157894e-06,
"loss": 0.3576,
"step": 174
},
{
"Batch Mean": 0.4478938579559326,
"accuracy": 0.8125,
"epoch": 0.435,
"step": 174
},
{
"epoch": 0.4375,
"grad_norm": 8.740865707397461,
"learning_rate": 1.7763157894736842e-06,
"loss": 0.3836,
"step": 175
},
{
"Batch Mean": 0.1370692253112793,
"accuracy": 0.8359375,
"epoch": 0.4375,
"step": 175
},
{
"epoch": 0.44,
"grad_norm": 6.480626583099365,
"learning_rate": 1.768421052631579e-06,
"loss": 0.3648,
"step": 176
},
{
"Batch Mean": 0.47721147537231445,
"accuracy": 0.859375,
"epoch": 0.44,
"step": 176
},
{
"epoch": 0.4425,
"grad_norm": 9.018311500549316,
"learning_rate": 1.7605263157894739e-06,
"loss": 0.4128,
"step": 177
},
{
"Batch Mean": 0.13465499877929688,
"accuracy": 0.8671875,
"epoch": 0.4425,
"step": 177
},
{
"epoch": 0.445,
"grad_norm": 6.7764973640441895,
"learning_rate": 1.7526315789473686e-06,
"loss": 0.3124,
"step": 178
},
{
"Batch Mean": 0.25302886962890625,
"accuracy": 0.8671875,
"epoch": 0.445,
"step": 178
},
{
"epoch": 0.4475,
"grad_norm": 7.894146919250488,
"learning_rate": 1.7447368421052633e-06,
"loss": 0.3335,
"step": 179
},
{
"Batch Mean": -0.5794901847839355,
"accuracy": 0.7734375,
"epoch": 0.4475,
"step": 179
},
{
"epoch": 0.45,
"grad_norm": 8.826544761657715,
"learning_rate": 1.736842105263158e-06,
"loss": 0.3982,
"step": 180
},
{
"Batch Mean": 0.16424822807312012,
"accuracy": 0.84375,
"epoch": 0.45,
"step": 180
},
{
"epoch": 0.4525,
"grad_norm": 5.991344451904297,
"learning_rate": 1.7289473684210526e-06,
"loss": 0.3408,
"step": 181
},
{
"Batch Mean": -0.13757801055908203,
"accuracy": 0.8671875,
"epoch": 0.4525,
"step": 181
},
{
"epoch": 0.455,
"grad_norm": 5.649535179138184,
"learning_rate": 1.7210526315789474e-06,
"loss": 0.3081,
"step": 182
},
{
"Batch Mean": 0.13538789749145508,
"accuracy": 0.78125,
"epoch": 0.455,
"step": 182
},
{
"epoch": 0.4575,
"grad_norm": 7.989570140838623,
"learning_rate": 1.7131578947368421e-06,
"loss": 0.4532,
"step": 183
},
{
"Batch Mean": -0.09452962875366211,
"accuracy": 0.796875,
"epoch": 0.4575,
"step": 183
},
{
"epoch": 0.46,
"grad_norm": 6.7350616455078125,
"learning_rate": 1.7052631578947369e-06,
"loss": 0.3937,
"step": 184
},
{
"Batch Mean": -0.19620466232299805,
"accuracy": 0.8046875,
"epoch": 0.46,
"step": 184
},
{
"epoch": 0.4625,
"grad_norm": 32.979637145996094,
"learning_rate": 1.6973684210526316e-06,
"loss": 0.4651,
"step": 185
},
{
"Batch Mean": 0.203094482421875,
"accuracy": 0.8203125,
"epoch": 0.4625,
"step": 185
},
{
"epoch": 0.465,
"grad_norm": 6.455933570861816,
"learning_rate": 1.6894736842105264e-06,
"loss": 0.3405,
"step": 186
},
{
"Batch Mean": 0.39686131477355957,
"accuracy": 0.875,
"epoch": 0.465,
"step": 186
},
{
"epoch": 0.4675,
"grad_norm": 6.76831579208374,
"learning_rate": 1.6815789473684209e-06,
"loss": 0.3486,
"step": 187
},
{
"Batch Mean": -0.008591651916503906,
"accuracy": 0.859375,
"epoch": 0.4675,
"step": 187
},
{
"epoch": 0.47,
"grad_norm": 5.692453861236572,
"learning_rate": 1.6736842105263156e-06,
"loss": 0.3288,
"step": 188
},
{
"Batch Mean": -0.36408185958862305,
"accuracy": 0.8046875,
"epoch": 0.47,
"step": 188
},
{
"epoch": 0.4725,
"grad_norm": 7.711721897125244,
"learning_rate": 1.6657894736842104e-06,
"loss": 0.4075,
"step": 189
},
{
"Batch Mean": -0.4406242370605469,
"accuracy": 0.890625,
"epoch": 0.4725,
"step": 189
},
{
"epoch": 0.475,
"grad_norm": 7.3570756912231445,
"learning_rate": 1.6578947368421056e-06,
"loss": 0.3025,
"step": 190
},
{
"Batch Mean": 0.1726367473602295,
"accuracy": 0.8125,
"epoch": 0.475,
"step": 190
},
{
"epoch": 0.4775,
"grad_norm": 7.980853080749512,
"learning_rate": 1.65e-06,
"loss": 0.3876,
"step": 191
},
{
"Batch Mean": -0.0961066484451294,
"accuracy": 0.90625,
"epoch": 0.4775,
"step": 191
},
{
"epoch": 0.48,
"grad_norm": 5.444026947021484,
"learning_rate": 1.6421052631578948e-06,
"loss": 0.2777,
"step": 192
},
{
"Batch Mean": -0.12086796760559082,
"accuracy": 0.8515625,
"epoch": 0.48,
"step": 192
},
{
"epoch": 0.4825,
"grad_norm": 5.993566989898682,
"learning_rate": 1.6342105263157896e-06,
"loss": 0.3327,
"step": 193
},
{
"Batch Mean": 0.5805015563964844,
"accuracy": 0.84375,
"epoch": 0.4825,
"step": 193
},
{
"epoch": 0.485,
"grad_norm": 9.61883544921875,
"learning_rate": 1.6263157894736843e-06,
"loss": 0.3707,
"step": 194
},
{
"Batch Mean": 0.8935675024986267,
"accuracy": 0.8046875,
"epoch": 0.485,
"step": 194
},
{
"epoch": 0.4875,
"grad_norm": 12.503327369689941,
"learning_rate": 1.618421052631579e-06,
"loss": 0.3951,
"step": 195
},
{
"Batch Mean": 0.2793365716934204,
"accuracy": 0.8046875,
"epoch": 0.4875,
"step": 195
},
{
"epoch": 0.49,
"grad_norm": 7.763684272766113,
"learning_rate": 1.6105263157894738e-06,
"loss": 0.3955,
"step": 196
},
{
"Batch Mean": -0.14763522148132324,
"accuracy": 0.8359375,
"epoch": 0.49,
"step": 196
},
{
"epoch": 0.4925,
"grad_norm": 7.21512508392334,
"learning_rate": 1.6026315789473683e-06,
"loss": 0.3188,
"step": 197
},
{
"Batch Mean": -0.5799511671066284,
"accuracy": 0.8671875,
"epoch": 0.4925,
"step": 197
},
{
"epoch": 0.495,
"grad_norm": 8.71732234954834,
"learning_rate": 1.594736842105263e-06,
"loss": 0.3139,
"step": 198
},
{
"Batch Mean": -0.46822547912597656,
"accuracy": 0.890625,
"epoch": 0.495,
"step": 198
},
{
"epoch": 0.4975,
"grad_norm": 7.613549709320068,
"learning_rate": 1.5868421052631578e-06,
"loss": 0.2978,
"step": 199
},
{
"Batch Mean": -0.370266854763031,
"accuracy": 0.875,
"epoch": 0.4975,
"step": 199
},
{
"epoch": 0.5,
"grad_norm": 7.669687271118164,
"learning_rate": 1.5789473684210526e-06,
"loss": 0.2982,
"step": 200
},
{
"Batch Mean": -0.09551525115966797,
"accuracy": 0.8359375,
"epoch": 0.5,
"step": 200
},
{
"epoch": 0.5025,
"grad_norm": 6.744908332824707,
"learning_rate": 1.5710526315789473e-06,
"loss": 0.3635,
"step": 201
},
{
"Batch Mean": -0.1286020278930664,
"accuracy": 0.90625,
"epoch": 0.5025,
"step": 201
},
{
"epoch": 0.505,
"grad_norm": 5.752479076385498,
"learning_rate": 1.563157894736842e-06,
"loss": 0.2412,
"step": 202
},
{
"Batch Mean": 0.24757051467895508,
"accuracy": 0.890625,
"epoch": 0.505,
"step": 202
},
{
"epoch": 0.5075,
"grad_norm": 6.615243434906006,
"learning_rate": 1.5552631578947368e-06,
"loss": 0.2638,
"step": 203
},
{
"Batch Mean": 0.6454036235809326,
"accuracy": 0.84375,
"epoch": 0.5075,
"step": 203
},
{
"epoch": 0.51,
"grad_norm": 11.68124771118164,
"learning_rate": 1.5473684210526318e-06,
"loss": 0.3812,
"step": 204
},
{
"Batch Mean": 0.674675464630127,
"accuracy": 0.8046875,
"epoch": 0.51,
"step": 204
},
{
"epoch": 0.5125,
"grad_norm": 11.759324073791504,
"learning_rate": 1.5394736842105265e-06,
"loss": 0.3925,
"step": 205
},
{
"Batch Mean": 0.8345117568969727,
"accuracy": 0.7890625,
"epoch": 0.5125,
"step": 205
},
{
"epoch": 0.515,
"grad_norm": 12.204639434814453,
"learning_rate": 1.5315789473684213e-06,
"loss": 0.416,
"step": 206
},
{
"Batch Mean": -0.10074353218078613,
"accuracy": 0.8359375,
"epoch": 0.515,
"step": 206
},
{
"epoch": 0.5175,
"grad_norm": 7.523916721343994,
"learning_rate": 1.5236842105263158e-06,
"loss": 0.3201,
"step": 207
},
{
"Batch Mean": -0.37576836347579956,
"accuracy": 0.8671875,
"epoch": 0.5175,
"step": 207
},
{
"epoch": 0.52,
"grad_norm": 8.584857940673828,
"learning_rate": 1.5157894736842105e-06,
"loss": 0.3001,
"step": 208
},
{
"Batch Mean": 0.14268207550048828,
"accuracy": 0.875,
"epoch": 0.52,
"step": 208
},
{
"epoch": 0.5225,
"grad_norm": 6.892088413238525,
"learning_rate": 1.5078947368421053e-06,
"loss": 0.3227,
"step": 209
},
{
"Batch Mean": -0.4613943099975586,
"accuracy": 0.859375,
"epoch": 0.5225,
"step": 209
},
{
"epoch": 0.525,
"grad_norm": 8.836865425109863,
"learning_rate": 1.5e-06,
"loss": 0.3391,
"step": 210
},
{
"Batch Mean": -0.3657095432281494,
"accuracy": 0.8125,
"epoch": 0.525,
"step": 210
},
{
"epoch": 0.5275,
"grad_norm": 10.453470230102539,
"learning_rate": 1.4921052631578948e-06,
"loss": 0.3723,
"step": 211
},
{
"Batch Mean": -0.2775421142578125,
"accuracy": 0.8203125,
"epoch": 0.5275,
"step": 211
},
{
"epoch": 0.53,
"grad_norm": 8.066608428955078,
"learning_rate": 1.4842105263157895e-06,
"loss": 0.3386,
"step": 212
},
{
"Batch Mean": 0.16974592208862305,
"accuracy": 0.828125,
"epoch": 0.53,
"step": 212
},
{
"epoch": 0.5325,
"grad_norm": 7.698607444763184,
"learning_rate": 1.4763157894736843e-06,
"loss": 0.3777,
"step": 213
},
{
"Batch Mean": 0.3739891052246094,
"accuracy": 0.8515625,
"epoch": 0.5325,
"step": 213
},
{
"epoch": 0.535,
"grad_norm": 9.513444900512695,
"learning_rate": 1.468421052631579e-06,
"loss": 0.3374,
"step": 214
},
{
"Batch Mean": 0.13162872195243835,
"accuracy": 0.828125,
"epoch": 0.535,
"step": 214
},
{
"epoch": 0.5375,
"grad_norm": 7.565595626831055,
"learning_rate": 1.4605263157894738e-06,
"loss": 0.3599,
"step": 215
},
{
"Batch Mean": -0.06913161277770996,
"accuracy": 0.84375,
"epoch": 0.5375,
"step": 215
},
{
"epoch": 0.54,
"grad_norm": 8.069997787475586,
"learning_rate": 1.4526315789473685e-06,
"loss": 0.372,
"step": 216
},
{
"Batch Mean": 0.10277748107910156,
"accuracy": 0.84375,
"epoch": 0.54,
"step": 216
},
{
"epoch": 0.5425,
"grad_norm": 6.85213041305542,
"learning_rate": 1.4447368421052633e-06,
"loss": 0.3318,
"step": 217
},
{
"Batch Mean": 0.10326993465423584,
"accuracy": 0.859375,
"epoch": 0.5425,
"step": 217
},
{
"epoch": 0.545,
"grad_norm": 7.071889877319336,
"learning_rate": 1.436842105263158e-06,
"loss": 0.3079,
"step": 218
},
{
"Batch Mean": -0.24563944339752197,
"accuracy": 0.859375,
"epoch": 0.545,
"step": 218
},
{
"epoch": 0.5475,
"grad_norm": 6.660257816314697,
"learning_rate": 1.4289473684210525e-06,
"loss": 0.3241,
"step": 219
},
{
"Batch Mean": -0.0343480110168457,
"accuracy": 0.890625,
"epoch": 0.5475,
"step": 219
},
{
"epoch": 0.55,
"grad_norm": 6.178592681884766,
"learning_rate": 1.4210526315789473e-06,
"loss": 0.3169,
"step": 220
},
{
"Batch Mean": -0.4012432098388672,
"accuracy": 0.84375,
"epoch": 0.55,
"step": 220
},
{
"epoch": 0.5525,
"grad_norm": 8.061397552490234,
"learning_rate": 1.4131578947368422e-06,
"loss": 0.3649,
"step": 221
},
{
"Batch Mean": 0.20463943481445312,
"accuracy": 0.828125,
"epoch": 0.5525,
"step": 221
},
{
"epoch": 0.555,
"grad_norm": 7.943480491638184,
"learning_rate": 1.405263157894737e-06,
"loss": 0.4093,
"step": 222
},
{
"Batch Mean": 0.2658262252807617,
"accuracy": 0.8203125,
"epoch": 0.555,
"step": 222
},
{
"epoch": 0.5575,
"grad_norm": 7.583705425262451,
"learning_rate": 1.3973684210526317e-06,
"loss": 0.3637,
"step": 223
},
{
"Batch Mean": 0.20087218284606934,
"accuracy": 0.8671875,
"epoch": 0.5575,
"step": 223
},
{
"epoch": 0.56,
"grad_norm": 6.919849395751953,
"learning_rate": 1.3894736842105263e-06,
"loss": 0.2951,
"step": 224
},
{
"Batch Mean": -0.3413984775543213,
"accuracy": 0.84375,
"epoch": 0.56,
"step": 224
},
{
"epoch": 0.5625,
"grad_norm": 8.942002296447754,
"learning_rate": 1.381578947368421e-06,
"loss": 0.3762,
"step": 225
},
{
"Batch Mean": 0.12860620021820068,
"accuracy": 0.8515625,
"epoch": 0.5625,
"step": 225
},
{
"epoch": 0.565,
"grad_norm": 6.127406120300293,
"learning_rate": 1.3736842105263158e-06,
"loss": 0.3079,
"step": 226
},
{
"Batch Mean": 0.04802989959716797,
"accuracy": 0.890625,
"epoch": 0.565,
"step": 226
},
{
"epoch": 0.5675,
"grad_norm": 5.955417156219482,
"learning_rate": 1.3657894736842107e-06,
"loss": 0.2864,
"step": 227
},
{
"Batch Mean": 0.22469806671142578,
"accuracy": 0.796875,
"epoch": 0.5675,
"step": 227
},
{
"epoch": 0.57,
"grad_norm": 8.0281343460083,
"learning_rate": 1.3578947368421055e-06,
"loss": 0.416,
"step": 228
},
{
"Batch Mean": -0.13057279586791992,
"accuracy": 0.8203125,
"epoch": 0.57,
"step": 228
},
{
"epoch": 0.5725,
"grad_norm": 7.234789848327637,
"learning_rate": 1.35e-06,
"loss": 0.3897,
"step": 229
},
{
"Batch Mean": -0.5940704345703125,
"accuracy": 0.875,
"epoch": 0.5725,
"step": 229
},
{
"epoch": 0.575,
"grad_norm": 9.370462417602539,
"learning_rate": 1.3421052631578947e-06,
"loss": 0.3068,
"step": 230
},
{
"Batch Mean": 0.026377856731414795,
"accuracy": 0.796875,
"epoch": 0.575,
"step": 230
},
{
"epoch": 0.5775,
"grad_norm": 8.720721244812012,
"learning_rate": 1.3342105263157895e-06,
"loss": 0.4091,
"step": 231
},
{
"Batch Mean": -0.19702854752540588,
"accuracy": 0.8046875,
"epoch": 0.5775,
"step": 231
},
{
"epoch": 0.58,
"grad_norm": 8.109902381896973,
"learning_rate": 1.3263157894736842e-06,
"loss": 0.3888,
"step": 232
},
{
"Batch Mean": 0.021877527236938477,
"accuracy": 0.8125,
"epoch": 0.58,
"step": 232
},
{
"epoch": 0.5825,
"grad_norm": 6.799435615539551,
"learning_rate": 1.318421052631579e-06,
"loss": 0.3566,
"step": 233
},
{
"Batch Mean": 0.04909229278564453,
"accuracy": 0.875,
"epoch": 0.5825,
"step": 233
},
{
"epoch": 0.585,
"grad_norm": 6.230069160461426,
"learning_rate": 1.3105263157894737e-06,
"loss": 0.2963,
"step": 234
},
{
"Batch Mean": 0.25316929817199707,
"accuracy": 0.8671875,
"epoch": 0.585,
"step": 234
},
{
"epoch": 0.5875,
"grad_norm": 6.59328556060791,
"learning_rate": 1.3026315789473685e-06,
"loss": 0.3138,
"step": 235
},
{
"Batch Mean": 0.4174957275390625,
"accuracy": 0.84375,
"epoch": 0.5875,
"step": 235
},
{
"epoch": 0.59,
"grad_norm": 8.933128356933594,
"learning_rate": 1.2947368421052632e-06,
"loss": 0.3271,
"step": 236
},
{
"Batch Mean": 0.17341864109039307,
"accuracy": 0.8671875,
"epoch": 0.59,
"step": 236
},
{
"epoch": 0.5925,
"grad_norm": 5.806175708770752,
"learning_rate": 1.286842105263158e-06,
"loss": 0.2857,
"step": 237
},
{
"Batch Mean": -0.5694665908813477,
"accuracy": 0.859375,
"epoch": 0.5925,
"step": 237
},
{
"epoch": 0.595,
"grad_norm": 9.185583114624023,
"learning_rate": 1.2789473684210527e-06,
"loss": 0.3015,
"step": 238
},
{
"Batch Mean": 0.07436943054199219,
"accuracy": 0.8359375,
"epoch": 0.595,
"step": 238
},
{
"epoch": 0.5975,
"grad_norm": 6.160710334777832,
"learning_rate": 1.2710526315789474e-06,
"loss": 0.3175,
"step": 239
},
{
"Batch Mean": 0.46900463104248047,
"accuracy": 0.8359375,
"epoch": 0.5975,
"step": 239
},
{
"epoch": 0.6,
"grad_norm": 9.330708503723145,
"learning_rate": 1.263157894736842e-06,
"loss": 0.3361,
"step": 240
},
{
"Batch Mean": -0.2093944549560547,
"accuracy": 0.8359375,
"epoch": 0.6,
"step": 240
},
{
"epoch": 0.6025,
"grad_norm": 7.174565315246582,
"learning_rate": 1.255263157894737e-06,
"loss": 0.3779,
"step": 241
},
{
"Batch Mean": 0.24665546417236328,
"accuracy": 0.875,
"epoch": 0.6025,
"step": 241
},
{
"epoch": 0.605,
"grad_norm": 6.129975318908691,
"learning_rate": 1.2473684210526317e-06,
"loss": 0.2615,
"step": 242
},
{
"Batch Mean": -0.7079062461853027,
"accuracy": 0.859375,
"epoch": 0.605,
"step": 242
},
{
"epoch": 0.6075,
"grad_norm": 10.241421699523926,
"learning_rate": 1.2394736842105264e-06,
"loss": 0.3291,
"step": 243
},
{
"Batch Mean": 0.11448732018470764,
"accuracy": 0.8046875,
"epoch": 0.6075,
"step": 243
},
{
"epoch": 0.61,
"grad_norm": 7.953644275665283,
"learning_rate": 1.2315789473684212e-06,
"loss": 0.4133,
"step": 244
},
{
"Batch Mean": -0.21373581886291504,
"accuracy": 0.8203125,
"epoch": 0.61,
"step": 244
},
{
"epoch": 0.6125,
"grad_norm": 7.578818321228027,
"learning_rate": 1.2236842105263157e-06,
"loss": 0.3893,
"step": 245
},
{
"Batch Mean": -0.2655982971191406,
"accuracy": 0.7890625,
"epoch": 0.6125,
"step": 245
},
{
"epoch": 0.615,
"grad_norm": 10.027579307556152,
"learning_rate": 1.2157894736842105e-06,
"loss": 0.4864,
"step": 246
},
{
"Batch Mean": -0.15862655639648438,
"accuracy": 0.8359375,
"epoch": 0.615,
"step": 246
},
{
"epoch": 0.6175,
"grad_norm": 6.777160167694092,
"learning_rate": 1.2078947368421052e-06,
"loss": 0.3316,
"step": 247
},
{
"Batch Mean": -0.17922639846801758,
"accuracy": 0.8671875,
"epoch": 0.6175,
"step": 247
},
{
"epoch": 0.62,
"grad_norm": 5.9845781326293945,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.3218,
"step": 248
},
{
"Batch Mean": 0.1434774398803711,
"accuracy": 0.78125,
"epoch": 0.62,
"step": 248
},
{
"epoch": 0.6225,
"grad_norm": 7.812133312225342,
"learning_rate": 1.192105263157895e-06,
"loss": 0.4105,
"step": 249
},
{
"Batch Mean": 0.1399485468864441,
"accuracy": 0.7890625,
"epoch": 0.6225,
"step": 249
},
{
"epoch": 0.625,
"grad_norm": 7.285275459289551,
"learning_rate": 1.1842105263157894e-06,
"loss": 0.3875,
"step": 250
},
{
"Batch Mean": 0.07010936737060547,
"accuracy": 0.890625,
"epoch": 0.625,
"step": 250
},
{
"epoch": 0.6275,
"grad_norm": 5.305445194244385,
"learning_rate": 1.1763157894736842e-06,
"loss": 0.2812,
"step": 251
},
{
"Batch Mean": 0.16651153564453125,
"accuracy": 0.828125,
"epoch": 0.6275,
"step": 251
},
{
"epoch": 0.63,
"grad_norm": 6.985728740692139,
"learning_rate": 1.168421052631579e-06,
"loss": 0.3698,
"step": 252
},
{
"Batch Mean": 0.37049341201782227,
"accuracy": 0.8359375,
"epoch": 0.63,
"step": 252
},
{
"epoch": 0.6325,
"grad_norm": 6.882497310638428,
"learning_rate": 1.1605263157894737e-06,
"loss": 0.3487,
"step": 253
},
{
"Batch Mean": 0.052080631256103516,
"accuracy": 0.8984375,
"epoch": 0.6325,
"step": 253
},
{
"epoch": 0.635,
"grad_norm": 5.3604302406311035,
"learning_rate": 1.1526315789473684e-06,
"loss": 0.2884,
"step": 254
},
{
"Batch Mean": -0.018785476684570312,
"accuracy": 0.8046875,
"epoch": 0.635,
"step": 254
},
{
"epoch": 0.6375,
"grad_norm": 7.260810375213623,
"learning_rate": 1.1447368421052632e-06,
"loss": 0.4406,
"step": 255
},
{
"Batch Mean": -0.04567432403564453,
"accuracy": 0.796875,
"epoch": 0.6375,
"step": 255
},
{
"epoch": 0.64,
"grad_norm": 5.7526397705078125,
"learning_rate": 1.136842105263158e-06,
"loss": 0.3769,
"step": 256
},
{
"Batch Mean": -0.09397172927856445,
"accuracy": 0.8515625,
"epoch": 0.64,
"step": 256
},
{
"epoch": 0.6425,
"grad_norm": 7.043140888214111,
"learning_rate": 1.1289473684210527e-06,
"loss": 0.3929,
"step": 257
},
{
"Batch Mean": -0.41216039657592773,
"accuracy": 0.84375,
"epoch": 0.6425,
"step": 257
},
{
"epoch": 0.645,
"grad_norm": 7.224827766418457,
"learning_rate": 1.1210526315789474e-06,
"loss": 0.3323,
"step": 258
},
{
"Batch Mean": -0.5969750881195068,
"accuracy": 0.84375,
"epoch": 0.645,
"step": 258
},
{
"epoch": 0.6475,
"grad_norm": 8.479996681213379,
"learning_rate": 1.1131578947368421e-06,
"loss": 0.355,
"step": 259
},
{
"Batch Mean": -0.06170177459716797,
"accuracy": 0.8671875,
"epoch": 0.6475,
"step": 259
},
{
"epoch": 0.65,
"grad_norm": 5.659763813018799,
"learning_rate": 1.1052631578947369e-06,
"loss": 0.3411,
"step": 260
},
{
"Batch Mean": -0.08865594863891602,
"accuracy": 0.8515625,
"epoch": 0.65,
"step": 260
},
{
"epoch": 0.6525,
"grad_norm": 5.9309773445129395,
"learning_rate": 1.0973684210526316e-06,
"loss": 0.3586,
"step": 261
},
{
"Batch Mean": 0.19340991973876953,
"accuracy": 0.8515625,
"epoch": 0.6525,
"step": 261
},
{
"epoch": 0.655,
"grad_norm": 5.759584426879883,
"learning_rate": 1.0894736842105264e-06,
"loss": 0.3353,
"step": 262
},
{
"Batch Mean": -0.2597666382789612,
"accuracy": 0.84375,
"epoch": 0.655,
"step": 262
},
{
"epoch": 0.6575,
"grad_norm": 7.266964435577393,
"learning_rate": 1.0815789473684211e-06,
"loss": 0.3751,
"step": 263
},
{
"Batch Mean": -0.14556348323822021,
"accuracy": 0.8359375,
"epoch": 0.6575,
"step": 263
},
{
"epoch": 0.66,
"grad_norm": 5.76481819152832,
"learning_rate": 1.0736842105263159e-06,
"loss": 0.3315,
"step": 264
},
{
"Batch Mean": 0.2170419692993164,
"accuracy": 0.859375,
"epoch": 0.66,
"step": 264
},
{
"epoch": 0.6625,
"grad_norm": 6.2871317863464355,
"learning_rate": 1.0657894736842106e-06,
"loss": 0.3095,
"step": 265
},
{
"Batch Mean": 0.6364197731018066,
"accuracy": 0.8203125,
"epoch": 0.6625,
"step": 265
},
{
"epoch": 0.665,
"grad_norm": 10.033836364746094,
"learning_rate": 1.0578947368421052e-06,
"loss": 0.3766,
"step": 266
},
{
"Batch Mean": 0.5460505485534668,
"accuracy": 0.8671875,
"epoch": 0.665,
"step": 266
},
{
"epoch": 0.6675,
"grad_norm": 8.776803970336914,
"learning_rate": 1.05e-06,
"loss": 0.3181,
"step": 267
},
{
"Batch Mean": 0.12220120429992676,
"accuracy": 0.8125,
"epoch": 0.6675,
"step": 267
},
{
"epoch": 0.67,
"grad_norm": 6.963134288787842,
"learning_rate": 1.0421052631578949e-06,
"loss": 0.359,
"step": 268
},
{
"Batch Mean": 0.7201442122459412,
"accuracy": 0.8359375,
"epoch": 0.67,
"step": 268
},
{
"epoch": 0.6725,
"grad_norm": 10.378531455993652,
"learning_rate": 1.0342105263157896e-06,
"loss": 0.344,
"step": 269
},
{
"Batch Mean": 0.3651304244995117,
"accuracy": 0.8359375,
"epoch": 0.6725,
"step": 269
},
{
"epoch": 0.675,
"grad_norm": 8.207369804382324,
"learning_rate": 1.0263157894736843e-06,
"loss": 0.332,
"step": 270
},
{
"Batch Mean": -0.18006157875061035,
"accuracy": 0.8671875,
"epoch": 0.675,
"step": 270
},
{
"epoch": 0.6775,
"grad_norm": 6.360766410827637,
"learning_rate": 1.0184210526315789e-06,
"loss": 0.2975,
"step": 271
},
{
"Batch Mean": -0.18421226739883423,
"accuracy": 0.84375,
"epoch": 0.6775,
"step": 271
},
{
"epoch": 0.68,
"grad_norm": 6.863755702972412,
"learning_rate": 1.0105263157894736e-06,
"loss": 0.2952,
"step": 272
},
{
"Batch Mean": -0.06856060028076172,
"accuracy": 0.8359375,
"epoch": 0.68,
"step": 272
},
{
"epoch": 0.6825,
"grad_norm": 7.211990833282471,
"learning_rate": 1.0026315789473684e-06,
"loss": 0.3316,
"step": 273
},
{
"Batch Mean": -0.47328972816467285,
"accuracy": 0.8125,
"epoch": 0.6825,
"step": 273
},
{
"epoch": 0.685,
"grad_norm": 8.822152137756348,
"learning_rate": 9.947368421052631e-07,
"loss": 0.3734,
"step": 274
},
{
"Batch Mean": -0.08451128005981445,
"accuracy": 0.84375,
"epoch": 0.685,
"step": 274
},
{
"epoch": 0.6875,
"grad_norm": 6.560077667236328,
"learning_rate": 9.86842105263158e-07,
"loss": 0.3236,
"step": 275
},
{
"Batch Mean": -0.2358086109161377,
"accuracy": 0.828125,
"epoch": 0.6875,
"step": 275
},
{
"epoch": 0.69,
"grad_norm": 6.617765426635742,
"learning_rate": 9.789473684210526e-07,
"loss": 0.365,
"step": 276
},
{
"Batch Mean": -0.09725213050842285,
"accuracy": 0.8125,
"epoch": 0.69,
"step": 276
},
{
"epoch": 0.6925,
"grad_norm": 8.159916877746582,
"learning_rate": 9.710526315789474e-07,
"loss": 0.4414,
"step": 277
},
{
"Batch Mean": -0.040779948234558105,
"accuracy": 0.84375,
"epoch": 0.6925,
"step": 277
},
{
"epoch": 0.695,
"grad_norm": 7.725914001464844,
"learning_rate": 9.63157894736842e-07,
"loss": 0.3067,
"step": 278
},
{
"Batch Mean": 0.0323566198348999,
"accuracy": 0.7734375,
"epoch": 0.695,
"step": 278
},
{
"epoch": 0.6975,
"grad_norm": 7.230583667755127,
"learning_rate": 9.552631578947368e-07,
"loss": 0.3767,
"step": 279
},
{
"Batch Mean": -0.07540559768676758,
"accuracy": 0.8515625,
"epoch": 0.6975,
"step": 279
},
{
"epoch": 0.7,
"grad_norm": 7.241883754730225,
"learning_rate": 9.473684210526316e-07,
"loss": 0.2982,
"step": 280
},
{
"Batch Mean": -0.3009473383426666,
"accuracy": 0.828125,
"epoch": 0.7,
"step": 280
},
{
"epoch": 0.7025,
"grad_norm": 8.05111312866211,
"learning_rate": 9.394736842105262e-07,
"loss": 0.3498,
"step": 281
},
{
"Batch Mean": 0.17420291900634766,
"accuracy": 0.859375,
"epoch": 0.7025,
"step": 281
},
{
"epoch": 0.705,
"grad_norm": 7.624542236328125,
"learning_rate": 9.315789473684212e-07,
"loss": 0.3384,
"step": 282
},
{
"Batch Mean": 0.3028104305267334,
"accuracy": 0.8359375,
"epoch": 0.705,
"step": 282
},
{
"epoch": 0.7075,
"grad_norm": 8.634331703186035,
"learning_rate": 9.236842105263158e-07,
"loss": 0.4123,
"step": 283
},
{
"Batch Mean": 0.26581865549087524,
"accuracy": 0.875,
"epoch": 0.7075,
"step": 283
},
{
"epoch": 0.71,
"grad_norm": 7.606550216674805,
"learning_rate": 9.157894736842106e-07,
"loss": 0.2663,
"step": 284
},
{
"Batch Mean": 0.00487518310546875,
"accuracy": 0.890625,
"epoch": 0.71,
"step": 284
},
{
"epoch": 0.7125,
"grad_norm": 5.981740474700928,
"learning_rate": 9.078947368421053e-07,
"loss": 0.2841,
"step": 285
},
{
"Batch Mean": 0.0009503364562988281,
"accuracy": 0.890625,
"epoch": 0.7125,
"step": 285
},
{
"epoch": 0.715,
"grad_norm": 6.947564601898193,
"learning_rate": 9e-07,
"loss": 0.3142,
"step": 286
},
{
"Batch Mean": 0.0575718879699707,
"accuracy": 0.8515625,
"epoch": 0.715,
"step": 286
},
{
"epoch": 0.7175,
"grad_norm": 8.3406343460083,
"learning_rate": 8.921052631578947e-07,
"loss": 0.3367,
"step": 287
},
{
"Batch Mean": -0.19788146018981934,
"accuracy": 0.8203125,
"epoch": 0.7175,
"step": 287
},
{
"epoch": 0.72,
"grad_norm": 8.32129192352295,
"learning_rate": 8.842105263157895e-07,
"loss": 0.3848,
"step": 288
},
{
"Batch Mean": -0.1112222671508789,
"accuracy": 0.875,
"epoch": 0.72,
"step": 288
},
{
"epoch": 0.7225,
"grad_norm": 6.531859397888184,
"learning_rate": 8.763157894736843e-07,
"loss": 0.2641,
"step": 289
},
{
"Batch Mean": -0.2570047974586487,
"accuracy": 0.8671875,
"epoch": 0.7225,
"step": 289
},
{
"epoch": 0.725,
"grad_norm": 7.266209125518799,
"learning_rate": 8.68421052631579e-07,
"loss": 0.3026,
"step": 290
},
{
"Batch Mean": 0.33021998405456543,
"accuracy": 0.84375,
"epoch": 0.725,
"step": 290
},
{
"epoch": 0.7275,
"grad_norm": 7.639843463897705,
"learning_rate": 8.605263157894737e-07,
"loss": 0.3685,
"step": 291
},
{
"Batch Mean": -0.1736621856689453,
"accuracy": 0.84375,
"epoch": 0.7275,
"step": 291
},
{
"epoch": 0.73,
"grad_norm": 9.343413352966309,
"learning_rate": 8.526315789473684e-07,
"loss": 0.3996,
"step": 292
},
{
"Batch Mean": -0.44870805740356445,
"accuracy": 0.8046875,
"epoch": 0.73,
"step": 292
},
{
"epoch": 0.7325,
"grad_norm": 10.543521881103516,
"learning_rate": 8.447368421052632e-07,
"loss": 0.4426,
"step": 293
},
{
"Batch Mean": 0.18045568466186523,
"accuracy": 0.828125,
"epoch": 0.7325,
"step": 293
},
{
"epoch": 0.735,
"grad_norm": 7.400291919708252,
"learning_rate": 8.368421052631578e-07,
"loss": 0.3405,
"step": 294
},
{
"Batch Mean": 0.21915674209594727,
"accuracy": 0.8125,
"epoch": 0.735,
"step": 294
},
{
"epoch": 0.7375,
"grad_norm": 7.723897457122803,
"learning_rate": 8.289473684210528e-07,
"loss": 0.3491,
"step": 295
},
{
"Batch Mean": -0.16765201091766357,
"accuracy": 0.8515625,
"epoch": 0.7375,
"step": 295
},
{
"epoch": 0.74,
"grad_norm": 6.981317043304443,
"learning_rate": 8.210526315789474e-07,
"loss": 0.3368,
"step": 296
},
{
"Batch Mean": 0.18343955278396606,
"accuracy": 0.84375,
"epoch": 0.74,
"step": 296
},
{
"epoch": 0.7425,
"grad_norm": 6.980420112609863,
"learning_rate": 8.131578947368422e-07,
"loss": 0.3735,
"step": 297
},
{
"Batch Mean": -0.08783578872680664,
"accuracy": 0.859375,
"epoch": 0.7425,
"step": 297
},
{
"epoch": 0.745,
"grad_norm": 6.303057670593262,
"learning_rate": 8.052631578947369e-07,
"loss": 0.3213,
"step": 298
},
{
"Batch Mean": 0.05080127716064453,
"accuracy": 0.8515625,
"epoch": 0.745,
"step": 298
},
{
"epoch": 0.7475,
"grad_norm": 6.454366207122803,
"learning_rate": 7.973684210526315e-07,
"loss": 0.331,
"step": 299
},
{
"Batch Mean": 0.1761825978755951,
"accuracy": 0.8359375,
"epoch": 0.7475,
"step": 299
},
{
"epoch": 0.75,
"grad_norm": 6.318911075592041,
"learning_rate": 7.894736842105263e-07,
"loss": 0.3321,
"step": 300
},
{
"Batch Mean": 0.34948229789733887,
"accuracy": 0.890625,
"epoch": 0.75,
"step": 300
},
{
"epoch": 0.7525,
"grad_norm": 7.687553405761719,
"learning_rate": 7.81578947368421e-07,
"loss": 0.2939,
"step": 301
},
{
"Batch Mean": -0.3907508850097656,
"accuracy": 0.859375,
"epoch": 0.7525,
"step": 301
},
{
"epoch": 0.755,
"grad_norm": 7.531171798706055,
"learning_rate": 7.736842105263159e-07,
"loss": 0.3419,
"step": 302
},
{
"Batch Mean": -0.1733400821685791,
"accuracy": 0.84375,
"epoch": 0.755,
"step": 302
},
{
"epoch": 0.7575,
"grad_norm": 7.942507266998291,
"learning_rate": 7.657894736842106e-07,
"loss": 0.4089,
"step": 303
},
{
"Batch Mean": -0.12437725067138672,
"accuracy": 0.8671875,
"epoch": 0.7575,
"step": 303
},
{
"epoch": 0.76,
"grad_norm": 6.244892120361328,
"learning_rate": 7.578947368421053e-07,
"loss": 0.2945,
"step": 304
},
{
"Batch Mean": 0.434220552444458,
"accuracy": 0.8828125,
"epoch": 0.76,
"step": 304
},
{
"epoch": 0.7625,
"grad_norm": 8.639275550842285,
"learning_rate": 7.5e-07,
"loss": 0.2779,
"step": 305
},
{
"Batch Mean": -0.026009559631347656,
"accuracy": 0.796875,
"epoch": 0.7625,
"step": 305
},
{
"epoch": 0.765,
"grad_norm": 7.740335464477539,
"learning_rate": 7.421052631578948e-07,
"loss": 0.4149,
"step": 306
},
{
"Batch Mean": 0.5953760147094727,
"accuracy": 0.7734375,
"epoch": 0.765,
"step": 306
},
{
"epoch": 0.7675,
"grad_norm": 12.056532859802246,
"learning_rate": 7.342105263157895e-07,
"loss": 0.487,
"step": 307
},
{
"Batch Mean": 0.028321504592895508,
"accuracy": 0.828125,
"epoch": 0.7675,
"step": 307
},
{
"epoch": 0.77,
"grad_norm": 7.377532005310059,
"learning_rate": 7.263157894736843e-07,
"loss": 0.3745,
"step": 308
},
{
"Batch Mean": 0.06304192543029785,
"accuracy": 0.890625,
"epoch": 0.77,
"step": 308
},
{
"epoch": 0.7725,
"grad_norm": 6.292819976806641,
"learning_rate": 7.18421052631579e-07,
"loss": 0.3162,
"step": 309
},
{
"Batch Mean": 0.032994747161865234,
"accuracy": 0.84375,
"epoch": 0.7725,
"step": 309
},
{
"epoch": 0.775,
"grad_norm": 6.667322158813477,
"learning_rate": 7.105263157894736e-07,
"loss": 0.3195,
"step": 310
},
{
"Batch Mean": 0.4151010513305664,
"accuracy": 0.875,
"epoch": 0.775,
"step": 310
},
{
"epoch": 0.7775,
"grad_norm": 7.714920997619629,
"learning_rate": 7.026315789473685e-07,
"loss": 0.3024,
"step": 311
},
{
"Batch Mean": -0.24331188201904297,
"accuracy": 0.8046875,
"epoch": 0.7775,
"step": 311
},
{
"epoch": 0.78,
"grad_norm": 9.307907104492188,
"learning_rate": 6.947368421052631e-07,
"loss": 0.3824,
"step": 312
},
{
"Batch Mean": -0.26612648367881775,
"accuracy": 0.828125,
"epoch": 0.78,
"step": 312
},
{
"epoch": 0.7825,
"grad_norm": 7.5575666427612305,
"learning_rate": 6.868421052631579e-07,
"loss": 0.3605,
"step": 313
},
{
"Batch Mean": -0.006037712097167969,
"accuracy": 0.8828125,
"epoch": 0.7825,
"step": 313
},
{
"epoch": 0.785,
"grad_norm": 5.356151103973389,
"learning_rate": 6.789473684210527e-07,
"loss": 0.2806,
"step": 314
},
{
"Batch Mean": -0.2599564790725708,
"accuracy": 0.859375,
"epoch": 0.785,
"step": 314
},
{
"epoch": 0.7875,
"grad_norm": 7.695684432983398,
"learning_rate": 6.710526315789474e-07,
"loss": 0.3637,
"step": 315
},
{
"Batch Mean": -0.11902396380901337,
"accuracy": 0.8515625,
"epoch": 0.7875,
"step": 315
},
{
"epoch": 0.79,
"grad_norm": 7.382601261138916,
"learning_rate": 6.631578947368421e-07,
"loss": 0.3501,
"step": 316
},
{
"Batch Mean": -0.0383458137512207,
"accuracy": 0.875,
"epoch": 0.79,
"step": 316
},
{
"epoch": 0.7925,
"grad_norm": 5.960835933685303,
"learning_rate": 6.552631578947369e-07,
"loss": 0.2952,
"step": 317
},
{
"Batch Mean": 0.10288763046264648,
"accuracy": 0.8515625,
"epoch": 0.7925,
"step": 317
},
{
"epoch": 0.795,
"grad_norm": 7.2606377601623535,
"learning_rate": 6.473684210526316e-07,
"loss": 0.3694,
"step": 318
},
{
"Batch Mean": -0.07616853713989258,
"accuracy": 0.875,
"epoch": 0.795,
"step": 318
},
{
"epoch": 0.7975,
"grad_norm": 6.600726127624512,
"learning_rate": 6.394736842105264e-07,
"loss": 0.334,
"step": 319
},
{
"Batch Mean": 0.07948935031890869,
"accuracy": 0.8515625,
"epoch": 0.7975,
"step": 319
},
{
"epoch": 0.8,
"grad_norm": 6.849763870239258,
"learning_rate": 6.31578947368421e-07,
"loss": 0.3302,
"step": 320
},
{
"Batch Mean": 0.2501535415649414,
"accuracy": 0.7890625,
"epoch": 0.8,
"step": 320
},
{
"epoch": 0.8025,
"grad_norm": 8.018654823303223,
"learning_rate": 6.236842105263158e-07,
"loss": 0.3944,
"step": 321
},
{
"Batch Mean": 0.3222217559814453,
"accuracy": 0.8671875,
"epoch": 0.8025,
"step": 321
},
{
"epoch": 0.805,
"grad_norm": 7.5475568771362305,
"learning_rate": 6.157894736842106e-07,
"loss": 0.3073,
"step": 322
},
{
"Batch Mean": 0.09771537780761719,
"accuracy": 0.8515625,
"epoch": 0.805,
"step": 322
},
{
"epoch": 0.8075,
"grad_norm": 7.379605770111084,
"learning_rate": 6.078947368421052e-07,
"loss": 0.3698,
"step": 323
},
{
"Batch Mean": -0.3487367630004883,
"accuracy": 0.8671875,
"epoch": 0.8075,
"step": 323
},
{
"epoch": 0.81,
"grad_norm": 11.469392776489258,
"learning_rate": 6.000000000000001e-07,
"loss": 0.3105,
"step": 324
},
{
"Batch Mean": -0.1531437635421753,
"accuracy": 0.921875,
"epoch": 0.81,
"step": 324
},
{
"epoch": 0.8125,
"grad_norm": 6.025749683380127,
"learning_rate": 5.921052631578947e-07,
"loss": 0.2459,
"step": 325
},
{
"Batch Mean": 0.16370129585266113,
"accuracy": 0.8515625,
"epoch": 0.8125,
"step": 325
},
{
"epoch": 0.815,
"grad_norm": 7.14030122756958,
"learning_rate": 5.842105263157895e-07,
"loss": 0.3646,
"step": 326
},
{
"Batch Mean": 0.16911697387695312,
"accuracy": 0.8515625,
"epoch": 0.815,
"step": 326
},
{
"epoch": 0.8175,
"grad_norm": 6.381271839141846,
"learning_rate": 5.763157894736842e-07,
"loss": 0.295,
"step": 327
},
{
"Batch Mean": -0.05367541313171387,
"accuracy": 0.828125,
"epoch": 0.8175,
"step": 327
},
{
"epoch": 0.82,
"grad_norm": 6.9001569747924805,
"learning_rate": 5.68421052631579e-07,
"loss": 0.3245,
"step": 328
},
{
"Batch Mean": -0.2840130031108856,
"accuracy": 0.875,
"epoch": 0.82,
"step": 328
},
{
"epoch": 0.8225,
"grad_norm": 7.5845136642456055,
"learning_rate": 5.605263157894737e-07,
"loss": 0.3066,
"step": 329
},
{
"Batch Mean": -0.1148611381649971,
"accuracy": 0.875,
"epoch": 0.8225,
"step": 329
},
{
"epoch": 0.825,
"grad_norm": 7.100255966186523,
"learning_rate": 5.526315789473684e-07,
"loss": 0.3135,
"step": 330
},
{
"Batch Mean": -0.19996225833892822,
"accuracy": 0.8515625,
"epoch": 0.825,
"step": 330
},
{
"epoch": 0.8275,
"grad_norm": 7.477622032165527,
"learning_rate": 5.447368421052632e-07,
"loss": 0.352,
"step": 331
},
{
"Batch Mean": 0.09491991996765137,
"accuracy": 0.8515625,
"epoch": 0.8275,
"step": 331
},
{
"epoch": 0.83,
"grad_norm": 6.584697723388672,
"learning_rate": 5.368421052631579e-07,
"loss": 0.3199,
"step": 332
},
{
"Batch Mean": 0.35883140563964844,
"accuracy": 0.765625,
"epoch": 0.83,
"step": 332
},
{
"epoch": 0.8325,
"grad_norm": 9.798020362854004,
"learning_rate": 5.289473684210526e-07,
"loss": 0.4704,
"step": 333
},
{
"Batch Mean": 0.11436104774475098,
"accuracy": 0.859375,
"epoch": 0.8325,
"step": 333
},
{
"epoch": 0.835,
"grad_norm": 6.903437614440918,
"learning_rate": 5.210526315789474e-07,
"loss": 0.3307,
"step": 334
},
{
"Batch Mean": -0.03193950653076172,
"accuracy": 0.84375,
"epoch": 0.835,
"step": 334
},
{
"epoch": 0.8375,
"grad_norm": 7.264538288116455,
"learning_rate": 5.131578947368422e-07,
"loss": 0.3143,
"step": 335
},
{
"Batch Mean": -0.10847663879394531,
"accuracy": 0.84375,
"epoch": 0.8375,
"step": 335
},
{
"epoch": 0.84,
"grad_norm": 7.623288154602051,
"learning_rate": 5.052631578947368e-07,
"loss": 0.3444,
"step": 336
},
{
"Batch Mean": -0.24753522872924805,
"accuracy": 0.8828125,
"epoch": 0.84,
"step": 336
},
{
"epoch": 0.8425,
"grad_norm": 7.630964279174805,
"learning_rate": 4.973684210526316e-07,
"loss": 0.2916,
"step": 337
},
{
"Batch Mean": -0.07360517978668213,
"accuracy": 0.875,
"epoch": 0.8425,
"step": 337
},
{
"epoch": 0.845,
"grad_norm": 6.032020568847656,
"learning_rate": 4.894736842105263e-07,
"loss": 0.2865,
"step": 338
},
{
"Batch Mean": 0.24509429931640625,
"accuracy": 0.8515625,
"epoch": 0.845,
"step": 338
},
{
"epoch": 0.8475,
"grad_norm": 8.249316215515137,
"learning_rate": 4.81578947368421e-07,
"loss": 0.3729,
"step": 339
},
{
"Batch Mean": 0.5386490821838379,
"accuracy": 0.9140625,
"epoch": 0.8475,
"step": 339
},
{
"epoch": 0.85,
"grad_norm": 9.616825103759766,
"learning_rate": 4.736842105263158e-07,
"loss": 0.318,
"step": 340
},
{
"Batch Mean": 0.5909371376037598,
"accuracy": 0.8828125,
"epoch": 0.85,
"step": 340
},
{
"epoch": 0.8525,
"grad_norm": 9.413528442382812,
"learning_rate": 4.657894736842106e-07,
"loss": 0.3229,
"step": 341
},
{
"Batch Mean": -0.1252138614654541,
"accuracy": 0.921875,
"epoch": 0.8525,
"step": 341
},
{
"epoch": 0.855,
"grad_norm": 6.636141300201416,
"learning_rate": 4.578947368421053e-07,
"loss": 0.2011,
"step": 342
},
{
"Batch Mean": 0.18854296207427979,
"accuracy": 0.796875,
"epoch": 0.855,
"step": 342
},
{
"epoch": 0.8575,
"grad_norm": 8.591415405273438,
"learning_rate": 4.5e-07,
"loss": 0.3862,
"step": 343
},
{
"Batch Mean": 0.22980284690856934,
"accuracy": 0.828125,
"epoch": 0.8575,
"step": 343
},
{
"epoch": 0.86,
"grad_norm": 8.253018379211426,
"learning_rate": 4.421052631578947e-07,
"loss": 0.3537,
"step": 344
},
{
"Batch Mean": -0.2752387523651123,
"accuracy": 0.8828125,
"epoch": 0.86,
"step": 344
},
{
"epoch": 0.8625,
"grad_norm": 7.230133056640625,
"learning_rate": 4.342105263157895e-07,
"loss": 0.319,
"step": 345
},
{
"Batch Mean": -0.3212716579437256,
"accuracy": 0.8359375,
"epoch": 0.8625,
"step": 345
},
{
"epoch": 0.865,
"grad_norm": 8.605917930603027,
"learning_rate": 4.263157894736842e-07,
"loss": 0.3298,
"step": 346
},
{
"Batch Mean": 0.09139060974121094,
"accuracy": 0.75,
"epoch": 0.865,
"step": 346
},
{
"epoch": 0.8675,
"grad_norm": 11.82695198059082,
"learning_rate": 4.184210526315789e-07,
"loss": 0.4994,
"step": 347
},
{
"Batch Mean": -0.46064186096191406,
"accuracy": 0.8515625,
"epoch": 0.8675,
"step": 347
},
{
"epoch": 0.87,
"grad_norm": 9.056855201721191,
"learning_rate": 4.105263157894737e-07,
"loss": 0.3506,
"step": 348
},
{
"Batch Mean": -0.42308902740478516,
"accuracy": 0.875,
"epoch": 0.87,
"step": 348
},
{
"epoch": 0.8725,
"grad_norm": 7.866572380065918,
"learning_rate": 4.0263157894736845e-07,
"loss": 0.3114,
"step": 349
},
{
"Batch Mean": -0.3634154796600342,
"accuracy": 0.8515625,
"epoch": 0.8725,
"step": 349
},
{
"epoch": 0.875,
"grad_norm": 8.071781158447266,
"learning_rate": 3.9473684210526315e-07,
"loss": 0.3245,
"step": 350
},
{
"Batch Mean": -0.41735923290252686,
"accuracy": 0.859375,
"epoch": 0.875,
"step": 350
},
{
"epoch": 0.8775,
"grad_norm": 8.487600326538086,
"learning_rate": 3.8684210526315794e-07,
"loss": 0.3517,
"step": 351
},
{
"Batch Mean": -0.26699429750442505,
"accuracy": 0.8359375,
"epoch": 0.8775,
"step": 351
},
{
"epoch": 0.88,
"grad_norm": 7.854258060455322,
"learning_rate": 3.7894736842105264e-07,
"loss": 0.367,
"step": 352
},
{
"Batch Mean": -0.1109018325805664,
"accuracy": 0.828125,
"epoch": 0.88,
"step": 352
},
{
"epoch": 0.8825,
"grad_norm": 7.181438446044922,
"learning_rate": 3.710526315789474e-07,
"loss": 0.3465,
"step": 353
},
{
"Batch Mean": -0.2739065885543823,
"accuracy": 0.828125,
"epoch": 0.8825,
"step": 353
},
{
"epoch": 0.885,
"grad_norm": 9.101792335510254,
"learning_rate": 3.6315789473684213e-07,
"loss": 0.3449,
"step": 354
},
{
"Batch Mean": 0.4022037982940674,
"accuracy": 0.875,
"epoch": 0.885,
"step": 354
},
{
"epoch": 0.8875,
"grad_norm": 7.463392734527588,
"learning_rate": 3.552631578947368e-07,
"loss": 0.2631,
"step": 355
},
{
"Batch Mean": 0.23499655723571777,
"accuracy": 0.8125,
"epoch": 0.8875,
"step": 355
},
{
"epoch": 0.89,
"grad_norm": 7.035387992858887,
"learning_rate": 3.4736842105263157e-07,
"loss": 0.315,
"step": 356
},
{
"Batch Mean": -0.2142975926399231,
"accuracy": 0.7890625,
"epoch": 0.89,
"step": 356
},
{
"epoch": 0.8925,
"grad_norm": 8.919984817504883,
"learning_rate": 3.3947368421052636e-07,
"loss": 0.4086,
"step": 357
},
{
"Batch Mean": 0.031181931495666504,
"accuracy": 0.8203125,
"epoch": 0.8925,
"step": 357
},
{
"epoch": 0.895,
"grad_norm": 6.83364200592041,
"learning_rate": 3.3157894736842106e-07,
"loss": 0.3191,
"step": 358
},
{
"Batch Mean": 0.49400806427001953,
"accuracy": 0.8203125,
"epoch": 0.895,
"step": 358
},
{
"epoch": 0.8975,
"grad_norm": 9.162834167480469,
"learning_rate": 3.236842105263158e-07,
"loss": 0.3732,
"step": 359
},
{
"Batch Mean": 0.2738938331604004,
"accuracy": 0.8828125,
"epoch": 0.8975,
"step": 359
},
{
"epoch": 0.9,
"grad_norm": 6.925217151641846,
"learning_rate": 3.157894736842105e-07,
"loss": 0.2407,
"step": 360
},
{
"Batch Mean": 0.13143014907836914,
"accuracy": 0.9140625,
"epoch": 0.9,
"step": 360
},
{
"epoch": 0.9025,
"grad_norm": 6.340146541595459,
"learning_rate": 3.078947368421053e-07,
"loss": 0.2507,
"step": 361
},
{
"Batch Mean": 0.4825480580329895,
"accuracy": 0.8046875,
"epoch": 0.9025,
"step": 361
},
{
"epoch": 0.905,
"grad_norm": 10.098735809326172,
"learning_rate": 3.0000000000000004e-07,
"loss": 0.37,
"step": 362
},
{
"Batch Mean": 0.3404097557067871,
"accuracy": 0.90625,
"epoch": 0.905,
"step": 362
},
{
"epoch": 0.9075,
"grad_norm": 7.419557571411133,
"learning_rate": 2.9210526315789473e-07,
"loss": 0.2443,
"step": 363
},
{
"Batch Mean": 0.16706562042236328,
"accuracy": 0.828125,
"epoch": 0.9075,
"step": 363
},
{
"epoch": 0.91,
"grad_norm": 6.64844274520874,
"learning_rate": 2.842105263157895e-07,
"loss": 0.3675,
"step": 364
},
{
"Batch Mean": 0.310616135597229,
"accuracy": 0.8203125,
"epoch": 0.91,
"step": 364
},
{
"epoch": 0.9125,
"grad_norm": 7.948777198791504,
"learning_rate": 2.763157894736842e-07,
"loss": 0.3647,
"step": 365
},
{
"Batch Mean": 0.23025989532470703,
"accuracy": 0.828125,
"epoch": 0.9125,
"step": 365
},
{
"epoch": 0.915,
"grad_norm": 7.509435176849365,
"learning_rate": 2.6842105263157897e-07,
"loss": 0.3541,
"step": 366
},
{
"Batch Mean": -0.10943818092346191,
"accuracy": 0.8359375,
"epoch": 0.915,
"step": 366
},
{
"epoch": 0.9175,
"grad_norm": 6.719493389129639,
"learning_rate": 2.605263157894737e-07,
"loss": 0.3256,
"step": 367
},
{
"Batch Mean": -0.17235839366912842,
"accuracy": 0.8515625,
"epoch": 0.9175,
"step": 367
},
{
"epoch": 0.92,
"grad_norm": 6.673943519592285,
"learning_rate": 2.526315789473684e-07,
"loss": 0.2851,
"step": 368
},
{
"Batch Mean": -0.15262317657470703,
"accuracy": 0.875,
"epoch": 0.92,
"step": 368
},
{
"epoch": 0.9225,
"grad_norm": 7.30422830581665,
"learning_rate": 2.4473684210526315e-07,
"loss": 0.3468,
"step": 369
},
{
"Batch Mean": 0.043369293212890625,
"accuracy": 0.8125,
"epoch": 0.9225,
"step": 369
},
{
"epoch": 0.925,
"grad_norm": 7.191950798034668,
"learning_rate": 2.368421052631579e-07,
"loss": 0.341,
"step": 370
},
{
"Batch Mean": -0.17032605409622192,
"accuracy": 0.875,
"epoch": 0.925,
"step": 370
},
{
"epoch": 0.9275,
"grad_norm": 7.00750732421875,
"learning_rate": 2.2894736842105264e-07,
"loss": 0.32,
"step": 371
},
{
"Batch Mean": -0.1574878692626953,
"accuracy": 0.8515625,
"epoch": 0.9275,
"step": 371
},
{
"epoch": 0.93,
"grad_norm": 7.300047397613525,
"learning_rate": 2.2105263157894736e-07,
"loss": 0.3129,
"step": 372
},
{
"Batch Mean": -0.23789095878601074,
"accuracy": 0.8671875,
"epoch": 0.93,
"step": 372
},
{
"epoch": 0.9325,
"grad_norm": 7.296698093414307,
"learning_rate": 2.131578947368421e-07,
"loss": 0.3159,
"step": 373
},
{
"Batch Mean": -0.10532283782958984,
"accuracy": 0.8359375,
"epoch": 0.9325,
"step": 373
},
{
"epoch": 0.935,
"grad_norm": 7.050195693969727,
"learning_rate": 2.0526315789473685e-07,
"loss": 0.3718,
"step": 374
},
{
"Batch Mean": -0.22361278533935547,
"accuracy": 0.8984375,
"epoch": 0.935,
"step": 374
},
{
"epoch": 0.9375,
"grad_norm": 6.591440677642822,
"learning_rate": 1.9736842105263157e-07,
"loss": 0.2514,
"step": 375
},
{
"Batch Mean": 0.25964975357055664,
"accuracy": 0.8515625,
"epoch": 0.9375,
"step": 375
},
{
"epoch": 0.94,
"grad_norm": 7.184764385223389,
"learning_rate": 1.8947368421052632e-07,
"loss": 0.3271,
"step": 376
},
{
"Batch Mean": -0.29873597621917725,
"accuracy": 0.8828125,
"epoch": 0.94,
"step": 376
},
{
"epoch": 0.9425,
"grad_norm": 7.436588287353516,
"learning_rate": 1.8157894736842106e-07,
"loss": 0.3299,
"step": 377
},
{
"Batch Mean": -0.10654175281524658,
"accuracy": 0.8359375,
"epoch": 0.9425,
"step": 377
},
{
"epoch": 0.945,
"grad_norm": 6.680266380310059,
"learning_rate": 1.7368421052631578e-07,
"loss": 0.3424,
"step": 378
},
{
"Batch Mean": 0.13961675763130188,
"accuracy": 0.8046875,
"epoch": 0.945,
"step": 378
},
{
"epoch": 0.9475,
"grad_norm": 7.461141586303711,
"learning_rate": 1.6578947368421053e-07,
"loss": 0.3845,
"step": 379
},
{
"Batch Mean": 0.029703140258789062,
"accuracy": 0.8671875,
"epoch": 0.9475,
"step": 379
},
{
"epoch": 0.95,
"grad_norm": 6.326377868652344,
"learning_rate": 1.5789473684210525e-07,
"loss": 0.3241,
"step": 380
},
{
"Batch Mean": -0.23638677597045898,
"accuracy": 0.828125,
"epoch": 0.95,
"step": 380
},
{
"epoch": 0.9525,
"grad_norm": 8.736058235168457,
"learning_rate": 1.5000000000000002e-07,
"loss": 0.4079,
"step": 381
},
{
"Batch Mean": -0.24971461296081543,
"accuracy": 0.8515625,
"epoch": 0.9525,
"step": 381
},
{
"epoch": 0.955,
"grad_norm": 7.035191059112549,
"learning_rate": 1.4210526315789474e-07,
"loss": 0.2893,
"step": 382
},
{
"Batch Mean": -0.006674528121948242,
"accuracy": 0.875,
"epoch": 0.955,
"step": 382
},
{
"epoch": 0.9575,
"grad_norm": 6.71524715423584,
"learning_rate": 1.3421052631578948e-07,
"loss": 0.336,
"step": 383
},
{
"Batch Mean": -0.20042133331298828,
"accuracy": 0.875,
"epoch": 0.9575,
"step": 383
},
{
"epoch": 0.96,
"grad_norm": 7.782573223114014,
"learning_rate": 1.263157894736842e-07,
"loss": 0.3011,
"step": 384
},
{
"Batch Mean": -0.04409456253051758,
"accuracy": 0.8359375,
"epoch": 0.96,
"step": 384
},
{
"epoch": 0.9625,
"grad_norm": 6.905241012573242,
"learning_rate": 1.1842105263157895e-07,
"loss": 0.3674,
"step": 385
},
{
"Batch Mean": -0.38535165786743164,
"accuracy": 0.875,
"epoch": 0.9625,
"step": 385
},
{
"epoch": 0.965,
"grad_norm": 8.010607719421387,
"learning_rate": 1.1052631578947368e-07,
"loss": 0.2929,
"step": 386
},
{
"Batch Mean": 0.23045122623443604,
"accuracy": 0.8984375,
"epoch": 0.965,
"step": 386
},
{
"epoch": 0.9675,
"grad_norm": 6.622053623199463,
"learning_rate": 1.0263157894736843e-07,
"loss": 0.2811,
"step": 387
},
{
"Batch Mean": 0.2735708951950073,
"accuracy": 0.921875,
"epoch": 0.9675,
"step": 387
},
{
"epoch": 0.97,
"grad_norm": 6.48416805267334,
"learning_rate": 9.473684210526316e-08,
"loss": 0.2411,
"step": 388
},
{
"Batch Mean": -0.03578150272369385,
"accuracy": 0.8828125,
"epoch": 0.97,
"step": 388
},
{
"epoch": 0.9725,
"grad_norm": 6.5772833824157715,
"learning_rate": 8.684210526315789e-08,
"loss": 0.3403,
"step": 389
},
{
"Batch Mean": 0.15822243690490723,
"accuracy": 0.8671875,
"epoch": 0.9725,
"step": 389
},
{
"epoch": 0.975,
"grad_norm": 7.388429164886475,
"learning_rate": 7.894736842105262e-08,
"loss": 0.337,
"step": 390
},
{
"Batch Mean": 0.3468554615974426,
"accuracy": 0.859375,
"epoch": 0.975,
"step": 390
},
{
"epoch": 0.9775,
"grad_norm": 7.855053424835205,
"learning_rate": 7.105263157894737e-08,
"loss": 0.3561,
"step": 391
},
{
"Batch Mean": 0.1489858627319336,
"accuracy": 0.8125,
"epoch": 0.9775,
"step": 391
},
{
"epoch": 0.98,
"grad_norm": 8.02416706085205,
"learning_rate": 6.31578947368421e-08,
"loss": 0.39,
"step": 392
},
{
"Batch Mean": 0.35792016983032227,
"accuracy": 0.8828125,
"epoch": 0.98,
"step": 392
},
{
"epoch": 0.9825,
"grad_norm": 7.255843639373779,
"learning_rate": 5.526315789473684e-08,
"loss": 0.2829,
"step": 393
},
{
"Batch Mean": 0.0024873018264770508,
"accuracy": 0.8125,
"epoch": 0.9825,
"step": 393
},
{
"epoch": 0.985,
"grad_norm": 9.95192813873291,
"learning_rate": 4.736842105263158e-08,
"loss": 0.4514,
"step": 394
},
{
"Batch Mean": 0.41193270683288574,
"accuracy": 0.84375,
"epoch": 0.985,
"step": 394
},
{
"epoch": 0.9875,
"grad_norm": 9.13137435913086,
"learning_rate": 3.947368421052631e-08,
"loss": 0.3886,
"step": 395
},
{
"Batch Mean": 0.013708412647247314,
"accuracy": 0.875,
"epoch": 0.9875,
"step": 395
},
{
"epoch": 0.99,
"grad_norm": 6.254482269287109,
"learning_rate": 3.157894736842105e-08,
"loss": 0.2909,
"step": 396
},
{
"Batch Mean": -0.15633320808410645,
"accuracy": 0.859375,
"epoch": 0.99,
"step": 396
},
{
"epoch": 0.9925,
"grad_norm": 8.189301490783691,
"learning_rate": 2.368421052631579e-08,
"loss": 0.3179,
"step": 397
},
{
"Batch Mean": -0.004555225372314453,
"accuracy": 0.8359375,
"epoch": 0.9925,
"step": 397
},
{
"epoch": 0.995,
"grad_norm": 6.529176235198975,
"learning_rate": 1.5789473684210525e-08,
"loss": 0.349,
"step": 398
},
{
"Batch Mean": -0.15039438009262085,
"accuracy": 0.8125,
"epoch": 0.995,
"step": 398
},
{
"epoch": 0.9975,
"grad_norm": 8.09438419342041,
"learning_rate": 7.894736842105263e-09,
"loss": 0.4229,
"step": 399
},
{
"Batch Mean": 0.07588425278663635,
"accuracy": 0.8125,
"epoch": 0.9975,
"step": 399
},
{
"epoch": 1.0,
"grad_norm": 7.74286413192749,
"learning_rate": 0.0,
"loss": 0.3823,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}