{
"best_metric": 0.8154956698417664,
"best_model_checkpoint": "./lora-out/checkpoint-704",
"epoch": 1.0,
"eval_steps": 176,
"global_step": 704,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2e-05,
"loss": 1.0748,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 1.1153677701950073,
"eval_runtime": 59.6311,
"eval_samples_per_second": 6.708,
"eval_steps_per_second": 3.354,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 4e-05,
"loss": 1.1247,
"step": 2
},
{
"epoch": 0.0,
"learning_rate": 6e-05,
"loss": 1.1111,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 8e-05,
"loss": 1.0777,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 0.0001,
"loss": 1.033,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 0.00012,
"loss": 1.0021,
"step": 6
},
{
"epoch": 0.01,
"learning_rate": 0.00014,
"loss": 1.0017,
"step": 7
},
{
"epoch": 0.01,
"learning_rate": 0.00016,
"loss": 1.0205,
"step": 8
},
{
"epoch": 0.01,
"learning_rate": 0.00018,
"loss": 0.95,
"step": 9
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 1.0236,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 0.00019999974750358046,
"loss": 0.9656,
"step": 11
},
{
"epoch": 0.02,
"learning_rate": 0.00019999899001559682,
"loss": 0.9769,
"step": 12
},
{
"epoch": 0.02,
"learning_rate": 0.00019999772753987444,
"loss": 0.984,
"step": 13
},
{
"epoch": 0.02,
"learning_rate": 0.0001999959600827887,
"loss": 0.9227,
"step": 14
},
{
"epoch": 0.02,
"learning_rate": 0.0001999936876532651,
"loss": 0.9342,
"step": 15
},
{
"epoch": 0.02,
"learning_rate": 0.00019999091026277928,
"loss": 0.991,
"step": 16
},
{
"epoch": 0.02,
"learning_rate": 0.00019998762792535683,
"loss": 0.9632,
"step": 17
},
{
"epoch": 0.03,
"learning_rate": 0.00019998384065757335,
"loss": 0.9574,
"step": 18
},
{
"epoch": 0.03,
"learning_rate": 0.00019997954847855427,
"loss": 0.9778,
"step": 19
},
{
"epoch": 0.03,
"learning_rate": 0.00019997475140997475,
"loss": 0.8948,
"step": 20
},
{
"epoch": 0.03,
"learning_rate": 0.00019996944947605968,
"loss": 0.9359,
"step": 21
},
{
"epoch": 0.03,
"learning_rate": 0.00019996364270358346,
"loss": 0.8931,
"step": 22
},
{
"epoch": 0.03,
"learning_rate": 0.00019995733112186982,
"loss": 0.9468,
"step": 23
},
{
"epoch": 0.03,
"learning_rate": 0.00019995051476279186,
"loss": 0.9246,
"step": 24
},
{
"epoch": 0.04,
"learning_rate": 0.00019994319366077167,
"loss": 0.9223,
"step": 25
},
{
"epoch": 0.04,
"learning_rate": 0.00019993536785278032,
"loss": 0.8973,
"step": 26
},
{
"epoch": 0.04,
"learning_rate": 0.00019992703737833748,
"loss": 0.9316,
"step": 27
},
{
"epoch": 0.04,
"learning_rate": 0.0001999182022795116,
"loss": 0.9499,
"step": 28
},
{
"epoch": 0.04,
"learning_rate": 0.00019990886260091916,
"loss": 0.9069,
"step": 29
},
{
"epoch": 0.04,
"learning_rate": 0.00019989901838972496,
"loss": 0.9034,
"step": 30
},
{
"epoch": 0.04,
"learning_rate": 0.0001998886696956415,
"loss": 0.9329,
"step": 31
},
{
"epoch": 0.05,
"learning_rate": 0.000199877816570929,
"loss": 0.944,
"step": 32
},
{
"epoch": 0.05,
"learning_rate": 0.00019986645907039497,
"loss": 0.899,
"step": 33
},
{
"epoch": 0.05,
"learning_rate": 0.0001998545972513939,
"loss": 0.8994,
"step": 34
},
{
"epoch": 0.05,
"learning_rate": 0.00019984223117382714,
"loss": 0.88,
"step": 35
},
{
"epoch": 0.05,
"learning_rate": 0.00019982936090014256,
"loss": 0.9321,
"step": 36
},
{
"epoch": 0.05,
"learning_rate": 0.0001998159864953341,
"loss": 0.9437,
"step": 37
},
{
"epoch": 0.05,
"learning_rate": 0.0001998021080269415,
"loss": 0.8879,
"step": 38
},
{
"epoch": 0.06,
"learning_rate": 0.0001997877255650501,
"loss": 0.8921,
"step": 39
},
{
"epoch": 0.06,
"learning_rate": 0.00019977283918229022,
"loss": 0.8943,
"step": 40
},
{
"epoch": 0.06,
"learning_rate": 0.00019975744895383706,
"loss": 0.8991,
"step": 41
},
{
"epoch": 0.06,
"learning_rate": 0.00019974155495741024,
"loss": 0.8881,
"step": 42
},
{
"epoch": 0.06,
"learning_rate": 0.0001997251572732732,
"loss": 0.9018,
"step": 43
},
{
"epoch": 0.06,
"learning_rate": 0.00019970825598423315,
"loss": 0.8722,
"step": 44
},
{
"epoch": 0.06,
"learning_rate": 0.00019969085117564034,
"loss": 0.9355,
"step": 45
},
{
"epoch": 0.07,
"learning_rate": 0.0001996729429353878,
"loss": 0.9524,
"step": 46
},
{
"epoch": 0.07,
"learning_rate": 0.0001996545313539109,
"loss": 0.9006,
"step": 47
},
{
"epoch": 0.07,
"learning_rate": 0.00019963561652418683,
"loss": 0.869,
"step": 48
},
{
"epoch": 0.07,
"learning_rate": 0.0001996161985417341,
"loss": 0.8397,
"step": 49
},
{
"epoch": 0.07,
"learning_rate": 0.00019959627750461208,
"loss": 0.8915,
"step": 50
},
{
"epoch": 0.07,
"learning_rate": 0.0001995758535134206,
"loss": 0.8423,
"step": 51
},
{
"epoch": 0.07,
"learning_rate": 0.0001995549266712994,
"loss": 0.8994,
"step": 52
},
{
"epoch": 0.08,
"learning_rate": 0.00019953349708392752,
"loss": 0.8939,
"step": 53
},
{
"epoch": 0.08,
"learning_rate": 0.0001995115648595228,
"loss": 0.93,
"step": 54
},
{
"epoch": 0.08,
"learning_rate": 0.00019948913010884147,
"loss": 0.8913,
"step": 55
},
{
"epoch": 0.08,
"learning_rate": 0.00019946619294517736,
"loss": 0.8927,
"step": 56
},
{
"epoch": 0.08,
"learning_rate": 0.00019944275348436153,
"loss": 0.875,
"step": 57
},
{
"epoch": 0.08,
"learning_rate": 0.00019941881184476154,
"loss": 0.8199,
"step": 58
},
{
"epoch": 0.08,
"learning_rate": 0.000199394368147281,
"loss": 0.9137,
"step": 59
},
{
"epoch": 0.09,
"learning_rate": 0.00019936942251535882,
"loss": 0.8572,
"step": 60
},
{
"epoch": 0.09,
"learning_rate": 0.00019934397507496865,
"loss": 0.9061,
"step": 61
},
{
"epoch": 0.09,
"learning_rate": 0.00019931802595461826,
"loss": 0.9258,
"step": 62
},
{
"epoch": 0.09,
"learning_rate": 0.0001992915752853488,
"loss": 0.945,
"step": 63
},
{
"epoch": 0.09,
"learning_rate": 0.00019926462320073429,
"loss": 0.8826,
"step": 64
},
{
"epoch": 0.09,
"learning_rate": 0.00019923716983688086,
"loss": 0.8929,
"step": 65
},
{
"epoch": 0.09,
"learning_rate": 0.00019920921533242596,
"loss": 0.8848,
"step": 66
},
{
"epoch": 0.1,
"learning_rate": 0.00019918075982853793,
"loss": 0.9109,
"step": 67
},
{
"epoch": 0.1,
"learning_rate": 0.0001991518034689149,
"loss": 0.9073,
"step": 68
},
{
"epoch": 0.1,
"learning_rate": 0.00019912234639978454,
"loss": 0.9215,
"step": 69
},
{
"epoch": 0.1,
"learning_rate": 0.00019909238876990285,
"loss": 0.8772,
"step": 70
},
{
"epoch": 0.1,
"learning_rate": 0.00019906193073055374,
"loss": 0.8677,
"step": 71
},
{
"epoch": 0.1,
"learning_rate": 0.00019903097243554815,
"loss": 0.8607,
"step": 72
},
{
"epoch": 0.1,
"learning_rate": 0.00019899951404122327,
"loss": 0.8926,
"step": 73
},
{
"epoch": 0.11,
"learning_rate": 0.00019896755570644165,
"loss": 0.8746,
"step": 74
},
{
"epoch": 0.11,
"learning_rate": 0.00019893509759259068,
"loss": 0.8866,
"step": 75
},
{
"epoch": 0.11,
"learning_rate": 0.00019890213986358148,
"loss": 0.853,
"step": 76
},
{
"epoch": 0.11,
"learning_rate": 0.00019886868268584822,
"loss": 0.8957,
"step": 77
},
{
"epoch": 0.11,
"learning_rate": 0.00019883472622834723,
"loss": 0.9247,
"step": 78
},
{
"epoch": 0.11,
"learning_rate": 0.00019880027066255623,
"loss": 0.8831,
"step": 79
},
{
"epoch": 0.11,
"learning_rate": 0.00019876531616247337,
"loss": 0.8998,
"step": 80
},
{
"epoch": 0.12,
"learning_rate": 0.00019872986290461633,
"loss": 0.8242,
"step": 81
},
{
"epoch": 0.12,
"learning_rate": 0.00019869391106802154,
"loss": 0.8815,
"step": 82
},
{
"epoch": 0.12,
"learning_rate": 0.00019865746083424317,
"loss": 0.8705,
"step": 83
},
{
"epoch": 0.12,
"learning_rate": 0.00019862051238735232,
"loss": 0.8767,
"step": 84
},
{
"epoch": 0.12,
"learning_rate": 0.00019858306591393602,
"loss": 0.9376,
"step": 85
},
{
"epoch": 0.12,
"learning_rate": 0.00019854512160309625,
"loss": 0.875,
"step": 86
},
{
"epoch": 0.12,
"learning_rate": 0.00019850667964644908,
"loss": 0.8512,
"step": 87
},
{
"epoch": 0.12,
"learning_rate": 0.00019846774023812364,
"loss": 0.8755,
"step": 88
},
{
"epoch": 0.13,
"learning_rate": 0.00019842830357476112,
"loss": 0.8862,
"step": 89
},
{
"epoch": 0.13,
"learning_rate": 0.0001983883698555139,
"loss": 0.8209,
"step": 90
},
{
"epoch": 0.13,
"learning_rate": 0.00019834793928204435,
"loss": 0.8633,
"step": 91
},
{
"epoch": 0.13,
"learning_rate": 0.000198307012058524,
"loss": 0.8421,
"step": 92
},
{
"epoch": 0.13,
"learning_rate": 0.00019826558839163236,
"loss": 0.8771,
"step": 93
},
{
"epoch": 0.13,
"learning_rate": 0.00019822366849055602,
"loss": 0.8392,
"step": 94
},
{
"epoch": 0.13,
"learning_rate": 0.0001981812525669875,
"loss": 0.8565,
"step": 95
},
{
"epoch": 0.14,
"learning_rate": 0.00019813834083512414,
"loss": 0.8965,
"step": 96
},
{
"epoch": 0.14,
"learning_rate": 0.00019809493351166711,
"loss": 0.8669,
"step": 97
},
{
"epoch": 0.14,
"learning_rate": 0.0001980510308158203,
"loss": 0.8476,
"step": 98
},
{
"epoch": 0.14,
"learning_rate": 0.00019800663296928918,
"loss": 0.8604,
"step": 99
},
{
"epoch": 0.14,
"learning_rate": 0.0001979617401962797,
"loss": 0.8687,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 0.0001979163527234971,
"loss": 0.8716,
"step": 101
},
{
"epoch": 0.14,
"learning_rate": 0.00019787047078014496,
"loss": 0.8727,
"step": 102
},
{
"epoch": 0.15,
"learning_rate": 0.00019782409459792371,
"loss": 0.8484,
"step": 103
},
{
"epoch": 0.15,
"learning_rate": 0.00019777722441102985,
"loss": 0.8811,
"step": 104
},
{
"epoch": 0.15,
"learning_rate": 0.00019772986045615438,
"loss": 0.9194,
"step": 105
},
{
"epoch": 0.15,
"learning_rate": 0.00019768200297248193,
"loss": 0.8592,
"step": 106
},
{
"epoch": 0.15,
"learning_rate": 0.0001976336522016893,
"loss": 0.856,
"step": 107
},
{
"epoch": 0.15,
"learning_rate": 0.00019758480838794453,
"loss": 0.8841,
"step": 108
},
{
"epoch": 0.15,
"learning_rate": 0.0001975354717779053,
"loss": 0.8962,
"step": 109
},
{
"epoch": 0.16,
"learning_rate": 0.000197485642620718,
"loss": 0.8415,
"step": 110
},
{
"epoch": 0.16,
"learning_rate": 0.00019743532116801624,
"loss": 0.8843,
"step": 111
},
{
"epoch": 0.16,
"learning_rate": 0.0001973845076739198,
"loss": 0.8918,
"step": 112
},
{
"epoch": 0.16,
"learning_rate": 0.00019733320239503322,
"loss": 0.836,
"step": 113
},
{
"epoch": 0.16,
"learning_rate": 0.00019728140559044445,
"loss": 0.8442,
"step": 114
},
{
"epoch": 0.16,
"learning_rate": 0.00019722911752172363,
"loss": 0.8471,
"step": 115
},
{
"epoch": 0.16,
"learning_rate": 0.00019717633845292175,
"loss": 0.8407,
"step": 116
},
{
"epoch": 0.17,
"learning_rate": 0.00019712306865056936,
"loss": 0.8982,
"step": 117
},
{
"epoch": 0.17,
"learning_rate": 0.00019706930838367517,
"loss": 0.8233,
"step": 118
},
{
"epoch": 0.17,
"learning_rate": 0.0001970150579237246,
"loss": 0.8739,
"step": 119
},
{
"epoch": 0.17,
"learning_rate": 0.0001969603175446787,
"loss": 0.8956,
"step": 120
},
{
"epoch": 0.17,
"learning_rate": 0.00019690508752297234,
"loss": 0.8368,
"step": 121
},
{
"epoch": 0.17,
"learning_rate": 0.00019684936813751326,
"loss": 0.8628,
"step": 122
},
{
"epoch": 0.17,
"learning_rate": 0.00019679315966968035,
"loss": 0.8485,
"step": 123
},
{
"epoch": 0.18,
"learning_rate": 0.00019673646240332232,
"loss": 0.8465,
"step": 124
},
{
"epoch": 0.18,
"learning_rate": 0.00019667927662475636,
"loss": 0.8258,
"step": 125
},
{
"epoch": 0.18,
"learning_rate": 0.0001966216026227665,
"loss": 0.8668,
"step": 126
},
{
"epoch": 0.18,
"learning_rate": 0.00019656344068860233,
"loss": 0.8556,
"step": 127
},
{
"epoch": 0.18,
"learning_rate": 0.00019650479111597748,
"loss": 0.8997,
"step": 128
},
{
"epoch": 0.18,
"learning_rate": 0.00019644565420106805,
"loss": 0.8958,
"step": 129
},
{
"epoch": 0.18,
"learning_rate": 0.0001963860302425113,
"loss": 0.8866,
"step": 130
},
{
"epoch": 0.19,
"learning_rate": 0.00019632591954140387,
"loss": 0.8723,
"step": 131
},
{
"epoch": 0.19,
"learning_rate": 0.00019626532240130055,
"loss": 0.8614,
"step": 132
},
{
"epoch": 0.19,
"learning_rate": 0.00019620423912821252,
"loss": 0.8564,
"step": 133
},
{
"epoch": 0.19,
"learning_rate": 0.00019614267003060593,
"loss": 0.8998,
"step": 134
},
{
"epoch": 0.19,
"learning_rate": 0.00019608061541940037,
"loss": 0.8743,
"step": 135
},
{
"epoch": 0.19,
"learning_rate": 0.00019601807560796713,
"loss": 0.8084,
"step": 136
},
{
"epoch": 0.19,
"learning_rate": 0.00019595505091212783,
"loss": 0.8503,
"step": 137
},
{
"epoch": 0.2,
"learning_rate": 0.0001958915416501526,
"loss": 0.8093,
"step": 138
},
{
"epoch": 0.2,
"learning_rate": 0.00019582754814275873,
"loss": 0.8413,
"step": 139
},
{
"epoch": 0.2,
"learning_rate": 0.00019576307071310882,
"loss": 0.9218,
"step": 140
},
{
"epoch": 0.2,
"learning_rate": 0.00019569810968680926,
"loss": 0.8248,
"step": 141
},
{
"epoch": 0.2,
"learning_rate": 0.00019563266539190862,
"loss": 0.8592,
"step": 142
},
{
"epoch": 0.2,
"learning_rate": 0.00019556673815889587,
"loss": 0.8835,
"step": 143
},
{
"epoch": 0.2,
"learning_rate": 0.00019550032832069882,
"loss": 0.8353,
"step": 144
},
{
"epoch": 0.21,
"learning_rate": 0.00019543343621268244,
"loss": 0.8212,
"step": 145
},
{
"epoch": 0.21,
"learning_rate": 0.000195366062172647,
"loss": 0.8606,
"step": 146
},
{
"epoch": 0.21,
"learning_rate": 0.00019529820654082665,
"loss": 0.8585,
"step": 147
},
{
"epoch": 0.21,
"learning_rate": 0.00019522986965988745,
"loss": 0.9144,
"step": 148
},
{
"epoch": 0.21,
"learning_rate": 0.00019516105187492575,
"loss": 0.9081,
"step": 149
},
{
"epoch": 0.21,
"learning_rate": 0.00019509175353346644,
"loss": 0.823,
"step": 150
},
{
"epoch": 0.21,
"learning_rate": 0.0001950219749854612,
"loss": 0.8732,
"step": 151
},
{
"epoch": 0.22,
"learning_rate": 0.00019495171658328664,
"loss": 0.8625,
"step": 152
},
{
"epoch": 0.22,
"learning_rate": 0.00019488097868174275,
"loss": 0.8403,
"step": 153
},
{
"epoch": 0.22,
"learning_rate": 0.00019480976163805078,
"loss": 0.8427,
"step": 154
},
{
"epoch": 0.22,
"learning_rate": 0.00019473806581185175,
"loss": 0.8417,
"step": 155
},
{
"epoch": 0.22,
"learning_rate": 0.00019466589156520448,
"loss": 0.8334,
"step": 156
},
{
"epoch": 0.22,
"learning_rate": 0.00019459323926258366,
"loss": 0.8514,
"step": 157
},
{
"epoch": 0.22,
"learning_rate": 0.00019452010927087826,
"loss": 0.7686,
"step": 158
},
{
"epoch": 0.23,
"learning_rate": 0.00019444650195938953,
"loss": 0.8638,
"step": 159
},
{
"epoch": 0.23,
"learning_rate": 0.00019437241769982907,
"loss": 0.8867,
"step": 160
},
{
"epoch": 0.23,
"learning_rate": 0.00019429785686631714,
"loss": 0.8116,
"step": 161
},
{
"epoch": 0.23,
"learning_rate": 0.00019422281983538054,
"loss": 0.8035,
"step": 162
},
{
"epoch": 0.23,
"learning_rate": 0.000194147306985951,
"loss": 0.8244,
"step": 163
},
{
"epoch": 0.23,
"learning_rate": 0.0001940713186993629,
"loss": 0.8609,
"step": 164
},
{
"epoch": 0.23,
"learning_rate": 0.00019399485535935172,
"loss": 0.8357,
"step": 165
},
{
"epoch": 0.24,
"learning_rate": 0.00019391791735205182,
"loss": 0.8386,
"step": 166
},
{
"epoch": 0.24,
"learning_rate": 0.00019384050506599462,
"loss": 0.8402,
"step": 167
},
{
"epoch": 0.24,
"learning_rate": 0.00019376261889210664,
"loss": 0.8348,
"step": 168
},
{
"epoch": 0.24,
"learning_rate": 0.00019368425922370748,
"loss": 0.8332,
"step": 169
},
{
"epoch": 0.24,
"learning_rate": 0.00019360542645650784,
"loss": 0.8906,
"step": 170
},
{
"epoch": 0.24,
"learning_rate": 0.00019352612098860755,
"loss": 0.7958,
"step": 171
},
{
"epoch": 0.24,
"learning_rate": 0.00019344634322049356,
"loss": 0.8493,
"step": 172
},
{
"epoch": 0.25,
"learning_rate": 0.00019336609355503787,
"loss": 0.8244,
"step": 173
},
{
"epoch": 0.25,
"learning_rate": 0.00019328537239749553,
"loss": 0.8464,
"step": 174
},
{
"epoch": 0.25,
"learning_rate": 0.00019320418015550265,
"loss": 0.8518,
"step": 175
},
{
"epoch": 0.25,
"learning_rate": 0.00019312251723907422,
"loss": 0.8635,
"step": 176
},
{
"epoch": 0.25,
"eval_loss": 0.873156726360321,
"eval_runtime": 58.1152,
"eval_samples_per_second": 6.883,
"eval_steps_per_second": 3.441,
"step": 176
},
{
"epoch": 0.25,
"learning_rate": 0.0001930403840606021,
"loss": 0.9014,
"step": 177
},
{
"epoch": 0.25,
"learning_rate": 0.00019295778103485298,
"loss": 0.8353,
"step": 178
},
{
"epoch": 0.25,
"learning_rate": 0.00019287470857896622,
"loss": 0.8487,
"step": 179
},
{
"epoch": 0.26,
"learning_rate": 0.00019279116711245177,
"loss": 0.8616,
"step": 180
},
{
"epoch": 0.26,
"learning_rate": 0.00019270715705718808,
"loss": 0.8197,
"step": 181
},
{
"epoch": 0.26,
"learning_rate": 0.00019262267883741986,
"loss": 0.8525,
"step": 182
},
{
"epoch": 0.26,
"learning_rate": 0.0001925377328797561,
"loss": 0.8649,
"step": 183
},
{
"epoch": 0.26,
"learning_rate": 0.00019245231961316782,
"loss": 0.8416,
"step": 184
},
{
"epoch": 0.26,
"learning_rate": 0.00019236643946898588,
"loss": 0.8973,
"step": 185
},
{
"epoch": 0.26,
"learning_rate": 0.00019228009288089885,
"loss": 0.7733,
"step": 186
},
{
"epoch": 0.27,
"learning_rate": 0.00019219328028495083,
"loss": 0.8239,
"step": 187
},
{
"epoch": 0.27,
"learning_rate": 0.00019210600211953918,
"loss": 0.7993,
"step": 188
},
{
"epoch": 0.27,
"learning_rate": 0.00019201825882541245,
"loss": 0.8195,
"step": 189
},
{
"epoch": 0.27,
"learning_rate": 0.00019193005084566797,
"loss": 0.7977,
"step": 190
},
{
"epoch": 0.27,
"learning_rate": 0.00019184137862574973,
"loss": 0.85,
"step": 191
},
{
"epoch": 0.27,
"learning_rate": 0.00019175224261344602,
"loss": 0.8245,
"step": 192
},
{
"epoch": 0.27,
"learning_rate": 0.00019166264325888742,
"loss": 0.8929,
"step": 193
},
{
"epoch": 0.28,
"learning_rate": 0.00019157258101454416,
"loss": 0.8194,
"step": 194
},
{
"epoch": 0.28,
"learning_rate": 0.00019148205633522414,
"loss": 0.8493,
"step": 195
},
{
"epoch": 0.28,
"learning_rate": 0.00019139106967807062,
"loss": 0.8489,
"step": 196
},
{
"epoch": 0.28,
"learning_rate": 0.00019129962150255957,
"loss": 0.862,
"step": 197
},
{
"epoch": 0.28,
"learning_rate": 0.00019120771227049778,
"loss": 0.8723,
"step": 198
},
{
"epoch": 0.28,
"learning_rate": 0.00019111534244602033,
"loss": 0.8193,
"step": 199
},
{
"epoch": 0.28,
"learning_rate": 0.00019102251249558812,
"loss": 0.8619,
"step": 200
},
{
"epoch": 0.29,
"learning_rate": 0.00019092922288798585,
"loss": 0.8299,
"step": 201
},
{
"epoch": 0.29,
"learning_rate": 0.0001908354740943193,
"loss": 0.8403,
"step": 202
},
{
"epoch": 0.29,
"learning_rate": 0.0001907412665880132,
"loss": 0.8623,
"step": 203
},
{
"epoch": 0.29,
"learning_rate": 0.00019064660084480868,
"loss": 0.8998,
"step": 204
},
{
"epoch": 0.29,
"learning_rate": 0.00019055147734276098,
"loss": 0.8595,
"step": 205
},
{
"epoch": 0.29,
"learning_rate": 0.00019045589656223696,
"loss": 0.8264,
"step": 206
},
{
"epoch": 0.29,
"learning_rate": 0.00019035985898591275,
"loss": 0.8368,
"step": 207
},
{
"epoch": 0.3,
"learning_rate": 0.0001902633650987712,
"loss": 0.8436,
"step": 208
},
{
"epoch": 0.3,
"learning_rate": 0.00019016641538809954,
"loss": 0.8625,
"step": 209
},
{
"epoch": 0.3,
"learning_rate": 0.00019006901034348688,
"loss": 0.8775,
"step": 210
},
{
"epoch": 0.3,
"learning_rate": 0.0001899711504568217,
"loss": 0.7985,
"step": 211
},
{
"epoch": 0.3,
"learning_rate": 0.00018987283622228944,
"loss": 0.8656,
"step": 212
},
{
"epoch": 0.3,
"learning_rate": 0.00018977406813636992,
"loss": 0.8672,
"step": 213
},
{
"epoch": 0.3,
"learning_rate": 0.00018967484669783492,
"loss": 0.8057,
"step": 214
},
{
"epoch": 0.31,
"learning_rate": 0.0001895751724077456,
"loss": 0.8596,
"step": 215
},
{
"epoch": 0.31,
"learning_rate": 0.00018947504576944998,
"loss": 0.8373,
"step": 216
},
{
"epoch": 0.31,
"learning_rate": 0.0001893744672885804,
"loss": 0.8559,
"step": 217
},
{
"epoch": 0.31,
"learning_rate": 0.000189273437473051,
"loss": 0.7931,
"step": 218
},
{
"epoch": 0.31,
"learning_rate": 0.00018917195683305517,
"loss": 0.8594,
"step": 219
},
{
"epoch": 0.31,
"learning_rate": 0.00018907002588106276,
"loss": 0.877,
"step": 220
},
{
"epoch": 0.31,
"learning_rate": 0.00018896764513181785,
"loss": 0.8297,
"step": 221
},
{
"epoch": 0.32,
"learning_rate": 0.0001888648151023359,
"loss": 0.8407,
"step": 222
},
{
"epoch": 0.32,
"learning_rate": 0.00018876153631190116,
"loss": 0.8597,
"step": 223
},
{
"epoch": 0.32,
"learning_rate": 0.00018865780928206417,
"loss": 0.8647,
"step": 224
},
{
"epoch": 0.32,
"learning_rate": 0.00018855363453663894,
"loss": 0.8075,
"step": 225
},
{
"epoch": 0.32,
"learning_rate": 0.0001884490126017005,
"loss": 0.8438,
"step": 226
},
{
"epoch": 0.32,
"learning_rate": 0.00018834394400558217,
"loss": 0.8274,
"step": 227
},
{
"epoch": 0.32,
"learning_rate": 0.0001882384292788728,
"loss": 0.8721,
"step": 228
},
{
"epoch": 0.33,
"learning_rate": 0.0001881324689544142,
"loss": 0.8204,
"step": 229
},
{
"epoch": 0.33,
"learning_rate": 0.00018802606356729844,
"loss": 0.8424,
"step": 230
},
{
"epoch": 0.33,
"learning_rate": 0.0001879192136548651,
"loss": 0.8044,
"step": 231
},
{
"epoch": 0.33,
"learning_rate": 0.00018781191975669859,
"loss": 0.8082,
"step": 232
},
{
"epoch": 0.33,
"learning_rate": 0.0001877041824146254,
"loss": 0.8438,
"step": 233
},
{
"epoch": 0.33,
"learning_rate": 0.0001875960021727114,
"loss": 0.8748,
"step": 234
},
{
"epoch": 0.33,
"learning_rate": 0.00018748737957725904,
"loss": 0.854,
"step": 235
},
{
"epoch": 0.34,
"learning_rate": 0.0001873783151768047,
"loss": 0.851,
"step": 236
},
{
"epoch": 0.34,
"learning_rate": 0.00018726880952211575,
"loss": 0.8547,
"step": 237
},
{
"epoch": 0.34,
"learning_rate": 0.0001871588631661879,
"loss": 0.8399,
"step": 238
},
{
"epoch": 0.34,
"learning_rate": 0.0001870484766642424,
"loss": 0.8749,
"step": 239
},
{
"epoch": 0.34,
"learning_rate": 0.00018693765057372318,
"loss": 0.8127,
"step": 240
},
{
"epoch": 0.34,
"learning_rate": 0.00018682638545429407,
"loss": 0.8481,
"step": 241
},
{
"epoch": 0.34,
"learning_rate": 0.00018671468186783592,
"loss": 0.8164,
"step": 242
},
{
"epoch": 0.35,
"learning_rate": 0.00018660254037844388,
"loss": 0.8275,
"step": 243
},
{
"epoch": 0.35,
"learning_rate": 0.0001864899615524244,
"loss": 0.799,
"step": 244
},
{
"epoch": 0.35,
"learning_rate": 0.00018637694595829252,
"loss": 0.7889,
"step": 245
},
{
"epoch": 0.35,
"learning_rate": 0.0001862634941667689,
"loss": 0.8524,
"step": 246
},
{
"epoch": 0.35,
"learning_rate": 0.00018614960675077696,
"loss": 0.8409,
"step": 247
},
{
"epoch": 0.35,
"learning_rate": 0.00018603528428544,
"loss": 0.7696,
"step": 248
},
{
"epoch": 0.35,
"learning_rate": 0.00018592052734807825,
"loss": 0.8262,
"step": 249
},
{
"epoch": 0.36,
"learning_rate": 0.00018580533651820603,
"loss": 0.8208,
"step": 250
},
{
"epoch": 0.36,
"learning_rate": 0.00018568971237752882,
"loss": 0.8284,
"step": 251
},
{
"epoch": 0.36,
"learning_rate": 0.00018557365550994024,
"loss": 0.8471,
"step": 252
},
{
"epoch": 0.36,
"learning_rate": 0.00018545716650151915,
"loss": 0.8417,
"step": 253
},
{
"epoch": 0.36,
"learning_rate": 0.0001853402459405267,
"loss": 0.8051,
"step": 254
},
{
"epoch": 0.36,
"learning_rate": 0.00018522289441740334,
"loss": 0.8433,
"step": 255
},
{
"epoch": 0.36,
"learning_rate": 0.00018510511252476587,
"loss": 0.8526,
"step": 256
},
{
"epoch": 0.37,
"learning_rate": 0.00018498690085740445,
"loss": 0.805,
"step": 257
},
{
"epoch": 0.37,
"learning_rate": 0.00018486826001227948,
"loss": 0.8524,
"step": 258
},
{
"epoch": 0.37,
"learning_rate": 0.00018474919058851877,
"loss": 0.8487,
"step": 259
},
{
"epoch": 0.37,
"learning_rate": 0.00018462969318741433,
"loss": 0.8604,
"step": 260
},
{
"epoch": 0.37,
"learning_rate": 0.00018450976841241951,
"loss": 0.8518,
"step": 261
},
{
"epoch": 0.37,
"learning_rate": 0.0001843894168691459,
"loss": 0.769,
"step": 262
},
{
"epoch": 0.37,
"learning_rate": 0.0001842686391653601,
"loss": 0.8178,
"step": 263
},
{
"epoch": 0.38,
"learning_rate": 0.00018414743591098085,
"loss": 0.8224,
"step": 264
},
{
"epoch": 0.38,
"learning_rate": 0.00018402580771807595,
"loss": 0.8227,
"step": 265
},
{
"epoch": 0.38,
"learning_rate": 0.00018390375520085903,
"loss": 0.8333,
"step": 266
},
{
"epoch": 0.38,
"learning_rate": 0.00018378127897568662,
"loss": 0.8207,
"step": 267
},
{
"epoch": 0.38,
"learning_rate": 0.00018365837966105483,
"loss": 0.8144,
"step": 268
},
{
"epoch": 0.38,
"learning_rate": 0.00018353505787759643,
"loss": 0.8289,
"step": 269
},
{
"epoch": 0.38,
"learning_rate": 0.0001834113142480776,
"loss": 0.8387,
"step": 270
},
{
"epoch": 0.38,
"learning_rate": 0.00018328714939739476,
"loss": 0.8306,
"step": 271
},
{
"epoch": 0.39,
"learning_rate": 0.00018316256395257155,
"loss": 0.8485,
"step": 272
},
{
"epoch": 0.39,
"learning_rate": 0.00018303755854275555,
"loss": 0.7812,
"step": 273
},
{
"epoch": 0.39,
"learning_rate": 0.00018291213379921512,
"loss": 0.7986,
"step": 274
},
{
"epoch": 0.39,
"learning_rate": 0.00018278629035533623,
"loss": 0.8012,
"step": 275
},
{
"epoch": 0.39,
"learning_rate": 0.00018266002884661928,
"loss": 0.8854,
"step": 276
},
{
"epoch": 0.39,
"learning_rate": 0.00018253334991067581,
"loss": 0.8528,
"step": 277
},
{
"epoch": 0.39,
"learning_rate": 0.00018240625418722541,
"loss": 0.8012,
"step": 278
},
{
"epoch": 0.4,
"learning_rate": 0.00018227874231809238,
"loss": 0.8785,
"step": 279
},
{
"epoch": 0.4,
"learning_rate": 0.00018215081494720248,
"loss": 0.8013,
"step": 280
},
{
"epoch": 0.4,
"learning_rate": 0.00018202247272057983,
"loss": 0.7962,
"step": 281
},
{
"epoch": 0.4,
"learning_rate": 0.00018189371628634347,
"loss": 0.8746,
"step": 282
},
{
"epoch": 0.4,
"learning_rate": 0.00018176454629470414,
"loss": 0.8755,
"step": 283
},
{
"epoch": 0.4,
"learning_rate": 0.0001816349633979611,
"loss": 0.8665,
"step": 284
},
{
"epoch": 0.4,
"learning_rate": 0.00018150496825049866,
"loss": 0.8163,
"step": 285
},
{
"epoch": 0.41,
"learning_rate": 0.00018137456150878303,
"loss": 0.8478,
"step": 286
},
{
"epoch": 0.41,
"learning_rate": 0.0001812437438313589,
"loss": 0.7785,
"step": 287
},
{
"epoch": 0.41,
"learning_rate": 0.00018111251587884616,
"loss": 0.7899,
"step": 288
},
{
"epoch": 0.41,
"learning_rate": 0.00018098087831393663,
"loss": 0.8274,
"step": 289
},
{
"epoch": 0.41,
"learning_rate": 0.00018084883180139052,
"loss": 0.8124,
"step": 290
},
{
"epoch": 0.41,
"learning_rate": 0.00018071637700803334,
"loss": 0.8438,
"step": 291
},
{
"epoch": 0.41,
"learning_rate": 0.0001805835146027522,
"loss": 0.8542,
"step": 292
},
{
"epoch": 0.42,
"learning_rate": 0.00018045024525649284,
"loss": 0.8308,
"step": 293
},
{
"epoch": 0.42,
"learning_rate": 0.00018031656964225584,
"loss": 0.8677,
"step": 294
},
{
"epoch": 0.42,
"learning_rate": 0.00018018248843509354,
"loss": 0.8398,
"step": 295
},
{
"epoch": 0.42,
"learning_rate": 0.00018004800231210638,
"loss": 0.8078,
"step": 296
},
{
"epoch": 0.42,
"learning_rate": 0.0001799131119524397,
"loss": 0.7889,
"step": 297
},
{
"epoch": 0.42,
"learning_rate": 0.0001797778180372801,
"loss": 0.8287,
"step": 298
},
{
"epoch": 0.42,
"learning_rate": 0.00017964212124985224,
"loss": 0.8584,
"step": 299
},
{
"epoch": 0.43,
"learning_rate": 0.0001795060222754151,
"loss": 0.8178,
"step": 300
},
{
"epoch": 0.43,
"learning_rate": 0.0001793695218012588,
"loss": 0.8409,
"step": 301
},
{
"epoch": 0.43,
"learning_rate": 0.000179232620516701,
"loss": 0.8806,
"step": 302
},
{
"epoch": 0.43,
"learning_rate": 0.00017909531911308325,
"loss": 0.8563,
"step": 303
},
{
"epoch": 0.43,
"learning_rate": 0.00017895761828376795,
"loss": 0.8051,
"step": 304
},
{
"epoch": 0.43,
"learning_rate": 0.00017881951872413435,
"loss": 0.8475,
"step": 305
},
{
"epoch": 0.43,
"learning_rate": 0.00017868102113157534,
"loss": 0.804,
"step": 306
},
{
"epoch": 0.44,
"learning_rate": 0.0001785421262054939,
"loss": 0.8033,
"step": 307
},
{
"epoch": 0.44,
"learning_rate": 0.0001784028346472994,
"loss": 0.8219,
"step": 308
},
{
"epoch": 0.44,
"learning_rate": 0.00017826314716040423,
"loss": 0.8595,
"step": 309
},
{
"epoch": 0.44,
"learning_rate": 0.00017812306445022025,
"loss": 0.8405,
"step": 310
},
{
"epoch": 0.44,
"learning_rate": 0.00017798258722415508,
"loss": 0.7675,
"step": 311
},
{
"epoch": 0.44,
"learning_rate": 0.0001778417161916087,
"loss": 0.8026,
"step": 312
},
{
"epoch": 0.44,
"learning_rate": 0.00017770045206396963,
"loss": 0.8292,
"step": 313
},
{
"epoch": 0.45,
"learning_rate": 0.0001775587955546117,
"loss": 0.8287,
"step": 314
},
{
"epoch": 0.45,
"learning_rate": 0.0001774167473788901,
"loss": 0.8299,
"step": 315
},
{
"epoch": 0.45,
"learning_rate": 0.00017727430825413792,
"loss": 0.8101,
"step": 316
},
{
"epoch": 0.45,
"learning_rate": 0.00017713147889966262,
"loss": 0.7898,
"step": 317
},
{
"epoch": 0.45,
"learning_rate": 0.0001769882600367421,
"loss": 0.8084,
"step": 318
},
{
"epoch": 0.45,
"learning_rate": 0.00017684465238862148,
"loss": 0.8087,
"step": 319
},
{
"epoch": 0.45,
"learning_rate": 0.000176700656680509,
"loss": 0.8065,
"step": 320
},
{
"epoch": 0.46,
"learning_rate": 0.00017655627363957277,
"loss": 0.8368,
"step": 321
},
{
"epoch": 0.46,
"learning_rate": 0.0001764115039949367,
"loss": 0.8231,
"step": 322
},
{
"epoch": 0.46,
"learning_rate": 0.0001762663484776772,
"loss": 0.8333,
"step": 323
},
{
"epoch": 0.46,
"learning_rate": 0.00017612080782081923,
"loss": 0.8453,
"step": 324
},
{
"epoch": 0.46,
"learning_rate": 0.00017597488275933267,
"loss": 0.8669,
"step": 325
},
{
"epoch": 0.46,
"learning_rate": 0.00017582857403012866,
"loss": 0.8022,
"step": 326
},
{
"epoch": 0.46,
"learning_rate": 0.00017568188237205575,
"loss": 0.8116,
"step": 327
},
{
"epoch": 0.47,
"learning_rate": 0.00017553480852589636,
"loss": 0.8307,
"step": 328
},
{
"epoch": 0.47,
"learning_rate": 0.00017538735323436284,
"loss": 0.7986,
"step": 329
},
{
"epoch": 0.47,
"learning_rate": 0.00017523951724209388,
"loss": 0.8278,
"step": 330
},
{
"epoch": 0.47,
"learning_rate": 0.00017509130129565067,
"loss": 0.8397,
"step": 331
},
{
"epoch": 0.47,
"learning_rate": 0.0001749427061435131,
"loss": 0.7862,
"step": 332
},
{
"epoch": 0.47,
"learning_rate": 0.00017479373253607606,
"loss": 0.8574,
"step": 333
},
{
"epoch": 0.47,
"learning_rate": 0.00017464438122564556,
"loss": 0.8312,
"step": 334
},
{
"epoch": 0.48,
"learning_rate": 0.0001744946529664351,
"loss": 0.8666,
"step": 335
},
{
"epoch": 0.48,
"learning_rate": 0.00017434454851456162,
"loss": 0.8027,
"step": 336
},
{
"epoch": 0.48,
"learning_rate": 0.00017419406862804183,
"loss": 0.8962,
"step": 337
},
{
"epoch": 0.48,
"learning_rate": 0.00017404321406678844,
"loss": 0.8286,
"step": 338
},
{
"epoch": 0.48,
"learning_rate": 0.00017389198559260614,
"loss": 0.8167,
"step": 339
},
{
"epoch": 0.48,
"learning_rate": 0.00017374038396918788,
"loss": 0.831,
"step": 340
},
{
"epoch": 0.48,
"learning_rate": 0.00017358840996211104,
"loss": 0.8218,
"step": 341
},
{
"epoch": 0.49,
"learning_rate": 0.00017343606433883348,
"loss": 0.7902,
"step": 342
},
{
"epoch": 0.49,
"learning_rate": 0.00017328334786868968,
"loss": 0.8418,
"step": 343
},
{
"epoch": 0.49,
"learning_rate": 0.00017313026132288682,
"loss": 0.853,
"step": 344
},
{
"epoch": 0.49,
"learning_rate": 0.00017297680547450107,
"loss": 0.8153,
"step": 345
},
{
"epoch": 0.49,
"learning_rate": 0.00017282298109847345,
"loss": 0.8121,
"step": 346
},
{
"epoch": 0.49,
"learning_rate": 0.00017266878897160605,
"loss": 0.8085,
"step": 347
},
{
"epoch": 0.49,
"learning_rate": 0.00017251422987255802,
"loss": 0.8342,
"step": 348
},
{
"epoch": 0.5,
"learning_rate": 0.0001723593045818418,
"loss": 0.7986,
"step": 349
},
{
"epoch": 0.5,
"learning_rate": 0.00017220401388181903,
"loss": 0.7917,
"step": 350
},
{
"epoch": 0.5,
"learning_rate": 0.00017204835855669657,
"loss": 0.8111,
"step": 351
},
{
"epoch": 0.5,
"learning_rate": 0.00017189233939252267,
"loss": 0.8284,
"step": 352
},
{
"epoch": 0.5,
"eval_loss": 0.846347987651825,
"eval_runtime": 58.037,
"eval_samples_per_second": 6.892,
"eval_steps_per_second": 3.446,
"step": 352
},
{
"epoch": 0.5,
"learning_rate": 0.00017173595717718295,
"loss": 0.8536,
"step": 353
},
{
"epoch": 0.5,
"learning_rate": 0.00017157921270039646,
"loss": 0.8388,
"step": 354
},
{
"epoch": 0.5,
"learning_rate": 0.0001714221067537115,
"loss": 0.7963,
"step": 355
},
{
"epoch": 0.51,
"learning_rate": 0.00017126464013050185,
"loss": 0.7786,
"step": 356
},
{
"epoch": 0.51,
"learning_rate": 0.00017110681362596272,
"loss": 0.8085,
"step": 357
},
{
"epoch": 0.51,
"learning_rate": 0.00017094862803710664,
"loss": 0.8535,
"step": 358
},
{
"epoch": 0.51,
"learning_rate": 0.00017079008416275954,
"loss": 0.8492,
"step": 359
},
{
"epoch": 0.51,
"learning_rate": 0.00017063118280355655,
"loss": 0.884,
"step": 360
},
{
"epoch": 0.51,
"learning_rate": 0.00017047192476193825,
"loss": 0.8504,
"step": 361
},
{
"epoch": 0.51,
"learning_rate": 0.0001703123108421463,
"loss": 0.8377,
"step": 362
},
{
"epoch": 0.52,
"learning_rate": 0.00017015234185021957,
"loss": 0.8179,
"step": 363
},
{
"epoch": 0.52,
"learning_rate": 0.00016999201859399,
"loss": 0.8584,
"step": 364
},
{
"epoch": 0.52,
"learning_rate": 0.00016983134188307858,
"loss": 0.8151,
"step": 365
},
{
"epoch": 0.52,
"learning_rate": 0.0001696703125288912,
"loss": 0.7949,
"step": 366
},
{
"epoch": 0.52,
"learning_rate": 0.0001695089313446145,
"loss": 0.7939,
"step": 367
},
{
"epoch": 0.52,
"learning_rate": 0.000169347199145212,
"loss": 0.8024,
"step": 368
},
{
"epoch": 0.52,
"learning_rate": 0.00016918511674741965,
"loss": 0.8164,
"step": 369
},
{
"epoch": 0.53,
"learning_rate": 0.00016902268496974201,
"loss": 0.8424,
"step": 370
},
{
"epoch": 0.53,
"learning_rate": 0.00016885990463244785,
"loss": 0.8081,
"step": 371
},
{
"epoch": 0.53,
"learning_rate": 0.0001686967765575663,
"loss": 0.8728,
"step": 372
},
{
"epoch": 0.53,
"learning_rate": 0.0001685333015688824,
"loss": 0.8443,
"step": 373
},
{
"epoch": 0.53,
"learning_rate": 0.00016836948049193316,
"loss": 0.7933,
"step": 374
},
{
"epoch": 0.53,
"learning_rate": 0.00016820531415400334,
"loss": 0.8349,
"step": 375
},
{
"epoch": 0.53,
"learning_rate": 0.00016804080338412108,
"loss": 0.864,
"step": 376
},
{
"epoch": 0.54,
"learning_rate": 0.00016787594901305404,
"loss": 0.8422,
"step": 377
},
{
"epoch": 0.54,
"learning_rate": 0.000167710751873305,
"loss": 0.7993,
"step": 378
},
{
"epoch": 0.54,
"learning_rate": 0.0001675452127991077,
"loss": 0.8051,
"step": 379
},
{
"epoch": 0.54,
"learning_rate": 0.00016737933262642258,
"loss": 0.7417,
"step": 380
},
{
"epoch": 0.54,
"learning_rate": 0.0001672131121929326,
"loss": 0.7987,
"step": 381
},
{
"epoch": 0.54,
"learning_rate": 0.00016704655233803912,
"loss": 0.8387,
"step": 382
},
{
"epoch": 0.54,
"learning_rate": 0.00016687965390285747,
"loss": 0.815,
"step": 383
},
{
"epoch": 0.55,
"learning_rate": 0.00016671241773021276,
"loss": 0.8293,
"step": 384
},
{
"epoch": 0.55,
"learning_rate": 0.0001665448446646357,
"loss": 0.7945,
"step": 385
},
{
"epoch": 0.55,
"learning_rate": 0.00016637693555235825,
"loss": 0.801,
"step": 386
},
{
"epoch": 0.55,
"learning_rate": 0.00016620869124130944,
"loss": 0.8003,
"step": 387
},
{
"epoch": 0.55,
"learning_rate": 0.000166040112581111,
"loss": 0.8379,
"step": 388
},
{
"epoch": 0.55,
"learning_rate": 0.00016587120042307305,
"loss": 0.7929,
"step": 389
},
{
"epoch": 0.55,
"learning_rate": 0.00016570195562018992,
"loss": 0.7693,
"step": 390
},
{
"epoch": 0.56,
"learning_rate": 0.00016553237902713574,
"loss": 0.8191,
"step": 391
},
{
"epoch": 0.56,
"learning_rate": 0.00016536247150026017,
"loss": 0.826,
"step": 392
},
{
"epoch": 0.56,
"learning_rate": 0.000165192233897584,
"loss": 0.814,
"step": 393
},
{
"epoch": 0.56,
"learning_rate": 0.00016502166707879504,
"loss": 0.8159,
"step": 394
},
{
"epoch": 0.56,
"learning_rate": 0.00016485077190524341,
"loss": 0.8702,
"step": 395
},
{
"epoch": 0.56,
"learning_rate": 0.00016467954923993756,
"loss": 0.8188,
"step": 396
},
{
"epoch": 0.56,
"learning_rate": 0.00016450799994753966,
"loss": 0.8238,
"step": 397
},
{
"epoch": 0.57,
"learning_rate": 0.00016433612489436135,
"loss": 0.8168,
"step": 398
},
{
"epoch": 0.57,
"learning_rate": 0.00016416392494835935,
"loss": 0.7938,
"step": 399
},
{
"epoch": 0.57,
"learning_rate": 0.00016399140097913105,
"loss": 0.8115,
"step": 400
},
{
"epoch": 0.57,
"learning_rate": 0.00016381855385791015,
"loss": 0.8096,
"step": 401
},
{
"epoch": 0.57,
"learning_rate": 0.00016364538445756224,
"loss": 0.7986,
"step": 402
},
{
"epoch": 0.57,
"learning_rate": 0.00016347189365258034,
"loss": 0.7957,
"step": 403
},
{
"epoch": 0.57,
"learning_rate": 0.0001632980823190807,
"loss": 0.8502,
"step": 404
},
{
"epoch": 0.58,
"learning_rate": 0.000163123951334798,
"loss": 0.8214,
"step": 405
},
{
"epoch": 0.58,
"learning_rate": 0.00016294950157908132,
"loss": 0.8192,
"step": 406
},
{
"epoch": 0.58,
"learning_rate": 0.00016277473393288937,
"loss": 0.8425,
"step": 407
},
{
"epoch": 0.58,
"learning_rate": 0.00016259964927878626,
"loss": 0.7956,
"step": 408
},
{
"epoch": 0.58,
"learning_rate": 0.000162424248500937,
"loss": 0.7574,
"step": 409
},
{
"epoch": 0.58,
"learning_rate": 0.0001622485324851029,
"loss": 0.8459,
"step": 410
},
{
"epoch": 0.58,
"learning_rate": 0.00016207250211863728,
"loss": 0.7867,
"step": 411
},
{
"epoch": 0.59,
"learning_rate": 0.00016189615829048095,
"loss": 0.8068,
"step": 412
},
{
"epoch": 0.59,
"learning_rate": 0.00016171950189115751,
"loss": 0.8387,
"step": 413
},
{
"epoch": 0.59,
"learning_rate": 0.0001615425338127692,
"loss": 0.7998,
"step": 414
},
{
"epoch": 0.59,
"learning_rate": 0.00016136525494899208,
"loss": 0.819,
"step": 415
},
{
"epoch": 0.59,
"learning_rate": 0.00016118766619507176,
"loss": 0.8168,
"step": 416
},
{
"epoch": 0.59,
"learning_rate": 0.00016100976844781877,
"loss": 0.801,
"step": 417
},
{
"epoch": 0.59,
"learning_rate": 0.00016083156260560387,
"loss": 0.8251,
"step": 418
},
{
"epoch": 0.6,
"learning_rate": 0.00016065304956835395,
"loss": 0.8453,
"step": 419
},
{
"epoch": 0.6,
"learning_rate": 0.00016047423023754696,
"loss": 0.8419,
"step": 420
},
{
"epoch": 0.6,
"learning_rate": 0.00016029510551620777,
"loss": 0.7758,
"step": 421
},
{
"epoch": 0.6,
"learning_rate": 0.00016011567630890336,
"loss": 0.8178,
"step": 422
},
{
"epoch": 0.6,
"learning_rate": 0.0001599359435217384,
"loss": 0.8313,
"step": 423
},
{
"epoch": 0.6,
"learning_rate": 0.00015975590806235058,
"loss": 0.8286,
"step": 424
},
{
"epoch": 0.6,
"learning_rate": 0.0001595755708399061,
"loss": 0.8054,
"step": 425
},
{
"epoch": 0.61,
"learning_rate": 0.000159394932765095,
"loss": 0.834,
"step": 426
},
{
"epoch": 0.61,
"learning_rate": 0.00015921399475012663,
"loss": 0.8343,
"step": 427
},
{
"epoch": 0.61,
"learning_rate": 0.000159032757708725,
"loss": 0.813,
"step": 428
},
{
"epoch": 0.61,
"learning_rate": 0.00015885122255612425,
"loss": 0.7911,
"step": 429
},
{
"epoch": 0.61,
"learning_rate": 0.00015866939020906377,
"loss": 0.8037,
"step": 430
},
{
"epoch": 0.61,
"learning_rate": 0.00015848726158578403,
"loss": 0.838,
"step": 431
},
{
"epoch": 0.61,
"learning_rate": 0.0001583048376060215,
"loss": 0.8053,
"step": 432
},
{
"epoch": 0.62,
"learning_rate": 0.00015812211919100411,
"loss": 0.8014,
"step": 433
},
{
"epoch": 0.62,
"learning_rate": 0.00015793910726344694,
"loss": 0.7935,
"step": 434
},
{
"epoch": 0.62,
"learning_rate": 0.00015775580274754697,
"loss": 0.7803,
"step": 435
},
{
"epoch": 0.62,
"learning_rate": 0.00015757220656897896,
"loss": 0.8563,
"step": 436
},
{
"epoch": 0.62,
"learning_rate": 0.00015738831965489048,
"loss": 0.7825,
"step": 437
},
{
"epoch": 0.62,
"learning_rate": 0.0001572041429338972,
"loss": 0.8035,
"step": 438
},
{
"epoch": 0.62,
"learning_rate": 0.00015701967733607844,
"loss": 0.8199,
"step": 439
},
{
"epoch": 0.62,
"learning_rate": 0.00015683492379297222,
"loss": 0.8314,
"step": 440
},
{
"epoch": 0.63,
"learning_rate": 0.00015664988323757072,
"loss": 0.7746,
"step": 441
},
{
"epoch": 0.63,
"learning_rate": 0.00015646455660431552,
"loss": 0.7724,
"step": 442
},
{
"epoch": 0.63,
"learning_rate": 0.0001562789448290928,
"loss": 0.8263,
"step": 443
},
{
"epoch": 0.63,
"learning_rate": 0.00015609304884922878,
"loss": 0.7956,
"step": 444
},
{
"epoch": 0.63,
"learning_rate": 0.0001559068696034848,
"loss": 0.8388,
"step": 445
},
{
"epoch": 0.63,
"learning_rate": 0.00015572040803205273,
"loss": 0.8226,
"step": 446
},
{
"epoch": 0.63,
"learning_rate": 0.0001555336650765502,
"loss": 0.8014,
"step": 447
},
{
"epoch": 0.64,
"learning_rate": 0.00015534664168001568,
"loss": 0.8371,
"step": 448
},
{
"epoch": 0.64,
"learning_rate": 0.000155159338786904,
"loss": 0.8162,
"step": 449
},
{
"epoch": 0.64,
"learning_rate": 0.00015497175734308135,
"loss": 0.8334,
"step": 450
},
{
"epoch": 0.64,
"learning_rate": 0.00015478389829582057,
"loss": 0.802,
"step": 451
},
{
"epoch": 0.64,
"learning_rate": 0.00015459576259379637,
"loss": 0.8153,
"step": 452
},
{
"epoch": 0.64,
"learning_rate": 0.00015440735118708062,
"loss": 0.8418,
"step": 453
},
{
"epoch": 0.64,
"learning_rate": 0.0001542186650271374,
"loss": 0.7649,
"step": 454
},
{
"epoch": 0.65,
"learning_rate": 0.00015402970506681832,
"loss": 0.8178,
"step": 455
},
{
"epoch": 0.65,
"learning_rate": 0.0001538404722603577,
"loss": 0.7929,
"step": 456
},
{
"epoch": 0.65,
"learning_rate": 0.00015365096756336756,
"loss": 0.7714,
"step": 457
},
{
"epoch": 0.65,
"learning_rate": 0.00015346119193283313,
"loss": 0.7547,
"step": 458
},
{
"epoch": 0.65,
"learning_rate": 0.0001532711463271077,
"loss": 0.8495,
"step": 459
},
{
"epoch": 0.65,
"learning_rate": 0.000153080831705908,
"loss": 0.8482,
"step": 460
},
{
"epoch": 0.65,
"learning_rate": 0.00015289024903030924,
"loss": 0.7974,
"step": 461
},
{
"epoch": 0.66,
"learning_rate": 0.00015269939926274028,
"loss": 0.8091,
"step": 462
},
{
"epoch": 0.66,
"learning_rate": 0.00015250828336697876,
"loss": 0.771,
"step": 463
},
{
"epoch": 0.66,
"learning_rate": 0.00015231690230814633,
"loss": 0.8292,
"step": 464
},
{
"epoch": 0.66,
"learning_rate": 0.00015212525705270356,
"loss": 0.8259,
"step": 465
},
{
"epoch": 0.66,
"learning_rate": 0.00015193334856844528,
"loss": 0.8219,
"step": 466
},
{
"epoch": 0.66,
"learning_rate": 0.00015174117782449563,
"loss": 0.8284,
"step": 467
},
{
"epoch": 0.66,
"learning_rate": 0.00015154874579130308,
"loss": 0.8191,
"step": 468
},
{
"epoch": 0.67,
"learning_rate": 0.0001513560534406356,
"loss": 0.8275,
"step": 469
},
{
"epoch": 0.67,
"learning_rate": 0.0001511631017455758,
"loss": 0.7894,
"step": 470
},
{
"epoch": 0.67,
"learning_rate": 0.00015096989168051595,
"loss": 0.8161,
"step": 471
},
{
"epoch": 0.67,
"learning_rate": 0.00015077642422115295,
"loss": 0.8246,
"step": 472
},
{
"epoch": 0.67,
"learning_rate": 0.0001505827003444837,
"loss": 0.8306,
"step": 473
},
{
"epoch": 0.67,
"learning_rate": 0.00015038872102879981,
"loss": 0.7859,
"step": 474
},
{
"epoch": 0.67,
"learning_rate": 0.00015019448725368305,
"loss": 0.7873,
"step": 475
},
{
"epoch": 0.68,
"learning_rate": 0.00015000000000000001,
"loss": 0.812,
"step": 476
},
{
"epoch": 0.68,
"learning_rate": 0.00014980526024989738,
"loss": 0.8562,
"step": 477
},
{
"epoch": 0.68,
"learning_rate": 0.00014961026898679703,
"loss": 0.8009,
"step": 478
},
{
"epoch": 0.68,
"learning_rate": 0.0001494150271953908,
"loss": 0.8271,
"step": 479
},
{
"epoch": 0.68,
"learning_rate": 0.00014921953586163577,
"loss": 0.8172,
"step": 480
},
{
"epoch": 0.68,
"learning_rate": 0.0001490237959727492,
"loss": 0.7966,
"step": 481
},
{
"epoch": 0.68,
"learning_rate": 0.00014882780851720344,
"loss": 0.8242,
"step": 482
},
{
"epoch": 0.69,
"learning_rate": 0.00014863157448472122,
"loss": 0.7957,
"step": 483
},
{
"epoch": 0.69,
"learning_rate": 0.0001484350948662703,
"loss": 0.8224,
"step": 484
},
{
"epoch": 0.69,
"learning_rate": 0.00014823837065405863,
"loss": 0.784,
"step": 485
},
{
"epoch": 0.69,
"learning_rate": 0.0001480414028415295,
"loss": 0.8139,
"step": 486
},
{
"epoch": 0.69,
"learning_rate": 0.00014784419242335614,
"loss": 0.7678,
"step": 487
},
{
"epoch": 0.69,
"learning_rate": 0.00014764674039543718,
"loss": 0.772,
"step": 488
},
{
"epoch": 0.69,
"learning_rate": 0.00014744904775489107,
"loss": 0.8111,
"step": 489
},
{
"epoch": 0.7,
"learning_rate": 0.0001472511155000516,
"loss": 0.8256,
"step": 490
},
{
"epoch": 0.7,
"learning_rate": 0.00014705294463046248,
"loss": 0.8292,
"step": 491
},
{
"epoch": 0.7,
"learning_rate": 0.00014685453614687231,
"loss": 0.7988,
"step": 492
},
{
"epoch": 0.7,
"learning_rate": 0.0001466558910512298,
"loss": 0.7818,
"step": 493
},
{
"epoch": 0.7,
"learning_rate": 0.00014645701034667847,
"loss": 0.8344,
"step": 494
},
{
"epoch": 0.7,
"learning_rate": 0.0001462578950375516,
"loss": 0.8063,
"step": 495
},
{
"epoch": 0.7,
"learning_rate": 0.00014605854612936728,
"loss": 0.7822,
"step": 496
},
{
"epoch": 0.71,
"learning_rate": 0.00014585896462882317,
"loss": 0.8224,
"step": 497
},
{
"epoch": 0.71,
"learning_rate": 0.00014565915154379162,
"loss": 0.8074,
"step": 498
},
{
"epoch": 0.71,
"learning_rate": 0.00014545910788331433,
"loss": 0.7734,
"step": 499
},
{
"epoch": 0.71,
"learning_rate": 0.0001452588346575975,
"loss": 0.7801,
"step": 500
},
{
"epoch": 0.71,
"learning_rate": 0.00014505833287800662,
"loss": 0.8452,
"step": 501
},
{
"epoch": 0.71,
"learning_rate": 0.00014485760355706123,
"loss": 0.7961,
"step": 502
},
{
"epoch": 0.71,
"learning_rate": 0.00014465664770843008,
"loss": 0.8104,
"step": 503
},
{
"epoch": 0.72,
"learning_rate": 0.00014445546634692582,
"loss": 0.8137,
"step": 504
},
{
"epoch": 0.72,
"learning_rate": 0.0001442540604884999,
"loss": 0.7949,
"step": 505
},
{
"epoch": 0.72,
"learning_rate": 0.00014405243115023748,
"loss": 0.7968,
"step": 506
},
{
"epoch": 0.72,
"learning_rate": 0.00014385057935035228,
"loss": 0.7942,
"step": 507
},
{
"epoch": 0.72,
"learning_rate": 0.00014364850610818145,
"loss": 0.7847,
"step": 508
},
{
"epoch": 0.72,
"learning_rate": 0.0001434462124441804,
"loss": 0.7803,
"step": 509
},
{
"epoch": 0.72,
"learning_rate": 0.00014324369937991765,
"loss": 0.9007,
"step": 510
},
{
"epoch": 0.73,
"learning_rate": 0.00014304096793806958,
"loss": 0.8213,
"step": 511
},
{
"epoch": 0.73,
"learning_rate": 0.0001428380191424156,
"loss": 0.8121,
"step": 512
},
{
"epoch": 0.73,
"learning_rate": 0.00014263485401783252,
"loss": 0.7978,
"step": 513
},
{
"epoch": 0.73,
"learning_rate": 0.00014243147359028967,
"loss": 0.8319,
"step": 514
},
{
"epoch": 0.73,
"learning_rate": 0.00014222787888684363,
"loss": 0.7793,
"step": 515
},
{
"epoch": 0.73,
"learning_rate": 0.0001420240709356331,
"loss": 0.7744,
"step": 516
},
{
"epoch": 0.73,
"learning_rate": 0.00014182005076587365,
"loss": 0.769,
"step": 517
},
{
"epoch": 0.74,
"learning_rate": 0.00014161581940785252,
"loss": 0.8161,
"step": 518
},
{
"epoch": 0.74,
"learning_rate": 0.0001414113778929234,
"loss": 0.7841,
"step": 519
},
{
"epoch": 0.74,
"learning_rate": 0.00014120672725350137,
"loss": 0.7876,
"step": 520
},
{
"epoch": 0.74,
"learning_rate": 0.00014100186852305743,
"loss": 0.7723,
"step": 521
},
{
"epoch": 0.74,
"learning_rate": 0.00014079680273611358,
"loss": 0.8469,
"step": 522
},
{
"epoch": 0.74,
"learning_rate": 0.0001405915309282373,
"loss": 0.754,
"step": 523
},
{
"epoch": 0.74,
"learning_rate": 0.00014038605413603652,
"loss": 0.8394,
"step": 524
},
{
"epoch": 0.75,
"learning_rate": 0.00014018037339715437,
"loss": 0.813,
"step": 525
},
{
"epoch": 0.75,
"learning_rate": 0.00013997448975026382,
"loss": 0.7784,
"step": 526
},
{
"epoch": 0.75,
"learning_rate": 0.00013976840423506257,
"loss": 0.8189,
"step": 527
},
{
"epoch": 0.75,
"learning_rate": 0.0001395621178922677,
"loss": 0.7928,
"step": 528
},
{
"epoch": 0.75,
"eval_loss": 0.8294777870178223,
"eval_runtime": 58.0411,
"eval_samples_per_second": 6.892,
"eval_steps_per_second": 3.446,
"step": 528
},
{
"epoch": 0.75,
"learning_rate": 0.00013935563176361042,
"loss": 0.7904,
"step": 529
},
{
"epoch": 0.75,
"learning_rate": 0.00013914894689183097,
"loss": 0.7809,
"step": 530
},
{
"epoch": 0.75,
"learning_rate": 0.00013894206432067308,
"loss": 0.7594,
"step": 531
},
{
"epoch": 0.76,
"learning_rate": 0.00013873498509487902,
"loss": 0.8133,
"step": 532
},
{
"epoch": 0.76,
"learning_rate": 0.000138527710260184,
"loss": 0.7861,
"step": 533
},
{
"epoch": 0.76,
"learning_rate": 0.00013832024086331103,
"loss": 0.798,
"step": 534
},
{
"epoch": 0.76,
"learning_rate": 0.0001381125779519658,
"loss": 0.8223,
"step": 535
},
{
"epoch": 0.76,
"learning_rate": 0.00013790472257483108,
"loss": 0.7924,
"step": 536
},
{
"epoch": 0.76,
"learning_rate": 0.00013769667578156165,
"loss": 0.8164,
"step": 537
},
{
"epoch": 0.76,
"learning_rate": 0.00013748843862277898,
"loss": 0.8427,
"step": 538
},
{
"epoch": 0.77,
"learning_rate": 0.00013728001215006574,
"loss": 0.8305,
"step": 539
},
{
"epoch": 0.77,
"learning_rate": 0.0001370713974159607,
"loss": 0.805,
"step": 540
},
{
"epoch": 0.77,
"learning_rate": 0.0001368625954739534,
"loss": 0.7933,
"step": 541
},
{
"epoch": 0.77,
"learning_rate": 0.00013665360737847857,
"loss": 0.8358,
"step": 542
},
{
"epoch": 0.77,
"learning_rate": 0.00013644443418491125,
"loss": 0.813,
"step": 543
},
{
"epoch": 0.77,
"learning_rate": 0.00013623507694956102,
"loss": 0.7695,
"step": 544
},
{
"epoch": 0.77,
"learning_rate": 0.0001360255367296669,
"loss": 0.825,
"step": 545
},
{
"epoch": 0.78,
"learning_rate": 0.00013581581458339207,
"loss": 0.7928,
"step": 546
},
{
"epoch": 0.78,
"learning_rate": 0.00013560591156981831,
"loss": 0.8634,
"step": 547
},
{
"epoch": 0.78,
"learning_rate": 0.00013539582874894083,
"loss": 0.7986,
"step": 548
},
{
"epoch": 0.78,
"learning_rate": 0.00013518556718166282,
"loss": 0.8162,
"step": 549
},
{
"epoch": 0.78,
"learning_rate": 0.00013497512792979012,
"loss": 0.8048,
"step": 550
},
{
"epoch": 0.78,
"learning_rate": 0.0001347645120560259,
"loss": 0.8305,
"step": 551
},
{
"epoch": 0.78,
"learning_rate": 0.00013455372062396524,
"loss": 0.7757,
"step": 552
},
{
"epoch": 0.79,
"learning_rate": 0.00013434275469808974,
"loss": 0.8034,
"step": 553
},
{
"epoch": 0.79,
"learning_rate": 0.0001341316153437623,
"loss": 0.8023,
"step": 554
},
{
"epoch": 0.79,
"learning_rate": 0.0001339203036272215,
"loss": 0.8447,
"step": 555
},
{
"epoch": 0.79,
"learning_rate": 0.00013370882061557635,
"loss": 0.8172,
"step": 556
},
{
"epoch": 0.79,
"learning_rate": 0.00013349716737680092,
"loss": 0.8713,
"step": 557
},
{
"epoch": 0.79,
"learning_rate": 0.00013328534497972894,
"loss": 0.7882,
"step": 558
},
{
"epoch": 0.79,
"learning_rate": 0.00013307335449404836,
"loss": 0.812,
"step": 559
},
{
"epoch": 0.8,
"learning_rate": 0.0001328611969902959,
"loss": 0.8051,
"step": 560
},
{
"epoch": 0.8,
"learning_rate": 0.00013264887353985175,
"loss": 0.7853,
"step": 561
},
{
"epoch": 0.8,
"learning_rate": 0.00013243638521493424,
"loss": 0.8625,
"step": 562
},
{
"epoch": 0.8,
"learning_rate": 0.00013222373308859406,
"loss": 0.7404,
"step": 563
},
{
"epoch": 0.8,
"learning_rate": 0.00013201091823470936,
"loss": 0.7813,
"step": 564
},
{
"epoch": 0.8,
"learning_rate": 0.00013179794172797976,
"loss": 0.7803,
"step": 565
},
{
"epoch": 0.8,
"learning_rate": 0.00013158480464392144,
"loss": 0.8549,
"step": 566
},
{
"epoch": 0.81,
"learning_rate": 0.00013137150805886147,
"loss": 0.7503,
"step": 567
},
{
"epoch": 0.81,
"learning_rate": 0.0001311580530499322,
"loss": 0.8602,
"step": 568
},
{
"epoch": 0.81,
"learning_rate": 0.0001309444406950663,
"loss": 0.8061,
"step": 569
},
{
"epoch": 0.81,
"learning_rate": 0.00013073067207299073,
"loss": 0.8279,
"step": 570
},
{
"epoch": 0.81,
"learning_rate": 0.00013051674826322176,
"loss": 0.7735,
"step": 571
},
{
"epoch": 0.81,
"learning_rate": 0.0001303026703460594,
"loss": 0.7717,
"step": 572
},
{
"epoch": 0.81,
"learning_rate": 0.00013008843940258164,
"loss": 0.7514,
"step": 573
},
{
"epoch": 0.82,
"learning_rate": 0.00012987405651463952,
"loss": 0.7604,
"step": 574
},
{
"epoch": 0.82,
"learning_rate": 0.00012965952276485128,
"loss": 0.7884,
"step": 575
},
{
"epoch": 0.82,
"learning_rate": 0.00012944483923659693,
"loss": 0.8118,
"step": 576
},
{
"epoch": 0.82,
"learning_rate": 0.00012923000701401297,
"loss": 0.8253,
"step": 577
},
{
"epoch": 0.82,
"learning_rate": 0.00012901502718198663,
"loss": 0.7794,
"step": 578
},
{
"epoch": 0.82,
"learning_rate": 0.0001287999008261508,
"loss": 0.8049,
"step": 579
},
{
"epoch": 0.82,
"learning_rate": 0.00012858462903287814,
"loss": 0.8225,
"step": 580
},
{
"epoch": 0.83,
"learning_rate": 0.00012836921288927574,
"loss": 0.7848,
"step": 581
},
{
"epoch": 0.83,
"learning_rate": 0.00012815365348317975,
"loss": 0.8081,
"step": 582
},
{
"epoch": 0.83,
"learning_rate": 0.00012793795190314973,
"loss": 0.7644,
"step": 583
},
{
"epoch": 0.83,
"learning_rate": 0.00012772210923846317,
"loss": 0.8084,
"step": 584
},
{
"epoch": 0.83,
"learning_rate": 0.00012750612657911012,
"loss": 0.7859,
"step": 585
},
{
"epoch": 0.83,
"learning_rate": 0.0001272900050157875,
"loss": 0.8209,
"step": 586
},
{
"epoch": 0.83,
"learning_rate": 0.00012707374563989375,
"loss": 0.8092,
"step": 587
},
{
"epoch": 0.84,
"learning_rate": 0.00012685734954352327,
"loss": 0.8402,
"step": 588
},
{
"epoch": 0.84,
"learning_rate": 0.0001266408178194608,
"loss": 0.7294,
"step": 589
},
{
"epoch": 0.84,
"learning_rate": 0.00012642415156117605,
"loss": 0.8243,
"step": 590
},
{
"epoch": 0.84,
"learning_rate": 0.0001262073518628181,
"loss": 0.8151,
"step": 591
},
{
"epoch": 0.84,
"learning_rate": 0.00012599041981920995,
"loss": 0.7833,
"step": 592
},
{
"epoch": 0.84,
"learning_rate": 0.00012577335652584284,
"loss": 0.8321,
"step": 593
},
{
"epoch": 0.84,
"learning_rate": 0.00012555616307887086,
"loss": 0.7716,
"step": 594
},
{
"epoch": 0.85,
"learning_rate": 0.00012533884057510538,
"loss": 0.7746,
"step": 595
},
{
"epoch": 0.85,
"learning_rate": 0.00012512139011200947,
"loss": 0.8502,
"step": 596
},
{
"epoch": 0.85,
"learning_rate": 0.00012490381278769242,
"loss": 0.8417,
"step": 597
},
{
"epoch": 0.85,
"learning_rate": 0.00012468610970090411,
"loss": 0.8002,
"step": 598
},
{
"epoch": 0.85,
"learning_rate": 0.00012446828195102956,
"loss": 0.762,
"step": 599
},
{
"epoch": 0.85,
"learning_rate": 0.00012425033063808328,
"loss": 0.8302,
"step": 600
},
{
"epoch": 0.85,
"learning_rate": 0.00012403225686270384,
"loss": 0.8135,
"step": 601
},
{
"epoch": 0.86,
"learning_rate": 0.00012381406172614812,
"loss": 0.7791,
"step": 602
},
{
"epoch": 0.86,
"learning_rate": 0.000123595746330286,
"loss": 0.7753,
"step": 603
},
{
"epoch": 0.86,
"learning_rate": 0.0001233773117775946,
"loss": 0.7365,
"step": 604
},
{
"epoch": 0.86,
"learning_rate": 0.0001231587591711527,
"loss": 0.8144,
"step": 605
},
{
"epoch": 0.86,
"learning_rate": 0.00012294008961463539,
"loss": 0.7906,
"step": 606
},
{
"epoch": 0.86,
"learning_rate": 0.00012272130421230818,
"loss": 0.8327,
"step": 607
},
{
"epoch": 0.86,
"learning_rate": 0.0001225024040690218,
"loss": 0.7626,
"step": 608
},
{
"epoch": 0.87,
"learning_rate": 0.00012228339029020624,
"loss": 0.8493,
"step": 609
},
{
"epoch": 0.87,
"learning_rate": 0.00012206426398186534,
"loss": 0.7778,
"step": 610
},
{
"epoch": 0.87,
"learning_rate": 0.00012184502625057139,
"loss": 0.805,
"step": 611
},
{
"epoch": 0.87,
"learning_rate": 0.00012162567820345912,
"loss": 0.8099,
"step": 612
},
{
"epoch": 0.87,
"learning_rate": 0.00012140622094822054,
"loss": 0.7626,
"step": 613
},
{
"epoch": 0.87,
"learning_rate": 0.00012118665559309906,
"loss": 0.8807,
"step": 614
},
{
"epoch": 0.87,
"learning_rate": 0.00012096698324688392,
"loss": 0.8007,
"step": 615
},
{
"epoch": 0.88,
"learning_rate": 0.00012074720501890484,
"loss": 0.7872,
"step": 616
},
{
"epoch": 0.88,
"learning_rate": 0.00012052732201902608,
"loss": 0.7814,
"step": 617
},
{
"epoch": 0.88,
"learning_rate": 0.00012030733535764107,
"loss": 0.8192,
"step": 618
},
{
"epoch": 0.88,
"learning_rate": 0.0001200872461456667,
"loss": 0.8097,
"step": 619
},
{
"epoch": 0.88,
"learning_rate": 0.0001198670554945377,
"loss": 0.7605,
"step": 620
},
{
"epoch": 0.88,
"learning_rate": 0.00011964676451620112,
"loss": 0.817,
"step": 621
},
{
"epoch": 0.88,
"learning_rate": 0.00011942637432311059,
"loss": 0.8013,
"step": 622
},
{
"epoch": 0.88,
"learning_rate": 0.00011920588602822083,
"loss": 0.7394,
"step": 623
},
{
"epoch": 0.89,
"learning_rate": 0.00011898530074498194,
"loss": 0.8615,
"step": 624
},
{
"epoch": 0.89,
"learning_rate": 0.00011876461958733381,
"loss": 0.745,
"step": 625
},
{
"epoch": 0.89,
"learning_rate": 0.00011854384366970046,
"loss": 0.8218,
"step": 626
},
{
"epoch": 0.89,
"learning_rate": 0.00011832297410698447,
"loss": 0.776,
"step": 627
},
{
"epoch": 0.89,
"learning_rate": 0.00011810201201456134,
"loss": 0.781,
"step": 628
},
{
"epoch": 0.89,
"learning_rate": 0.00011788095850827381,
"loss": 0.7886,
"step": 629
},
{
"epoch": 0.89,
"learning_rate": 0.00011765981470442624,
"loss": 0.7804,
"step": 630
},
{
"epoch": 0.9,
"learning_rate": 0.00011743858171977899,
"loss": 0.7484,
"step": 631
},
{
"epoch": 0.9,
"learning_rate": 0.00011721726067154282,
"loss": 0.7553,
"step": 632
},
{
"epoch": 0.9,
"learning_rate": 0.00011699585267737317,
"loss": 0.8006,
"step": 633
},
{
"epoch": 0.9,
"learning_rate": 0.00011677435885536452,
"loss": 0.7649,
"step": 634
},
{
"epoch": 0.9,
"learning_rate": 0.00011655278032404489,
"loss": 0.7723,
"step": 635
},
{
"epoch": 0.9,
"learning_rate": 0.00011633111820236991,
"loss": 0.8024,
"step": 636
},
{
"epoch": 0.9,
"learning_rate": 0.00011610937360971747,
"loss": 0.7931,
"step": 637
},
{
"epoch": 0.91,
"learning_rate": 0.00011588754766588188,
"loss": 0.847,
"step": 638
},
{
"epoch": 0.91,
"learning_rate": 0.00011566564149106822,
"loss": 0.7786,
"step": 639
},
{
"epoch": 0.91,
"learning_rate": 0.00011544365620588688,
"loss": 0.8177,
"step": 640
},
{
"epoch": 0.91,
"learning_rate": 0.00011522159293134758,
"loss": 0.7655,
"step": 641
},
{
"epoch": 0.91,
"learning_rate": 0.00011499945278885395,
"loss": 0.7785,
"step": 642
},
{
"epoch": 0.91,
"learning_rate": 0.00011477723690019788,
"loss": 0.8031,
"step": 643
},
{
"epoch": 0.91,
"learning_rate": 0.0001145549463875536,
"loss": 0.844,
"step": 644
},
{
"epoch": 0.92,
"learning_rate": 0.00011433258237347235,
"loss": 0.7841,
"step": 645
},
{
"epoch": 0.92,
"learning_rate": 0.00011411014598087644,
"loss": 0.8228,
"step": 646
},
{
"epoch": 0.92,
"learning_rate": 0.00011388763833305371,
"loss": 0.8026,
"step": 647
},
{
"epoch": 0.92,
"learning_rate": 0.00011366506055365194,
"loss": 0.8072,
"step": 648
},
{
"epoch": 0.92,
"learning_rate": 0.00011344241376667284,
"loss": 0.8113,
"step": 649
},
{
"epoch": 0.92,
"learning_rate": 0.00011321969909646683,
"loss": 0.8151,
"step": 650
},
{
"epoch": 0.92,
"learning_rate": 0.00011299691766772709,
"loss": 0.8332,
"step": 651
},
{
"epoch": 0.93,
"learning_rate": 0.00011277407060548373,
"loss": 0.7731,
"step": 652
},
{
"epoch": 0.93,
"learning_rate": 0.00011255115903509861,
"loss": 0.8198,
"step": 653
},
{
"epoch": 0.93,
"learning_rate": 0.00011232818408225909,
"loss": 0.7763,
"step": 654
},
{
"epoch": 0.93,
"learning_rate": 0.0001121051468729728,
"loss": 0.8101,
"step": 655
},
{
"epoch": 0.93,
"learning_rate": 0.00011188204853356163,
"loss": 0.7844,
"step": 656
},
{
"epoch": 0.93,
"learning_rate": 0.00011165889019065618,
"loss": 0.8057,
"step": 657
},
{
"epoch": 0.93,
"learning_rate": 0.0001114356729711902,
"loss": 0.7925,
"step": 658
},
{
"epoch": 0.94,
"learning_rate": 0.00011121239800239458,
"loss": 0.7849,
"step": 659
},
{
"epoch": 0.94,
"learning_rate": 0.00011098906641179194,
"loss": 0.811,
"step": 660
},
{
"epoch": 0.94,
"learning_rate": 0.00011076567932719088,
"loss": 0.7957,
"step": 661
},
{
"epoch": 0.94,
"learning_rate": 0.00011054223787668008,
"loss": 0.8227,
"step": 662
},
{
"epoch": 0.94,
"learning_rate": 0.00011031874318862294,
"loss": 0.8388,
"step": 663
},
{
"epoch": 0.94,
"learning_rate": 0.00011009519639165162,
"loss": 0.7837,
"step": 664
},
{
"epoch": 0.94,
"learning_rate": 0.00010987159861466143,
"loss": 0.8168,
"step": 665
},
{
"epoch": 0.95,
"learning_rate": 0.00010964795098680512,
"loss": 0.8015,
"step": 666
},
{
"epoch": 0.95,
"learning_rate": 0.0001094242546374872,
"loss": 0.8834,
"step": 667
},
{
"epoch": 0.95,
"learning_rate": 0.00010920051069635822,
"loss": 0.7445,
"step": 668
},
{
"epoch": 0.95,
"learning_rate": 0.00010897672029330906,
"loss": 0.84,
"step": 669
},
{
"epoch": 0.95,
"learning_rate": 0.00010875288455846522,
"loss": 0.8329,
"step": 670
},
{
"epoch": 0.95,
"learning_rate": 0.00010852900462218117,
"loss": 0.8016,
"step": 671
},
{
"epoch": 0.95,
"learning_rate": 0.0001083050816150345,
"loss": 0.7757,
"step": 672
},
{
"epoch": 0.96,
"learning_rate": 0.0001080811166678204,
"loss": 0.7858,
"step": 673
},
{
"epoch": 0.96,
"learning_rate": 0.0001078571109115458,
"loss": 0.7934,
"step": 674
},
{
"epoch": 0.96,
"learning_rate": 0.00010763306547742375,
"loss": 0.8384,
"step": 675
},
{
"epoch": 0.96,
"learning_rate": 0.0001074089814968676,
"loss": 0.7513,
"step": 676
},
{
"epoch": 0.96,
"learning_rate": 0.00010718486010148547,
"loss": 0.8118,
"step": 677
},
{
"epoch": 0.96,
"learning_rate": 0.00010696070242307432,
"loss": 0.7644,
"step": 678
},
{
"epoch": 0.96,
"learning_rate": 0.00010673650959361439,
"loss": 0.7744,
"step": 679
},
{
"epoch": 0.97,
"learning_rate": 0.00010651228274526339,
"loss": 0.8201,
"step": 680
},
{
"epoch": 0.97,
"learning_rate": 0.00010628802301035085,
"loss": 0.8065,
"step": 681
},
{
"epoch": 0.97,
"learning_rate": 0.00010606373152137241,
"loss": 0.851,
"step": 682
},
{
"epoch": 0.97,
"learning_rate": 0.000105839409410984,
"loss": 0.8175,
"step": 683
},
{
"epoch": 0.97,
"learning_rate": 0.00010561505781199618,
"loss": 0.7985,
"step": 684
},
{
"epoch": 0.97,
"learning_rate": 0.00010539067785736856,
"loss": 0.801,
"step": 685
},
{
"epoch": 0.97,
"learning_rate": 0.00010516627068020373,
"loss": 0.8111,
"step": 686
},
{
"epoch": 0.98,
"learning_rate": 0.00010494183741374194,
"loss": 0.8076,
"step": 687
},
{
"epoch": 0.98,
"learning_rate": 0.0001047173791913551,
"loss": 0.7516,
"step": 688
},
{
"epoch": 0.98,
"learning_rate": 0.00010449289714654109,
"loss": 0.7644,
"step": 689
},
{
"epoch": 0.98,
"learning_rate": 0.00010426839241291828,
"loss": 0.8133,
"step": 690
},
{
"epoch": 0.98,
"learning_rate": 0.00010404386612421942,
"loss": 0.8554,
"step": 691
},
{
"epoch": 0.98,
"learning_rate": 0.0001038193194142862,
"loss": 0.8334,
"step": 692
},
{
"epoch": 0.98,
"learning_rate": 0.00010359475341706346,
"loss": 0.8241,
"step": 693
},
{
"epoch": 0.99,
"learning_rate": 0.00010337016926659333,
"loss": 0.8197,
"step": 694
},
{
"epoch": 0.99,
"learning_rate": 0.0001031455680970098,
"loss": 0.8107,
"step": 695
},
{
"epoch": 0.99,
"learning_rate": 0.00010292095104253259,
"loss": 0.798,
"step": 696
},
{
"epoch": 0.99,
"learning_rate": 0.00010269631923746176,
"loss": 0.8339,
"step": 697
},
{
"epoch": 0.99,
"learning_rate": 0.00010247167381617191,
"loss": 0.7761,
"step": 698
},
{
"epoch": 0.99,
"learning_rate": 0.00010224701591310625,
"loss": 0.8617,
"step": 699
},
{
"epoch": 0.99,
"learning_rate": 0.00010202234666277115,
"loss": 0.7663,
"step": 700
},
{
"epoch": 1.0,
"learning_rate": 0.00010179766719973023,
"loss": 0.7603,
"step": 701
},
{
"epoch": 1.0,
"learning_rate": 0.00010157297865859865,
"loss": 0.7736,
"step": 702
},
{
"epoch": 1.0,
"learning_rate": 0.0001013482821740375,
"loss": 0.8078,
"step": 703
},
{
"epoch": 1.0,
"learning_rate": 0.00010112357888074793,
"loss": 0.8313,
"step": 704
},
{
"epoch": 1.0,
"eval_loss": 0.8154956698417664,
"eval_runtime": 58.0041,
"eval_samples_per_second": 6.896,
"eval_steps_per_second": 3.448,
"step": 704
}
],
"logging_steps": 1,
"max_steps": 1408,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 704,
"total_flos": 2.3970054853676237e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}