bart-base-code-instructiongen / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.994671403197158,
"global_step": 843,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 1.4117647058823532e-05,
"loss": 5.6404,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 2.8235294117647063e-05,
"loss": 4.5746,
"step": 6
},
{
"epoch": 0.03,
"learning_rate": 4.235294117647059e-05,
"loss": 3.3627,
"step": 9
},
{
"epoch": 0.04,
"learning_rate": 5.6470588235294126e-05,
"loss": 2.7997,
"step": 12
},
{
"epoch": 0.05,
"learning_rate": 7.058823529411765e-05,
"loss": 2.4203,
"step": 15
},
{
"epoch": 0.06,
"learning_rate": 7.999971068621779e-05,
"loss": 2.217,
"step": 18
},
{
"epoch": 0.07,
"learning_rate": 7.999537106318664e-05,
"loss": 2.2324,
"step": 21
},
{
"epoch": 0.09,
"learning_rate": 7.998582444493811e-05,
"loss": 2.089,
"step": 24
},
{
"epoch": 0.1,
"learning_rate": 7.997107207434592e-05,
"loss": 2.0586,
"step": 27
},
{
"epoch": 0.11,
"learning_rate": 7.995111587202043e-05,
"loss": 1.9011,
"step": 30
},
{
"epoch": 0.12,
"learning_rate": 7.992595843605852e-05,
"loss": 1.8797,
"step": 33
},
{
"epoch": 0.13,
"learning_rate": 7.989560304170546e-05,
"loss": 1.8855,
"step": 36
},
{
"epoch": 0.14,
"learning_rate": 7.98600536409284e-05,
"loss": 1.7122,
"step": 39
},
{
"epoch": 0.15,
"learning_rate": 7.981931486190192e-05,
"loss": 1.7285,
"step": 42
},
{
"epoch": 0.16,
"learning_rate": 7.97733920084055e-05,
"loss": 1.6935,
"step": 45
},
{
"epoch": 0.17,
"learning_rate": 7.972229105913295e-05,
"loss": 1.5963,
"step": 48
},
{
"epoch": 0.18,
"learning_rate": 7.966601866691413e-05,
"loss": 1.6079,
"step": 51
},
{
"epoch": 0.19,
"learning_rate": 7.96045821578488e-05,
"loss": 1.6835,
"step": 54
},
{
"epoch": 0.2,
"learning_rate": 7.953798953035274e-05,
"loss": 1.5928,
"step": 57
},
{
"epoch": 0.21,
"learning_rate": 7.946624945411658e-05,
"loss": 1.6176,
"step": 60
},
{
"epoch": 0.22,
"learning_rate": 7.938937126897702e-05,
"loss": 1.5979,
"step": 63
},
{
"epoch": 0.23,
"learning_rate": 7.930736498370085e-05,
"loss": 1.5759,
"step": 66
},
{
"epoch": 0.25,
"learning_rate": 7.922024127468196e-05,
"loss": 1.633,
"step": 69
},
{
"epoch": 0.26,
"learning_rate": 7.912801148455134e-05,
"loss": 1.5562,
"step": 72
},
{
"epoch": 0.27,
"learning_rate": 7.903068762070047e-05,
"loss": 1.5349,
"step": 75
},
{
"epoch": 0.28,
"learning_rate": 7.892828235371795e-05,
"loss": 1.5454,
"step": 78
},
{
"epoch": 0.29,
"learning_rate": 7.882080901574001e-05,
"loss": 1.5811,
"step": 81
},
{
"epoch": 0.3,
"learning_rate": 7.870828159871478e-05,
"loss": 1.5442,
"step": 84
},
{
"epoch": 0.31,
"learning_rate": 7.859071475258065e-05,
"loss": 1.5513,
"step": 87
},
{
"epoch": 0.32,
"learning_rate": 7.8468123783359e-05,
"loss": 1.5349,
"step": 90
},
{
"epoch": 0.33,
"learning_rate": 7.83405246511616e-05,
"loss": 1.5035,
"step": 93
},
{
"epoch": 0.34,
"learning_rate": 7.820793396811258e-05,
"loss": 1.5159,
"step": 96
},
{
"epoch": 0.35,
"learning_rate": 7.80703689961859e-05,
"loss": 1.4197,
"step": 99
},
{
"epoch": 0.36,
"learning_rate": 7.792784764495791e-05,
"loss": 1.4168,
"step": 102
},
{
"epoch": 0.37,
"learning_rate": 7.778038846927566e-05,
"loss": 1.4997,
"step": 105
},
{
"epoch": 0.38,
"learning_rate": 7.762801066684133e-05,
"loss": 1.4985,
"step": 108
},
{
"epoch": 0.39,
"learning_rate": 7.747073407571291e-05,
"loss": 1.2951,
"step": 111
},
{
"epoch": 0.4,
"learning_rate": 7.730857917172132e-05,
"loss": 1.3738,
"step": 114
},
{
"epoch": 0.42,
"learning_rate": 7.714156706580487e-05,
"loss": 1.385,
"step": 117
},
{
"epoch": 0.43,
"learning_rate": 7.696971950126068e-05,
"loss": 1.4614,
"step": 120
},
{
"epoch": 0.44,
"learning_rate": 7.679305885091398e-05,
"loss": 1.3669,
"step": 123
},
{
"epoch": 0.45,
"learning_rate": 7.661160811420536e-05,
"loss": 1.3745,
"step": 126
},
{
"epoch": 0.46,
"learning_rate": 7.642539091419654e-05,
"loss": 1.3872,
"step": 129
},
{
"epoch": 0.47,
"learning_rate": 7.623443149449479e-05,
"loss": 1.3118,
"step": 132
},
{
"epoch": 0.48,
"learning_rate": 7.603875471609677e-05,
"loss": 1.3973,
"step": 135
},
{
"epoch": 0.49,
"learning_rate": 7.583838605415177e-05,
"loss": 1.4051,
"step": 138
},
{
"epoch": 0.5,
"learning_rate": 7.563335159464515e-05,
"loss": 1.4145,
"step": 141
},
{
"epoch": 0.51,
"learning_rate": 7.542367803100223e-05,
"loss": 1.342,
"step": 144
},
{
"epoch": 0.52,
"learning_rate": 7.520939266061304e-05,
"loss": 1.3072,
"step": 147
},
{
"epoch": 0.53,
"learning_rate": 7.499052338127845e-05,
"loss": 1.3066,
"step": 150
},
{
"epoch": 0.54,
"learning_rate": 7.476709868757825e-05,
"loss": 1.3681,
"step": 153
},
{
"epoch": 0.55,
"learning_rate": 7.453914766716133e-05,
"loss": 1.3416,
"step": 156
},
{
"epoch": 0.56,
"learning_rate": 7.430669999695885e-05,
"loss": 1.4098,
"step": 159
},
{
"epoch": 0.58,
"learning_rate": 7.406978593932055e-05,
"loss": 1.4484,
"step": 162
},
{
"epoch": 0.59,
"learning_rate": 7.382843633807488e-05,
"loss": 1.2463,
"step": 165
},
{
"epoch": 0.6,
"learning_rate": 7.358268261451353e-05,
"loss": 1.3308,
"step": 168
},
{
"epoch": 0.61,
"learning_rate": 7.33325567633006e-05,
"loss": 1.2935,
"step": 171
},
{
"epoch": 0.62,
"learning_rate": 7.307809134830729e-05,
"loss": 1.2542,
"step": 174
},
{
"epoch": 0.63,
"learning_rate": 7.281931949837233e-05,
"loss": 1.2856,
"step": 177
},
{
"epoch": 0.64,
"learning_rate": 7.255627490298902e-05,
"loss": 1.3299,
"step": 180
},
{
"epoch": 0.65,
"learning_rate": 7.228899180791914e-05,
"loss": 1.2866,
"step": 183
},
{
"epoch": 0.66,
"learning_rate": 7.201750501073452e-05,
"loss": 1.2212,
"step": 186
},
{
"epoch": 0.67,
"learning_rate": 7.174184985628679e-05,
"loss": 1.2903,
"step": 189
},
{
"epoch": 0.68,
"learning_rate": 7.146206223210571e-05,
"loss": 1.2567,
"step": 192
},
{
"epoch": 0.69,
"learning_rate": 7.117817856372709e-05,
"loss": 1.2662,
"step": 195
},
{
"epoch": 0.7,
"learning_rate": 7.089023580995057e-05,
"loss": 1.2442,
"step": 198
},
{
"epoch": 0.71,
"learning_rate": 7.059827145802777e-05,
"loss": 1.2454,
"step": 201
},
{
"epoch": 0.72,
"learning_rate": 7.03023235187821e-05,
"loss": 1.3152,
"step": 204
},
{
"epoch": 0.74,
"learning_rate": 7.000243052165989e-05,
"loss": 1.2658,
"step": 207
},
{
"epoch": 0.75,
"learning_rate": 6.969863150971439e-05,
"loss": 1.1871,
"step": 210
},
{
"epoch": 0.76,
"learning_rate": 6.939096603452269e-05,
"loss": 1.2346,
"step": 213
},
{
"epoch": 0.77,
"learning_rate": 6.907947415103656e-05,
"loss": 1.1971,
"step": 216
},
{
"epoch": 0.78,
"learning_rate": 6.87641964123676e-05,
"loss": 1.2636,
"step": 219
},
{
"epoch": 0.79,
"learning_rate": 6.844517386450775e-05,
"loss": 1.2181,
"step": 222
},
{
"epoch": 0.8,
"learning_rate": 6.812244804098544e-05,
"loss": 1.2436,
"step": 225
},
{
"epoch": 0.81,
"learning_rate": 6.779606095745832e-05,
"loss": 1.249,
"step": 228
},
{
"epoch": 0.82,
"learning_rate": 6.746605510624337e-05,
"loss": 1.2698,
"step": 231
},
{
"epoch": 0.83,
"learning_rate": 6.713247345078465e-05,
"loss": 1.1873,
"step": 234
},
{
"epoch": 0.84,
"learning_rate": 6.679535942006004e-05,
"loss": 1.1497,
"step": 237
},
{
"epoch": 0.85,
"learning_rate": 6.645475690292716e-05,
"loss": 1.2872,
"step": 240
},
{
"epoch": 0.86,
"learning_rate": 6.611071024240946e-05,
"loss": 1.2413,
"step": 243
},
{
"epoch": 0.87,
"learning_rate": 6.576326422992326e-05,
"loss": 1.213,
"step": 246
},
{
"epoch": 0.88,
"learning_rate": 6.541246409944634e-05,
"loss": 1.2931,
"step": 249
},
{
"epoch": 0.9,
"learning_rate": 6.505835552162887e-05,
"loss": 1.2878,
"step": 252
},
{
"epoch": 0.91,
"learning_rate": 6.470098459784768e-05,
"loss": 1.2502,
"step": 255
},
{
"epoch": 0.92,
"learning_rate": 6.434039785420417e-05,
"loss": 1.2416,
"step": 258
},
{
"epoch": 0.93,
"learning_rate": 6.397664223546721e-05,
"loss": 1.2468,
"step": 261
},
{
"epoch": 0.94,
"learning_rate": 6.360976509896133e-05,
"loss": 1.2207,
"step": 264
},
{
"epoch": 0.95,
"learning_rate": 6.323981420840124e-05,
"loss": 1.2038,
"step": 267
},
{
"epoch": 0.96,
"learning_rate": 6.286683772767365e-05,
"loss": 1.3596,
"step": 270
},
{
"epoch": 0.97,
"learning_rate": 6.249088421456656e-05,
"loss": 1.1464,
"step": 273
},
{
"epoch": 0.98,
"learning_rate": 6.211200261444774e-05,
"loss": 1.2218,
"step": 276
},
{
"epoch": 0.99,
"learning_rate": 6.173024225389242e-05,
"loss": 1.1165,
"step": 279
},
{
"epoch": 1.0,
"eval_gen_len": 28.29241516966068,
"eval_loss": 1.1089943647384644,
"eval_rouge1": 57.9239,
"eval_rouge2": 31.9259,
"eval_rougeL": 53.8737,
"eval_rougeLsum": 54.9811,
"eval_runtime": 90.9427,
"eval_samples_per_second": 11.018,
"eval_steps_per_second": 2.76,
"step": 281
},
{
"epoch": 1.0,
"learning_rate": 6.134565283426149e-05,
"loss": 1.1982,
"step": 282
},
{
"epoch": 1.01,
"learning_rate": 6.0958284425230914e-05,
"loss": 1.117,
"step": 285
},
{
"epoch": 1.02,
"learning_rate": 6.0568187458273134e-05,
"loss": 1.1124,
"step": 288
},
{
"epoch": 1.03,
"learning_rate": 6.017541272009135e-05,
"loss": 1.0754,
"step": 291
},
{
"epoch": 1.04,
"learning_rate": 5.9780011346007745e-05,
"loss": 1.046,
"step": 294
},
{
"epoch": 1.06,
"learning_rate": 5.9382034813306014e-05,
"loss": 1.1768,
"step": 297
},
{
"epoch": 1.07,
"learning_rate": 5.898153493452965e-05,
"loss": 1.1407,
"step": 300
},
{
"epoch": 1.08,
"learning_rate": 5.857856385073643e-05,
"loss": 1.1402,
"step": 303
},
{
"epoch": 1.09,
"learning_rate": 5.817317402471016e-05,
"loss": 1.0946,
"step": 306
},
{
"epoch": 1.1,
"learning_rate": 5.776541823413062e-05,
"loss": 1.0745,
"step": 309
},
{
"epoch": 1.11,
"learning_rate": 5.735534956470233e-05,
"loss": 1.1032,
"step": 312
},
{
"epoch": 1.12,
"learning_rate": 5.6943021403243415e-05,
"loss": 1.0543,
"step": 315
},
{
"epoch": 1.13,
"learning_rate": 5.652848743073513e-05,
"loss": 1.0755,
"step": 318
},
{
"epoch": 1.14,
"learning_rate": 5.611180161533318e-05,
"loss": 1.0945,
"step": 321
},
{
"epoch": 1.15,
"learning_rate": 5.569301820534154e-05,
"loss": 1.158,
"step": 324
},
{
"epoch": 1.16,
"learning_rate": 5.5272191722149965e-05,
"loss": 1.1357,
"step": 327
},
{
"epoch": 1.17,
"learning_rate": 5.484937695313573e-05,
"loss": 1.1591,
"step": 330
},
{
"epoch": 1.18,
"learning_rate": 5.4424628944531024e-05,
"loss": 1.0965,
"step": 333
},
{
"epoch": 1.19,
"learning_rate": 5.399800299425631e-05,
"loss": 1.0746,
"step": 336
},
{
"epoch": 1.2,
"learning_rate": 5.356955464472121e-05,
"loss": 1.1068,
"step": 339
},
{
"epoch": 1.21,
"learning_rate": 5.3139339675593446e-05,
"loss": 1.0997,
"step": 342
},
{
"epoch": 1.23,
"learning_rate": 5.2707414096536793e-05,
"loss": 1.1239,
"step": 345
},
{
"epoch": 1.24,
"learning_rate": 5.227383413991925e-05,
"loss": 1.1499,
"step": 348
},
{
"epoch": 1.25,
"learning_rate": 5.1838656253492175e-05,
"loss": 1.0583,
"step": 351
},
{
"epoch": 1.26,
"learning_rate": 5.1401937093041305e-05,
"loss": 0.9659,
"step": 354
},
{
"epoch": 1.27,
"learning_rate": 5.096373351501074e-05,
"loss": 1.0781,
"step": 357
},
{
"epoch": 1.28,
"learning_rate": 5.052410256910085e-05,
"loss": 1.0969,
"step": 360
},
{
"epoch": 1.29,
"learning_rate": 5.008310149084097e-05,
"loss": 1.1756,
"step": 363
},
{
"epoch": 1.3,
"learning_rate": 4.964078769413782e-05,
"loss": 1.0718,
"step": 366
},
{
"epoch": 1.31,
"learning_rate": 4.919721876380094e-05,
"loss": 1.1295,
"step": 369
},
{
"epoch": 1.32,
"learning_rate": 4.8752452448045626e-05,
"loss": 1.1278,
"step": 372
},
{
"epoch": 1.33,
"learning_rate": 4.830654665097469e-05,
"loss": 1.1014,
"step": 375
},
{
"epoch": 1.34,
"learning_rate": 4.785955942504001e-05,
"loss": 1.0543,
"step": 378
},
{
"epoch": 1.35,
"learning_rate": 4.741154896348458e-05,
"loss": 1.126,
"step": 381
},
{
"epoch": 1.36,
"learning_rate": 4.696257359276641e-05,
"loss": 1.0462,
"step": 384
},
{
"epoch": 1.37,
"learning_rate": 4.651269176496496e-05,
"loss": 1.1578,
"step": 387
},
{
"epoch": 1.39,
"learning_rate": 4.606196205017133e-05,
"loss": 1.1495,
"step": 390
},
{
"epoch": 1.4,
"learning_rate": 4.561044312886294e-05,
"loss": 1.0439,
"step": 393
},
{
"epoch": 1.41,
"learning_rate": 4.5158193784263975e-05,
"loss": 1.0773,
"step": 396
},
{
"epoch": 1.42,
"learning_rate": 4.4705272894692425e-05,
"loss": 1.1168,
"step": 399
},
{
"epoch": 1.43,
"learning_rate": 4.425173942589462e-05,
"loss": 1.0929,
"step": 402
},
{
"epoch": 1.44,
"learning_rate": 4.379765242336859e-05,
"loss": 1.1132,
"step": 405
},
{
"epoch": 1.45,
"learning_rate": 4.3343071004676905e-05,
"loss": 0.9778,
"step": 408
},
{
"epoch": 1.46,
"learning_rate": 4.288805435175011e-05,
"loss": 1.0954,
"step": 411
},
{
"epoch": 1.47,
"learning_rate": 4.2432661703181875e-05,
"loss": 1.0817,
"step": 414
},
{
"epoch": 1.48,
"learning_rate": 4.197695234651674e-05,
"loss": 1.1092,
"step": 417
},
{
"epoch": 1.49,
"learning_rate": 4.152098561053148e-05,
"loss": 1.1176,
"step": 420
},
{
"epoch": 1.5,
"learning_rate": 4.1064820857511e-05,
"loss": 1.0565,
"step": 423
},
{
"epoch": 1.51,
"learning_rate": 4.0608517475520084e-05,
"loss": 1.1288,
"step": 426
},
{
"epoch": 1.52,
"learning_rate": 4.015213487067163e-05,
"loss": 1.1744,
"step": 429
},
{
"epoch": 1.53,
"learning_rate": 3.96957324593925e-05,
"loss": 1.1086,
"step": 432
},
{
"epoch": 1.55,
"learning_rate": 3.92393696606882e-05,
"loss": 1.0737,
"step": 435
},
{
"epoch": 1.56,
"learning_rate": 3.878310588840703e-05,
"loss": 1.1223,
"step": 438
},
{
"epoch": 1.57,
"learning_rate": 3.832700054350511e-05,
"loss": 1.0578,
"step": 441
},
{
"epoch": 1.58,
"learning_rate": 3.787111300631287e-05,
"loss": 1.0141,
"step": 444
},
{
"epoch": 1.59,
"learning_rate": 3.7415502628804334e-05,
"loss": 1.0913,
"step": 447
},
{
"epoch": 1.6,
"learning_rate": 3.6960228726870186e-05,
"loss": 1.1208,
"step": 450
},
{
"epoch": 1.61,
"learning_rate": 3.650535057259536e-05,
"loss": 1.073,
"step": 453
},
{
"epoch": 1.62,
"learning_rate": 3.6050927386542365e-05,
"loss": 1.1406,
"step": 456
},
{
"epoch": 1.63,
"learning_rate": 3.5597018330041476e-05,
"loss": 1.0705,
"step": 459
},
{
"epoch": 1.64,
"learning_rate": 3.514368249748849e-05,
"loss": 1.0571,
"step": 462
},
{
"epoch": 1.65,
"learning_rate": 3.469097890865113e-05,
"loss": 1.0793,
"step": 465
},
{
"epoch": 1.66,
"learning_rate": 3.4238966500985405e-05,
"loss": 1.1336,
"step": 468
},
{
"epoch": 1.67,
"learning_rate": 3.378770412196249e-05,
"loss": 1.0964,
"step": 471
},
{
"epoch": 1.68,
"learning_rate": 3.333725052140732e-05,
"loss": 1.0749,
"step": 474
},
{
"epoch": 1.69,
"learning_rate": 3.2887664343850045e-05,
"loss": 1.0811,
"step": 477
},
{
"epoch": 1.71,
"learning_rate": 3.243900412089106e-05,
"loss": 1.082,
"step": 480
},
{
"epoch": 1.72,
"learning_rate": 3.199132826358075e-05,
"loss": 1.0476,
"step": 483
},
{
"epoch": 1.73,
"learning_rate": 3.154469505481503e-05,
"loss": 1.1107,
"step": 486
},
{
"epoch": 1.74,
"learning_rate": 3.109916264174743e-05,
"loss": 1.1545,
"step": 489
},
{
"epoch": 1.75,
"learning_rate": 3.065478902821894e-05,
"loss": 1.0738,
"step": 492
},
{
"epoch": 1.76,
"learning_rate": 3.0211632067206533e-05,
"loss": 1.0883,
"step": 495
},
{
"epoch": 1.77,
"learning_rate": 2.976974945329125e-05,
"loss": 1.0172,
"step": 498
},
{
"epoch": 1.78,
"learning_rate": 2.932919871514689e-05,
"loss": 1.076,
"step": 501
},
{
"epoch": 1.79,
"learning_rate": 2.8890037208050486e-05,
"loss": 1.1079,
"step": 504
},
{
"epoch": 1.8,
"learning_rate": 2.845232210641517e-05,
"loss": 1.0263,
"step": 507
},
{
"epoch": 1.81,
"learning_rate": 2.801611039634654e-05,
"loss": 1.0274,
"step": 510
},
{
"epoch": 1.82,
"learning_rate": 2.7581458868223796e-05,
"loss": 1.0736,
"step": 513
},
{
"epoch": 1.83,
"learning_rate": 2.714842410930612e-05,
"loss": 1.0472,
"step": 516
},
{
"epoch": 1.84,
"learning_rate": 2.6717062496365574e-05,
"loss": 1.0982,
"step": 519
},
{
"epoch": 1.85,
"learning_rate": 2.6287430188347456e-05,
"loss": 1.0779,
"step": 522
},
{
"epoch": 1.87,
"learning_rate": 2.5859583119058967e-05,
"loss": 1.0563,
"step": 525
},
{
"epoch": 1.88,
"learning_rate": 2.5433576989887115e-05,
"loss": 1.0575,
"step": 528
},
{
"epoch": 1.89,
"learning_rate": 2.5009467262547072e-05,
"loss": 1.0763,
"step": 531
},
{
"epoch": 1.9,
"learning_rate": 2.458730915186153e-05,
"loss": 1.0557,
"step": 534
},
{
"epoch": 1.91,
"learning_rate": 2.4167157618572312e-05,
"loss": 0.9993,
"step": 537
},
{
"epoch": 1.92,
"learning_rate": 2.374906736218506e-05,
"loss": 1.0631,
"step": 540
},
{
"epoch": 1.93,
"learning_rate": 2.333309281384784e-05,
"loss": 0.9966,
"step": 543
},
{
"epoch": 1.94,
"learning_rate": 2.2919288129264837e-05,
"loss": 1.0742,
"step": 546
},
{
"epoch": 1.95,
"learning_rate": 2.250770718164579e-05,
"loss": 1.0572,
"step": 549
},
{
"epoch": 1.96,
"learning_rate": 2.209840355469223e-05,
"loss": 1.0628,
"step": 552
},
{
"epoch": 1.97,
"learning_rate": 2.1691430535621432e-05,
"loss": 1.0401,
"step": 555
},
{
"epoch": 1.98,
"learning_rate": 2.1286841108228987e-05,
"loss": 0.9666,
"step": 558
},
{
"epoch": 1.99,
"learning_rate": 2.0884687945990744e-05,
"loss": 1.0763,
"step": 561
},
{
"epoch": 2.0,
"eval_gen_len": 29.696606786427147,
"eval_loss": 1.0267277956008911,
"eval_rouge1": 59.9605,
"eval_rouge2": 34.0298,
"eval_rougeL": 55.7523,
"eval_rougeLsum": 56.8021,
"eval_runtime": 96.5427,
"eval_samples_per_second": 10.379,
"eval_steps_per_second": 2.6,
"step": 563
},
{
"epoch": 2.0,
"learning_rate": 2.048502340520538e-05,
"loss": 1.0732,
"step": 564
},
{
"epoch": 2.01,
"learning_rate": 2.008789951817809e-05,
"loss": 0.9671,
"step": 567
},
{
"epoch": 2.02,
"learning_rate": 1.9693367986446415e-05,
"loss": 1.0325,
"step": 570
},
{
"epoch": 2.04,
"learning_rate": 1.930148017404936e-05,
"loss": 0.9656,
"step": 573
},
{
"epoch": 2.05,
"learning_rate": 1.8912287100840275e-05,
"loss": 0.9455,
"step": 576
},
{
"epoch": 2.06,
"learning_rate": 1.852583943584449e-05,
"loss": 0.993,
"step": 579
},
{
"epoch": 2.07,
"learning_rate": 1.8142187490662827e-05,
"loss": 0.9598,
"step": 582
},
{
"epoch": 2.08,
"learning_rate": 1.7761381212921508e-05,
"loss": 0.978,
"step": 585
},
{
"epoch": 2.09,
"learning_rate": 1.7383470179769416e-05,
"loss": 1.0325,
"step": 588
},
{
"epoch": 2.1,
"learning_rate": 1.700850359142373e-05,
"loss": 0.9173,
"step": 591
},
{
"epoch": 2.11,
"learning_rate": 1.663653026476452e-05,
"loss": 1.0184,
"step": 594
},
{
"epoch": 2.12,
"learning_rate": 1.6267598626979264e-05,
"loss": 0.9831,
"step": 597
},
{
"epoch": 2.13,
"learning_rate": 1.5901756709258133e-05,
"loss": 0.9981,
"step": 600
},
{
"epoch": 2.14,
"learning_rate": 1.553905214054082e-05,
"loss": 0.9645,
"step": 603
},
{
"epoch": 2.15,
"learning_rate": 1.5179532141315685e-05,
"loss": 0.9968,
"step": 606
},
{
"epoch": 2.16,
"learning_rate": 1.4823243517472187e-05,
"loss": 0.9537,
"step": 609
},
{
"epoch": 2.17,
"learning_rate": 1.4470232654207208e-05,
"loss": 0.9923,
"step": 612
},
{
"epoch": 2.18,
"learning_rate": 1.4120545509986103e-05,
"loss": 0.9497,
"step": 615
},
{
"epoch": 2.2,
"learning_rate": 1.3774227610559461e-05,
"loss": 0.9502,
"step": 618
},
{
"epoch": 2.21,
"learning_rate": 1.3431324043036104e-05,
"loss": 0.9703,
"step": 621
},
{
"epoch": 2.22,
"learning_rate": 1.3091879450013086e-05,
"loss": 0.9592,
"step": 624
},
{
"epoch": 2.23,
"learning_rate": 1.2755938023763821e-05,
"loss": 0.9748,
"step": 627
},
{
"epoch": 2.24,
"learning_rate": 1.2423543500484607e-05,
"loss": 0.9978,
"step": 630
},
{
"epoch": 2.25,
"learning_rate": 1.2094739154600616e-05,
"loss": 1.0247,
"step": 633
},
{
"epoch": 2.26,
"learning_rate": 1.1769567793132048e-05,
"loss": 0.9923,
"step": 636
},
{
"epoch": 2.27,
"learning_rate": 1.1448071750121042e-05,
"loss": 0.9638,
"step": 639
},
{
"epoch": 2.28,
"learning_rate": 1.113029288112022e-05,
"loss": 0.9403,
"step": 642
},
{
"epoch": 2.29,
"learning_rate": 1.0816272557743525e-05,
"loss": 1.0162,
"step": 645
},
{
"epoch": 2.3,
"learning_rate": 1.050605166228006e-05,
"loss": 0.9988,
"step": 648
},
{
"epoch": 2.31,
"learning_rate": 1.0199670582371573e-05,
"loss": 1.004,
"step": 651
},
{
"epoch": 2.32,
"learning_rate": 9.897169205754461e-06,
"loss": 0.9909,
"step": 654
},
{
"epoch": 2.33,
"learning_rate": 9.598586915066766e-06,
"loss": 0.977,
"step": 657
},
{
"epoch": 2.34,
"learning_rate": 9.303962582720887e-06,
"loss": 1.001,
"step": 660
},
{
"epoch": 2.36,
"learning_rate": 9.013334565842929e-06,
"loss": 0.9925,
"step": 663
},
{
"epoch": 2.37,
"learning_rate": 8.72674070127881e-06,
"loss": 0.9803,
"step": 666
},
{
"epoch": 2.38,
"learning_rate": 8.44421830066843e-06,
"loss": 0.9129,
"step": 669
},
{
"epoch": 2.39,
"learning_rate": 8.165804145588008e-06,
"loss": 1.0281,
"step": 672
},
{
"epoch": 2.4,
"learning_rate": 7.891534482761463e-06,
"loss": 0.9433,
"step": 675
},
{
"epoch": 2.41,
"learning_rate": 7.621445019341522e-06,
"loss": 0.9536,
"step": 678
},
{
"epoch": 2.42,
"learning_rate": 7.355570918260979e-06,
"loss": 1.0091,
"step": 681
},
{
"epoch": 2.43,
"learning_rate": 7.0939467936548045e-06,
"loss": 1.0031,
"step": 684
},
{
"epoch": 2.44,
"learning_rate": 6.836606706353786e-06,
"loss": 0.9501,
"step": 687
},
{
"epoch": 2.45,
"learning_rate": 6.583584159450116e-06,
"loss": 1.0059,
"step": 690
},
{
"epoch": 2.46,
"learning_rate": 6.334912093935605e-06,
"loss": 1.0037,
"step": 693
},
{
"epoch": 2.47,
"learning_rate": 6.090622884413164e-06,
"loss": 0.9347,
"step": 696
},
{
"epoch": 2.48,
"learning_rate": 5.85074833488191e-06,
"loss": 0.9206,
"step": 699
},
{
"epoch": 2.49,
"learning_rate": 5.6153196745965774e-06,
"loss": 0.9739,
"step": 702
},
{
"epoch": 2.5,
"learning_rate": 5.384367554001864e-06,
"loss": 0.9265,
"step": 705
},
{
"epoch": 2.52,
"learning_rate": 5.157922040741996e-06,
"loss": 0.9867,
"step": 708
},
{
"epoch": 2.53,
"learning_rate": 4.936012615746193e-06,
"loss": 0.9579,
"step": 711
},
{
"epoch": 2.54,
"learning_rate": 4.71866816939063e-06,
"loss": 0.9567,
"step": 714
},
{
"epoch": 2.55,
"learning_rate": 4.505916997737143e-06,
"loss": 0.9833,
"step": 717
},
{
"epoch": 2.56,
"learning_rate": 4.29778679884937e-06,
"loss": 0.9967,
"step": 720
},
{
"epoch": 2.57,
"learning_rate": 4.094304669186761e-06,
"loss": 0.9324,
"step": 723
},
{
"epoch": 2.58,
"learning_rate": 3.895497100076892e-06,
"loss": 0.9994,
"step": 726
},
{
"epoch": 2.59,
"learning_rate": 3.701389974266536e-06,
"loss": 0.9551,
"step": 729
},
{
"epoch": 2.6,
"learning_rate": 3.512008562552005e-06,
"loss": 1.001,
"step": 732
},
{
"epoch": 2.61,
"learning_rate": 3.3273775204891678e-06,
"loss": 1.0111,
"step": 735
},
{
"epoch": 2.62,
"learning_rate": 3.1475208851834815e-06,
"loss": 1.0089,
"step": 738
},
{
"epoch": 2.63,
"learning_rate": 2.972462072160682e-06,
"loss": 0.9709,
"step": 741
},
{
"epoch": 2.64,
"learning_rate": 2.8022238723182635e-06,
"loss": 0.9676,
"step": 744
},
{
"epoch": 2.65,
"learning_rate": 2.6368284489583396e-06,
"loss": 0.9643,
"step": 747
},
{
"epoch": 2.66,
"learning_rate": 2.4762973349022355e-06,
"loss": 0.9753,
"step": 750
},
{
"epoch": 2.67,
"learning_rate": 2.320651429687093e-06,
"loss": 0.8655,
"step": 753
},
{
"epoch": 2.69,
"learning_rate": 2.169910996844968e-06,
"loss": 1.0049,
"step": 756
},
{
"epoch": 2.7,
"learning_rate": 2.0240956612647487e-06,
"loss": 1.0138,
"step": 759
},
{
"epoch": 2.71,
"learning_rate": 1.8832244066371696e-06,
"loss": 1.0226,
"step": 762
},
{
"epoch": 2.72,
"learning_rate": 1.7473155729833234e-06,
"loss": 0.9316,
"step": 765
},
{
"epoch": 2.73,
"learning_rate": 1.616386854266989e-06,
"loss": 0.9972,
"step": 768
},
{
"epoch": 2.74,
"learning_rate": 1.4904552960910512e-06,
"loss": 0.9322,
"step": 771
},
{
"epoch": 2.75,
"learning_rate": 1.3695372934783157e-06,
"loss": 0.9403,
"step": 774
},
{
"epoch": 2.76,
"learning_rate": 1.2536485887370754e-06,
"loss": 0.998,
"step": 777
},
{
"epoch": 2.77,
"learning_rate": 1.142804269411606e-06,
"loss": 1.0496,
"step": 780
},
{
"epoch": 2.78,
"learning_rate": 1.0370187663178854e-06,
"loss": 0.964,
"step": 783
},
{
"epoch": 2.79,
"learning_rate": 9.363058516649138e-07,
"loss": 0.9786,
"step": 786
},
{
"epoch": 2.8,
"learning_rate": 8.406786372616405e-07,
"loss": 0.9675,
"step": 789
},
{
"epoch": 2.81,
"learning_rate": 7.501495728100061e-07,
"loss": 0.9009,
"step": 792
},
{
"epoch": 2.82,
"learning_rate": 6.647304442840652e-07,
"loss": 0.9913,
"step": 795
},
{
"epoch": 2.83,
"learning_rate": 5.844323723955736e-07,
"loss": 1.0268,
"step": 798
},
{
"epoch": 2.85,
"learning_rate": 5.092658111462179e-07,
"loss": 0.9936,
"step": 801
},
{
"epoch": 2.86,
"learning_rate": 4.392405464665883e-07,
"loss": 0.9537,
"step": 804
},
{
"epoch": 2.87,
"learning_rate": 3.743656949421315e-07,
"loss": 1.0263,
"step": 807
},
{
"epoch": 2.88,
"learning_rate": 3.14649702626304e-07,
"loss": 1.0091,
"step": 810
},
{
"epoch": 2.89,
"learning_rate": 2.6010034394095e-07,
"loss": 0.9997,
"step": 813
},
{
"epoch": 2.9,
"learning_rate": 2.1072472066414606e-07,
"loss": 1.0062,
"step": 816
},
{
"epoch": 2.91,
"learning_rate": 1.6652926100565237e-07,
"loss": 0.9276,
"step": 819
},
{
"epoch": 2.92,
"learning_rate": 1.2751971876999504e-07,
"loss": 0.9591,
"step": 822
},
{
"epoch": 2.93,
"learning_rate": 9.37011726073811e-08,
"loss": 0.949,
"step": 825
},
{
"epoch": 2.94,
"learning_rate": 6.507802535253405e-08,
"loss": 1.0096,
"step": 828
},
{
"epoch": 2.95,
"learning_rate": 4.165400345145898e-08,
"loss": 0.9327,
"step": 831
},
{
"epoch": 2.96,
"learning_rate": 2.3432156476310698e-08,
"loss": 1.0365,
"step": 834
},
{
"epoch": 2.97,
"learning_rate": 1.0414856728373501e-08,
"loss": 1.0014,
"step": 837
},
{
"epoch": 2.98,
"learning_rate": 2.603798929206036e-09,
"loss": 0.9712,
"step": 840
},
{
"epoch": 2.99,
"learning_rate": 0.0,
"loss": 0.9595,
"step": 843
},
{
"epoch": 2.99,
"eval_gen_len": 29.714570858283434,
"eval_loss": 1.013558268547058,
"eval_rouge1": 59.9513,
"eval_rouge2": 33.9118,
"eval_rougeL": 55.7815,
"eval_rougeLsum": 56.9064,
"eval_runtime": 94.9567,
"eval_samples_per_second": 10.552,
"eval_steps_per_second": 2.643,
"step": 843
},
{
"epoch": 2.99,
"step": 843,
"total_flos": 3.289406245752013e+16,
"train_loss": 1.2042206371664859,
"train_runtime": 2861.2173,
"train_samples_per_second": 18.888,
"train_steps_per_second": 0.295
}
],
"max_steps": 843,
"num_train_epochs": 3,
"total_flos": 3.289406245752013e+16,
"trial_name": null,
"trial_params": null
}
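
The log above is a standard Hugging Face Trainer state file: each entry in "log_history" records either a training step (epoch, learning_rate, loss, step) or an evaluation pass (eval_loss, eval_rouge1/2/L/Lsum, eval_gen_len, runtime fields), and the final entry summarizes the run (train_loss, train_runtime, total_flos). A minimal sketch of how it could be inspected is shown below, assuming the JSON has been saved locally as "trainer_state.json" (the filename and the summary printout are illustrative assumptions, not part of the original log):

# Minimal sketch: parse the trainer state above and summarize training vs. evaluation entries.
# Assumes the JSON is saved locally as "trainer_state.json" (assumed filename).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e and "eval_loss" not in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"training steps logged: {len(train_logs)}")
for e in eval_logs:
    print(
        f"epoch {e['epoch']:.2f}  step {e['step']:4d}  "
        f"eval_loss {e['eval_loss']:.4f}  rougeL {e['eval_rougeL']:.2f}"
    )

For this run, the three evaluation passes show eval_loss falling from about 1.109 at epoch 1 to 1.014 at epoch 3, with ROUGE-L improving from 53.87 to 55.78.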