pingusPongus / last-checkpoint / trainer_state.json
Training in progress, epoch 1, checkpoint
00dc058
{
"best_metric": 3.0806901454925537,
"best_model_checkpoint": "dq158/pingusPongus/checkpoint-68803",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 68803,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.0001,
"loss": 4.4427,
"step": 500
},
{
"epoch": 0.01,
"learning_rate": 9.999998551451928e-05,
"loss": 3.7748,
"step": 1000
},
{
"epoch": 0.02,
"learning_rate": 9.999994205808551e-05,
"loss": 3.6469,
"step": 1500
},
{
"epoch": 0.03,
"learning_rate": 9.999986963072388e-05,
"loss": 3.6827,
"step": 2000
},
{
"epoch": 0.04,
"learning_rate": 9.999976823247632e-05,
"loss": 3.6186,
"step": 2500
},
{
"epoch": 0.04,
"learning_rate": 9.999963786340163e-05,
"loss": 3.5199,
"step": 3000
},
{
"epoch": 0.05,
"learning_rate": 9.999947852357531e-05,
"loss": 3.5221,
"step": 3500
},
{
"epoch": 0.06,
"learning_rate": 9.999929021308971e-05,
"loss": 3.469,
"step": 4000
},
{
"epoch": 0.07,
"learning_rate": 9.999907293205393e-05,
"loss": 3.5388,
"step": 4500
},
{
"epoch": 0.07,
"learning_rate": 9.999882668059387e-05,
"loss": 3.4063,
"step": 5000
},
{
"epoch": 0.08,
"learning_rate": 9.99985514588522e-05,
"loss": 3.4314,
"step": 5500
},
{
"epoch": 0.09,
"learning_rate": 9.99982472669884e-05,
"loss": 3.4252,
"step": 6000
},
{
"epoch": 0.09,
"learning_rate": 9.999791410517874e-05,
"loss": 3.4736,
"step": 6500
},
{
"epoch": 0.1,
"learning_rate": 9.999755197361624e-05,
"loss": 3.4011,
"step": 7000
},
{
"epoch": 0.11,
"learning_rate": 9.999716087251072e-05,
"loss": 3.4709,
"step": 7500
},
{
"epoch": 0.12,
"learning_rate": 9.99967408020888e-05,
"loss": 3.401,
"step": 8000
},
{
"epoch": 0.12,
"learning_rate": 9.999629176259391e-05,
"loss": 3.4339,
"step": 8500
},
{
"epoch": 0.13,
"learning_rate": 9.999581375428617e-05,
"loss": 3.3573,
"step": 9000
},
{
"epoch": 0.14,
"learning_rate": 9.999530677744258e-05,
"loss": 3.364,
"step": 9500
},
{
"epoch": 0.15,
"learning_rate": 9.999477083235691e-05,
"loss": 3.4358,
"step": 10000
},
{
"epoch": 0.15,
"learning_rate": 9.999420591933965e-05,
"loss": 3.369,
"step": 10500
},
{
"epoch": 0.16,
"learning_rate": 9.999361203871817e-05,
"loss": 3.3874,
"step": 11000
},
{
"epoch": 0.17,
"learning_rate": 9.999298919083656e-05,
"loss": 3.3604,
"step": 11500
},
{
"epoch": 0.17,
"learning_rate": 9.99923373760557e-05,
"loss": 3.3134,
"step": 12000
},
{
"epoch": 0.18,
"learning_rate": 9.999165659475324e-05,
"loss": 3.3988,
"step": 12500
},
{
"epoch": 0.19,
"learning_rate": 9.999094684732369e-05,
"loss": 3.3562,
"step": 13000
},
{
"epoch": 0.2,
"learning_rate": 9.999020813417826e-05,
"loss": 3.4156,
"step": 13500
},
{
"epoch": 0.2,
"learning_rate": 9.998944045574499e-05,
"loss": 3.2524,
"step": 14000
},
{
"epoch": 0.21,
"learning_rate": 9.998864381246869e-05,
"loss": 3.4463,
"step": 14500
},
{
"epoch": 0.22,
"learning_rate": 9.998781820481091e-05,
"loss": 3.3492,
"step": 15000
},
{
"epoch": 0.23,
"learning_rate": 9.998696363325009e-05,
"loss": 3.4512,
"step": 15500
},
{
"epoch": 0.23,
"learning_rate": 9.998608009828132e-05,
"loss": 3.3218,
"step": 16000
},
{
"epoch": 0.24,
"learning_rate": 9.998516760041659e-05,
"loss": 3.2985,
"step": 16500
},
{
"epoch": 0.25,
"learning_rate": 9.998422614018456e-05,
"loss": 3.3771,
"step": 17000
},
{
"epoch": 0.25,
"learning_rate": 9.998325571813079e-05,
"loss": 3.3023,
"step": 17500
},
{
"epoch": 0.26,
"learning_rate": 9.998225633481753e-05,
"loss": 3.2226,
"step": 18000
},
{
"epoch": 0.27,
"learning_rate": 9.998122799082386e-05,
"loss": 3.3422,
"step": 18500
},
{
"epoch": 0.28,
"learning_rate": 9.998017068674558e-05,
"loss": 3.3089,
"step": 19000
},
{
"epoch": 0.28,
"learning_rate": 9.997908442319536e-05,
"loss": 3.2337,
"step": 19500
},
{
"epoch": 0.29,
"learning_rate": 9.99779692008026e-05,
"loss": 3.3586,
"step": 20000
},
{
"epoch": 0.3,
"learning_rate": 9.997682502021345e-05,
"loss": 3.2019,
"step": 20500
},
{
"epoch": 0.31,
"learning_rate": 9.997565188209089e-05,
"loss": 3.2937,
"step": 21000
},
{
"epoch": 0.31,
"learning_rate": 9.997444978711465e-05,
"loss": 3.4064,
"step": 21500
},
{
"epoch": 0.32,
"learning_rate": 9.997321873598125e-05,
"loss": 3.339,
"step": 22000
},
{
"epoch": 0.33,
"learning_rate": 9.9971958729404e-05,
"loss": 3.3237,
"step": 22500
},
{
"epoch": 0.33,
"learning_rate": 9.997066976811294e-05,
"loss": 3.3782,
"step": 23000
},
{
"epoch": 0.34,
"learning_rate": 9.996935185285495e-05,
"loss": 3.336,
"step": 23500
},
{
"epoch": 0.35,
"learning_rate": 9.996800498439362e-05,
"loss": 3.1749,
"step": 24000
},
{
"epoch": 0.36,
"learning_rate": 9.99666291635094e-05,
"loss": 3.252,
"step": 24500
},
{
"epoch": 0.36,
"learning_rate": 9.996522439099943e-05,
"loss": 3.3544,
"step": 25000
},
{
"epoch": 0.37,
"learning_rate": 9.99637906676777e-05,
"loss": 3.2898,
"step": 25500
},
{
"epoch": 0.38,
"learning_rate": 9.996232799437487e-05,
"loss": 3.2753,
"step": 26000
},
{
"epoch": 0.39,
"learning_rate": 9.996083637193849e-05,
"loss": 3.3539,
"step": 26500
},
{
"epoch": 0.39,
"learning_rate": 9.995931580123284e-05,
"loss": 3.2567,
"step": 27000
},
{
"epoch": 0.4,
"learning_rate": 9.995776628313896e-05,
"loss": 3.1842,
"step": 27500
},
{
"epoch": 0.41,
"learning_rate": 9.995618781855464e-05,
"loss": 3.2446,
"step": 28000
},
{
"epoch": 0.41,
"learning_rate": 9.995458040839452e-05,
"loss": 3.2132,
"step": 28500
},
{
"epoch": 0.42,
"learning_rate": 9.995294405358993e-05,
"loss": 3.2992,
"step": 29000
},
{
"epoch": 0.43,
"learning_rate": 9.995127875508903e-05,
"loss": 3.2555,
"step": 29500
},
{
"epoch": 0.44,
"learning_rate": 9.99495845138567e-05,
"loss": 3.3704,
"step": 30000
},
{
"epoch": 0.44,
"learning_rate": 9.994786133087464e-05,
"loss": 3.2629,
"step": 30500
},
{
"epoch": 0.45,
"learning_rate": 9.994610920714126e-05,
"loss": 3.224,
"step": 31000
},
{
"epoch": 0.46,
"learning_rate": 9.994432814367183e-05,
"loss": 3.31,
"step": 31500
},
{
"epoch": 0.47,
"learning_rate": 9.99425181414983e-05,
"loss": 3.2763,
"step": 32000
},
{
"epoch": 0.47,
"learning_rate": 9.994067920166939e-05,
"loss": 3.2862,
"step": 32500
},
{
"epoch": 0.48,
"learning_rate": 9.993881132525067e-05,
"loss": 3.3125,
"step": 33000
},
{
"epoch": 0.49,
"learning_rate": 9.993691451332439e-05,
"loss": 3.1288,
"step": 33500
},
{
"epoch": 0.49,
"learning_rate": 9.993498876698963e-05,
"loss": 3.2958,
"step": 34000
},
{
"epoch": 0.5,
"learning_rate": 9.993303408736217e-05,
"loss": 3.1933,
"step": 34500
},
{
"epoch": 0.51,
"learning_rate": 9.993105047557461e-05,
"loss": 3.2504,
"step": 35000
},
{
"epoch": 0.52,
"learning_rate": 9.992903793277628e-05,
"loss": 3.3293,
"step": 35500
},
{
"epoch": 0.52,
"learning_rate": 9.99269964601333e-05,
"loss": 3.1969,
"step": 36000
},
{
"epoch": 0.53,
"learning_rate": 9.992492605882853e-05,
"loss": 3.2087,
"step": 36500
},
{
"epoch": 0.54,
"learning_rate": 9.99228267300616e-05,
"loss": 3.3125,
"step": 37000
},
{
"epoch": 0.55,
"learning_rate": 9.992069847504891e-05,
"loss": 3.2677,
"step": 37500
},
{
"epoch": 0.55,
"learning_rate": 9.99185412950236e-05,
"loss": 3.2132,
"step": 38000
},
{
"epoch": 0.56,
"learning_rate": 9.991635519123559e-05,
"loss": 3.2534,
"step": 38500
},
{
"epoch": 0.57,
"learning_rate": 9.991414016495155e-05,
"loss": 3.1735,
"step": 39000
},
{
"epoch": 0.57,
"learning_rate": 9.99118962174549e-05,
"loss": 3.2422,
"step": 39500
},
{
"epoch": 0.58,
"learning_rate": 9.990962335004584e-05,
"loss": 3.1625,
"step": 40000
},
{
"epoch": 0.59,
"learning_rate": 9.99073215640413e-05,
"loss": 3.1163,
"step": 40500
},
{
"epoch": 0.6,
"learning_rate": 9.990499086077498e-05,
"loss": 3.1521,
"step": 41000
},
{
"epoch": 0.6,
"learning_rate": 9.990263124159736e-05,
"loss": 3.2036,
"step": 41500
},
{
"epoch": 0.61,
"learning_rate": 9.990024270787561e-05,
"loss": 3.181,
"step": 42000
},
{
"epoch": 0.62,
"learning_rate": 9.989782526099372e-05,
"loss": 3.1672,
"step": 42500
},
{
"epoch": 0.62,
"learning_rate": 9.989537890235238e-05,
"loss": 3.2336,
"step": 43000
},
{
"epoch": 0.63,
"learning_rate": 9.989290363336908e-05,
"loss": 3.1455,
"step": 43500
},
{
"epoch": 0.64,
"learning_rate": 9.989039945547803e-05,
"loss": 3.1859,
"step": 44000
},
{
"epoch": 0.65,
"learning_rate": 9.98878663701302e-05,
"loss": 3.1396,
"step": 44500
},
{
"epoch": 0.65,
"learning_rate": 9.988530437879333e-05,
"loss": 3.2585,
"step": 45000
},
{
"epoch": 0.66,
"learning_rate": 9.988271348295184e-05,
"loss": 3.2201,
"step": 45500
},
{
"epoch": 0.67,
"learning_rate": 9.988009368410698e-05,
"loss": 3.2758,
"step": 46000
},
{
"epoch": 0.68,
"learning_rate": 9.98774449837767e-05,
"loss": 3.1742,
"step": 46500
},
{
"epoch": 0.68,
"learning_rate": 9.987476738349571e-05,
"loss": 3.3212,
"step": 47000
},
{
"epoch": 0.69,
"learning_rate": 9.987206088481545e-05,
"loss": 3.1915,
"step": 47500
},
{
"epoch": 0.7,
"learning_rate": 9.986932548930414e-05,
"loss": 3.1608,
"step": 48000
},
{
"epoch": 0.7,
"learning_rate": 9.986656119854672e-05,
"loss": 3.217,
"step": 48500
},
{
"epoch": 0.71,
"learning_rate": 9.986376801414485e-05,
"loss": 3.1989,
"step": 49000
},
{
"epoch": 0.72,
"learning_rate": 9.986094593771699e-05,
"loss": 3.3067,
"step": 49500
},
{
"epoch": 0.73,
"learning_rate": 9.985809497089827e-05,
"loss": 3.2195,
"step": 50000
},
{
"epoch": 0.73,
"learning_rate": 9.985521511534062e-05,
"loss": 3.148,
"step": 50500
},
{
"epoch": 0.74,
"learning_rate": 9.985230637271266e-05,
"loss": 3.1987,
"step": 51000
},
{
"epoch": 0.75,
"learning_rate": 9.984936874469979e-05,
"loss": 3.1153,
"step": 51500
},
{
"epoch": 0.76,
"learning_rate": 9.984640223300413e-05,
"loss": 3.2841,
"step": 52000
},
{
"epoch": 0.76,
"learning_rate": 9.98434068393445e-05,
"loss": 3.2033,
"step": 52500
},
{
"epoch": 0.77,
"learning_rate": 9.984038256545653e-05,
"loss": 3.2102,
"step": 53000
},
{
"epoch": 0.78,
"learning_rate": 9.983732941309253e-05,
"loss": 3.1817,
"step": 53500
},
{
"epoch": 0.78,
"learning_rate": 9.983424738402156e-05,
"loss": 3.1485,
"step": 54000
},
{
"epoch": 0.79,
"learning_rate": 9.98311364800294e-05,
"loss": 3.2417,
"step": 54500
},
{
"epoch": 0.8,
"learning_rate": 9.982799670291857e-05,
"loss": 3.2174,
"step": 55000
},
{
"epoch": 0.81,
"learning_rate": 9.98248280545083e-05,
"loss": 3.2862,
"step": 55500
},
{
"epoch": 0.81,
"learning_rate": 9.982163053663459e-05,
"loss": 3.201,
"step": 56000
},
{
"epoch": 0.82,
"learning_rate": 9.981840415115014e-05,
"loss": 3.3873,
"step": 56500
},
{
"epoch": 0.83,
"learning_rate": 9.981514889992436e-05,
"loss": 3.1844,
"step": 57000
},
{
"epoch": 0.84,
"learning_rate": 9.981186478484344e-05,
"loss": 3.1807,
"step": 57500
},
{
"epoch": 0.84,
"learning_rate": 9.980855180781021e-05,
"loss": 3.2758,
"step": 58000
},
{
"epoch": 0.85,
"learning_rate": 9.980520997074432e-05,
"loss": 3.1406,
"step": 58500
},
{
"epoch": 0.86,
"learning_rate": 9.980183927558207e-05,
"loss": 3.1607,
"step": 59000
},
{
"epoch": 0.86,
"learning_rate": 9.97984397242765e-05,
"loss": 3.1829,
"step": 59500
},
{
"epoch": 0.87,
"learning_rate": 9.979501131879741e-05,
"loss": 3.2238,
"step": 60000
},
{
"epoch": 0.88,
"learning_rate": 9.979155406113124e-05,
"loss": 3.2348,
"step": 60500
},
{
"epoch": 0.89,
"learning_rate": 9.978806795328121e-05,
"loss": 3.2933,
"step": 61000
},
{
"epoch": 0.89,
"learning_rate": 9.978455299726726e-05,
"loss": 3.2051,
"step": 61500
},
{
"epoch": 0.9,
"learning_rate": 9.978100919512598e-05,
"loss": 3.1736,
"step": 62000
},
{
"epoch": 0.91,
"learning_rate": 9.977743654891077e-05,
"loss": 3.173,
"step": 62500
},
{
"epoch": 0.92,
"learning_rate": 9.977383506069164e-05,
"loss": 3.2732,
"step": 63000
},
{
"epoch": 0.92,
"learning_rate": 9.977020473255539e-05,
"loss": 3.2447,
"step": 63500
},
{
"epoch": 0.93,
"learning_rate": 9.976654556660548e-05,
"loss": 3.2526,
"step": 64000
},
{
"epoch": 0.94,
"learning_rate": 9.976285756496211e-05,
"loss": 3.1814,
"step": 64500
},
{
"epoch": 0.94,
"learning_rate": 9.97591407297622e-05,
"loss": 3.1533,
"step": 65000
},
{
"epoch": 0.95,
"learning_rate": 9.975539506315933e-05,
"loss": 3.0491,
"step": 65500
},
{
"epoch": 0.96,
"learning_rate": 9.975162056732385e-05,
"loss": 3.1918,
"step": 66000
},
{
"epoch": 0.97,
"learning_rate": 9.974781724444272e-05,
"loss": 3.1918,
"step": 66500
},
{
"epoch": 0.97,
"learning_rate": 9.974398509671969e-05,
"loss": 3.1769,
"step": 67000
},
{
"epoch": 0.98,
"learning_rate": 9.974012412637517e-05,
"loss": 3.196,
"step": 67500
},
{
"epoch": 0.99,
"learning_rate": 9.97362343356463e-05,
"loss": 3.1799,
"step": 68000
},
{
"epoch": 1.0,
"learning_rate": 9.973231572678686e-05,
"loss": 3.2516,
"step": 68500
},
{
"epoch": 1.0,
"eval_bleu": 1.0,
"eval_brevity_penalty": 1.0,
"eval_length_ratio": 1.0,
"eval_loss": 3.0806901454925537,
"eval_precisions": [
1.0,
1.0,
1.0,
1.0
],
"eval_reference_length": 7828480,
"eval_runtime": 21022.3759,
"eval_samples_per_second": 0.727,
"eval_steps_per_second": 0.364,
"eval_translation_length": 7828480,
"step": 68803
}
],
"logging_steps": 500,
"max_steps": 2064090,
"num_train_epochs": 30,
"save_steps": 1000,
"total_flos": 1.1808307371678106e+18,
"trial_name": null,
"trial_params": null
}