{
"best_metric": 1.8259689807891846,
"best_model_checkpoint": "model_training/byt5_small/checkpoints-by_file-09-07-11-51/checkpoint-350",
"epoch": 5.685279187817259,
"eval_steps": 25,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08121827411167512,
"grad_norm": 1332.5194091796875,
"learning_rate": 5.102040816326531e-05,
"loss": 57.087,
"step": 5
},
{
"epoch": 0.16243654822335024,
"grad_norm": 73.31609344482422,
"learning_rate": 0.00010204081632653062,
"loss": 16.158,
"step": 10
},
{
"epoch": 0.2436548223350254,
"grad_norm": 7.558203220367432,
"learning_rate": 0.00015306122448979594,
"loss": 4.2458,
"step": 15
},
{
"epoch": 0.3248730964467005,
"grad_norm": 3.45859432220459,
"learning_rate": 0.00020408163265306123,
"loss": 3.5694,
"step": 20
},
{
"epoch": 0.40609137055837563,
"grad_norm": 1.5817078351974487,
"learning_rate": 0.00025510204081632655,
"loss": 2.5571,
"step": 25
},
{
"epoch": 0.40609137055837563,
"eval_loss": 1.86122727394104,
"eval_runtime": 89.2369,
"eval_samples_per_second": 11.206,
"eval_steps_per_second": 1.401,
"step": 25
},
{
"epoch": 0.4873096446700508,
"grad_norm": 0.7394866943359375,
"learning_rate": 0.0003061224489795919,
"loss": 2.1392,
"step": 30
},
{
"epoch": 0.5685279187817259,
"grad_norm": 8.203688621520996,
"learning_rate": 0.00035714285714285714,
"loss": 2.0015,
"step": 35
},
{
"epoch": 0.649746192893401,
"grad_norm": 8.245688438415527,
"learning_rate": 0.00040816326530612246,
"loss": 1.9816,
"step": 40
},
{
"epoch": 0.7309644670050761,
"grad_norm": 1.8892487287521362,
"learning_rate": 0.0004591836734693878,
"loss": 2.3033,
"step": 45
},
{
"epoch": 0.8121827411167513,
"grad_norm": 6.5841898918151855,
"learning_rate": 0.0004999935985425297,
"loss": 3.3388,
"step": 50
},
{
"epoch": 0.8121827411167513,
"eval_loss": 2.1760518550872803,
"eval_runtime": 88.9886,
"eval_samples_per_second": 11.237,
"eval_steps_per_second": 1.405,
"step": 50
},
{
"epoch": 0.8934010152284264,
"grad_norm": 7.12192440032959,
"learning_rate": 0.0004997695819512612,
"loss": 3.6302,
"step": 55
},
{
"epoch": 0.9746192893401016,
"grad_norm": 40.77882385253906,
"learning_rate": 0.0004992258202402822,
"loss": 2.7657,
"step": 60
},
{
"epoch": 1.0558375634517767,
"grad_norm": 2.32853102684021,
"learning_rate": 0.0004983630095117843,
"loss": 2.8533,
"step": 65
},
{
"epoch": 1.1370558375634519,
"grad_norm": 20.28560447692871,
"learning_rate": 0.0004971822543018662,
"loss": 2.6894,
"step": 70
},
{
"epoch": 1.218274111675127,
"grad_norm": 13.825627326965332,
"learning_rate": 0.0004956850661665511,
"loss": 2.6103,
"step": 75
},
{
"epoch": 1.218274111675127,
"eval_loss": 2.4702670574188232,
"eval_runtime": 89.4229,
"eval_samples_per_second": 11.183,
"eval_steps_per_second": 1.398,
"step": 75
},
{
"epoch": 1.299492385786802,
"grad_norm": 10.716843605041504,
"learning_rate": 0.0004938733617467517,
"loss": 2.565,
"step": 80
},
{
"epoch": 1.380710659898477,
"grad_norm": 1.5047727823257446,
"learning_rate": 0.0004917494603146632,
"loss": 2.4888,
"step": 85
},
{
"epoch": 1.4619289340101522,
"grad_norm": 5.189760684967041,
"learning_rate": 0.0004893160808047222,
"loss": 2.4684,
"step": 90
},
{
"epoch": 1.5431472081218274,
"grad_norm": 1.0872949361801147,
"learning_rate": 0.00048657633833293557,
"loss": 2.4044,
"step": 95
},
{
"epoch": 1.6243654822335025,
"grad_norm": 0.9995412230491638,
"learning_rate": 0.0004835337402090316,
"loss": 2.3281,
"step": 100
},
{
"epoch": 1.6243654822335025,
"eval_loss": 2.164686918258667,
"eval_runtime": 89.4619,
"eval_samples_per_second": 11.178,
"eval_steps_per_second": 1.397,
"step": 100
},
{
"epoch": 1.7055837563451777,
"grad_norm": 1.2759788036346436,
"learning_rate": 0.0004801921814465414,
"loss": 2.2785,
"step": 105
},
{
"epoch": 1.7868020304568528,
"grad_norm": 0.9360132217407227,
"learning_rate": 0.00047655593977655674,
"loss": 2.214,
"step": 110
},
{
"epoch": 1.868020304568528,
"grad_norm": 0.33227458596229553,
"learning_rate": 0.0004726296701715489,
"loss": 2.1815,
"step": 115
},
{
"epoch": 1.9492385786802031,
"grad_norm": 0.32235994935035706,
"learning_rate": 0.00046841839888625623,
"loss": 2.1927,
"step": 120
},
{
"epoch": 2.030456852791878,
"grad_norm": 0.5263159275054932,
"learning_rate": 0.0004639275170232734,
"loss": 2.1143,
"step": 125
},
{
"epoch": 2.030456852791878,
"eval_loss": 2.0160372257232666,
"eval_runtime": 88.8689,
"eval_samples_per_second": 11.253,
"eval_steps_per_second": 1.407,
"step": 125
},
{
"epoch": 2.1116751269035534,
"grad_norm": 0.4549558162689209,
"learning_rate": 0.0004591627736315743,
"loss": 2.1318,
"step": 130
},
{
"epoch": 2.1928934010152283,
"grad_norm": 0.670222818851471,
"learning_rate": 0.0004541302683468084,
"loss": 2.1073,
"step": 135
},
{
"epoch": 2.2741116751269037,
"grad_norm": 0.536625862121582,
"learning_rate": 0.0004488364435827881,
"loss": 2.1021,
"step": 140
},
{
"epoch": 2.3553299492385786,
"grad_norm": 0.3629292845726013,
"learning_rate": 0.00044328807628416644,
"loss": 2.0923,
"step": 145
},
{
"epoch": 2.436548223350254,
"grad_norm": 0.2985295057296753,
"learning_rate": 0.0004374922692508611,
"loss": 2.0753,
"step": 150
},
{
"epoch": 2.436548223350254,
"eval_loss": 1.948063611984253,
"eval_runtime": 88.8793,
"eval_samples_per_second": 11.251,
"eval_steps_per_second": 1.406,
"step": 150
},
{
"epoch": 2.517766497461929,
"grad_norm": 0.26300689578056335,
"learning_rate": 0.0004314564420453311,
"loss": 2.0447,
"step": 155
},
{
"epoch": 2.598984771573604,
"grad_norm": 0.1663161963224411,
"learning_rate": 0.0004251883214943475,
"loss": 2.0429,
"step": 160
},
{
"epoch": 2.6802030456852792,
"grad_norm": 0.6070680022239685,
"learning_rate": 0.0004186959317974155,
"loss": 2.0699,
"step": 165
},
{
"epoch": 2.761421319796954,
"grad_norm": 1.7647202014923096,
"learning_rate": 0.00041198758425451266,
"loss": 2.0304,
"step": 170
},
{
"epoch": 2.8426395939086295,
"grad_norm": 0.5461482405662537,
"learning_rate": 0.00040507186662629185,
"loss": 2.0449,
"step": 175
},
{
"epoch": 2.8426395939086295,
"eval_loss": 1.8957690000534058,
"eval_runtime": 88.8966,
"eval_samples_per_second": 11.249,
"eval_steps_per_second": 1.406,
"step": 175
},
{
"epoch": 2.9238578680203045,
"grad_norm": 1.156761646270752,
"learning_rate": 0.0003979576321403705,
"loss": 1.9947,
"step": 180
},
{
"epoch": 3.00507614213198,
"grad_norm": 0.465610533952713,
"learning_rate": 0.0003906539881577793,
"loss": 2.0121,
"step": 185
},
{
"epoch": 3.0862944162436547,
"grad_norm": 0.1722594052553177,
"learning_rate": 0.0003831702845140801,
"loss": 2.0141,
"step": 190
},
{
"epoch": 3.16751269035533,
"grad_norm": 0.24845992028713226,
"learning_rate": 0.00037551610155007613,
"loss": 1.9536,
"step": 195
},
{
"epoch": 3.248730964467005,
"grad_norm": 0.4477495551109314,
"learning_rate": 0.00036770123784744027,
"loss": 2.0201,
"step": 200
},
{
"epoch": 3.248730964467005,
"eval_loss": 1.8864437341690063,
"eval_runtime": 88.9059,
"eval_samples_per_second": 11.248,
"eval_steps_per_second": 1.406,
"step": 200
},
{
"epoch": 3.3299492385786804,
"grad_norm": 0.5112472176551819,
"learning_rate": 0.00035973569768495855,
"loss": 1.9846,
"step": 205
},
{
"epoch": 3.4111675126903553,
"grad_norm": 0.6808192133903503,
"learning_rate": 0.0003516296782314491,
"loss": 1.9839,
"step": 210
},
{
"epoch": 3.4923857868020303,
"grad_norm": 0.24290819466114044,
"learning_rate": 0.00034339355649175095,
"loss": 2.0036,
"step": 215
},
{
"epoch": 3.5736040609137056,
"grad_norm": 0.34100961685180664,
"learning_rate": 0.00033503787602249364,
"loss": 2.0178,
"step": 220
},
{
"epoch": 3.6548223350253806,
"grad_norm": 0.7999327182769775,
"learning_rate": 0.00032657333343465356,
"loss": 1.9947,
"step": 225
},
{
"epoch": 3.6548223350253806,
"eval_loss": 1.8772450685501099,
"eval_runtime": 88.947,
"eval_samples_per_second": 11.243,
"eval_steps_per_second": 1.405,
"step": 225
},
{
"epoch": 3.736040609137056,
"grad_norm": 0.3293197453022003,
"learning_rate": 0.0003180107647001769,
"loss": 2.0078,
"step": 230
},
{
"epoch": 3.817258883248731,
"grad_norm": 0.19176721572875977,
"learning_rate": 0.0003093611312801979,
"loss": 1.9594,
"step": 235
},
{
"epoch": 3.8984771573604062,
"grad_norm": 0.24580970406532288,
"learning_rate": 0.00030063550609261025,
"loss": 1.9514,
"step": 240
},
{
"epoch": 3.979695431472081,
"grad_norm": 0.28957948088645935,
"learning_rate": 0.000291845059336957,
"loss": 1.9809,
"step": 245
},
{
"epoch": 4.060913705583756,
"grad_norm": 0.19108451902866364,
"learning_rate": 0.0002830010441947834,
"loss": 1.9295,
"step": 250
},
{
"epoch": 4.060913705583756,
"eval_loss": 1.8687773942947388,
"eval_runtime": 88.8506,
"eval_samples_per_second": 11.255,
"eval_steps_per_second": 1.407,
"step": 250
},
{
"epoch": 4.1421319796954315,
"grad_norm": 0.18111436069011688,
"learning_rate": 0.00027411478242376017,
"loss": 1.9789,
"step": 255
},
{
"epoch": 4.223350253807107,
"grad_norm": 0.4630391299724579,
"learning_rate": 0.00026519764986401774,
"loss": 1.9657,
"step": 260
},
{
"epoch": 4.304568527918782,
"grad_norm": 0.2104698121547699,
"learning_rate": 0.000256261061875247,
"loss": 1.9403,
"step": 265
},
{
"epoch": 4.385786802030457,
"grad_norm": 0.4305352568626404,
"learning_rate": 0.0002473164587232079,
"loss": 1.9762,
"step": 270
},
{
"epoch": 4.467005076142132,
"grad_norm": 0.25098443031311035,
"learning_rate": 0.0002383752909343547,
"loss": 1.9564,
"step": 275
},
{
"epoch": 4.467005076142132,
"eval_loss": 1.8538275957107544,
"eval_runtime": 88.8543,
"eval_samples_per_second": 11.254,
"eval_steps_per_second": 1.407,
"step": 275
},
{
"epoch": 4.548223350253807,
"grad_norm": 0.3380163908004761,
"learning_rate": 0.0002294490046373259,
"loss": 1.9096,
"step": 280
},
{
"epoch": 4.629441624365482,
"grad_norm": 0.18323899805545807,
"learning_rate": 0.00022054902691006405,
"loss": 1.9775,
"step": 285
},
{
"epoch": 4.710659898477157,
"grad_norm": 0.13836567103862762,
"learning_rate": 0.00021168675115132315,
"loss": 1.9888,
"step": 290
},
{
"epoch": 4.791878172588833,
"grad_norm": 0.20029225945472717,
"learning_rate": 0.00020287352249529153,
"loss": 1.9547,
"step": 295
},
{
"epoch": 4.873096446700508,
"grad_norm": 0.16367541253566742,
"learning_rate": 0.00019412062328800044,
"loss": 1.9243,
"step": 300
},
{
"epoch": 4.873096446700508,
"eval_loss": 1.8513847589492798,
"eval_runtime": 88.8985,
"eval_samples_per_second": 11.249,
"eval_steps_per_second": 1.406,
"step": 300
},
{
"epoch": 4.9543147208121825,
"grad_norm": 0.14919321238994598,
"learning_rate": 0.000185439258644112,
"loss": 1.9703,
"step": 305
},
{
"epoch": 5.035532994923858,
"grad_norm": 0.20038799941539764,
"learning_rate": 0.00017684054210257517,
"loss": 1.9606,
"step": 310
},
{
"epoch": 5.116751269035533,
"grad_norm": 0.3811420798301697,
"learning_rate": 0.00016833548139951395,
"loss": 1.971,
"step": 315
},
{
"epoch": 5.197969543147208,
"grad_norm": 0.34453725814819336,
"learning_rate": 0.0001599349643765599,
"loss": 1.9342,
"step": 320
},
{
"epoch": 5.279187817258883,
"grad_norm": 0.22745971381664276,
"learning_rate": 0.0001516497450426686,
"loss": 1.958,
"step": 325
},
{
"epoch": 5.279187817258883,
"eval_loss": 1.8567323684692383,
"eval_runtime": 88.9414,
"eval_samples_per_second": 11.243,
"eval_steps_per_second": 1.405,
"step": 325
},
{
"epoch": 5.3604060913705585,
"grad_norm": 0.3236718475818634,
"learning_rate": 0.00014349042980726362,
"loss": 1.927,
"step": 330
},
{
"epoch": 5.441624365482234,
"grad_norm": 0.5001394748687744,
"learning_rate": 0.0001354674639023318,
"loss": 1.9415,
"step": 335
},
{
"epoch": 5.522842639593908,
"grad_norm": 0.5372613668441772,
"learning_rate": 0.00012759111801085066,
"loss": 1.9346,
"step": 340
},
{
"epoch": 5.604060913705584,
"grad_norm": 0.39717650413513184,
"learning_rate": 0.00011987147511866788,
"loss": 1.9491,
"step": 345
},
{
"epoch": 5.685279187817259,
"grad_norm": 0.34586790204048157,
"learning_rate": 0.00011231841760666186,
"loss": 1.872,
"step": 350
},
{
"epoch": 5.685279187817259,
"eval_loss": 1.8259689807891846,
"eval_runtime": 88.9255,
"eval_samples_per_second": 11.245,
"eval_steps_per_second": 1.406,
"step": 350
}
],
"logging_steps": 5,
"max_steps": 488,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.435956091813888e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}