Meta-Llama-3-8B-IST_StoryGen / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 487,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002053388090349076,
"grad_norm": 14.228013038635254,
"learning_rate": 4.0816326530612243e-07,
"loss": 2.0477,
"step": 1
},
{
"epoch": 0.01026694045174538,
"grad_norm": 12.610700607299805,
"learning_rate": 2.0408163265306125e-06,
"loss": 2.1254,
"step": 5
},
{
"epoch": 0.02053388090349076,
"grad_norm": 8.27630615234375,
"learning_rate": 4.081632653061225e-06,
"loss": 1.8104,
"step": 10
},
{
"epoch": 0.030800821355236138,
"grad_norm": 3.9820382595062256,
"learning_rate": 6.122448979591837e-06,
"loss": 1.4185,
"step": 15
},
{
"epoch": 0.04106776180698152,
"grad_norm": 2.656637668609619,
"learning_rate": 8.16326530612245e-06,
"loss": 1.2615,
"step": 20
},
{
"epoch": 0.0513347022587269,
"grad_norm": 2.308213949203491,
"learning_rate": 1.0204081632653063e-05,
"loss": 1.1945,
"step": 25
},
{
"epoch": 0.061601642710472276,
"grad_norm": 2.196038007736206,
"learning_rate": 1.2244897959183674e-05,
"loss": 1.118,
"step": 30
},
{
"epoch": 0.07186858316221766,
"grad_norm": 2.158684730529785,
"learning_rate": 1.4285714285714287e-05,
"loss": 1.1065,
"step": 35
},
{
"epoch": 0.08213552361396304,
"grad_norm": 2.052507162094116,
"learning_rate": 1.63265306122449e-05,
"loss": 1.0728,
"step": 40
},
{
"epoch": 0.09240246406570841,
"grad_norm": 2.01088285446167,
"learning_rate": 1.836734693877551e-05,
"loss": 1.0644,
"step": 45
},
{
"epoch": 0.1026694045174538,
"grad_norm": 2.1248321533203125,
"learning_rate": 1.999974277115551e-05,
"loss": 1.0271,
"step": 50
},
{
"epoch": 0.11293634496919917,
"grad_norm": 1.90983247756958,
"learning_rate": 1.9990741151022302e-05,
"loss": 0.99,
"step": 55
},
{
"epoch": 0.12320328542094455,
"grad_norm": 1.873200535774231,
"learning_rate": 1.9968891318940332e-05,
"loss": 0.9706,
"step": 60
},
{
"epoch": 0.13347022587268995,
"grad_norm": 2.0408380031585693,
"learning_rate": 1.9934221374053538e-05,
"loss": 0.9673,
"step": 65
},
{
"epoch": 0.1437371663244353,
"grad_norm": 1.8692070245742798,
"learning_rate": 1.9886775902323405e-05,
"loss": 0.9476,
"step": 70
},
{
"epoch": 0.1540041067761807,
"grad_norm": 1.8879870176315308,
"learning_rate": 1.9826615919190886e-05,
"loss": 0.8964,
"step": 75
},
{
"epoch": 0.16427104722792607,
"grad_norm": 2.0121207237243652,
"learning_rate": 1.975381879110983e-05,
"loss": 0.9109,
"step": 80
},
{
"epoch": 0.17453798767967146,
"grad_norm": 2.071239948272705,
"learning_rate": 1.9668478136052776e-05,
"loss": 0.8292,
"step": 85
},
{
"epoch": 0.18480492813141683,
"grad_norm": 1.7859665155410767,
"learning_rate": 1.957070370311717e-05,
"loss": 0.8361,
"step": 90
},
{
"epoch": 0.19507186858316222,
"grad_norm": 2.460575819015503,
"learning_rate": 1.946062123138668e-05,
"loss": 0.7816,
"step": 95
},
{
"epoch": 0.2053388090349076,
"grad_norm": 2.230203866958618,
"learning_rate": 1.9338372288229253e-05,
"loss": 0.8011,
"step": 100
},
{
"epoch": 0.2053388090349076,
"eval_loss": 1.2018204927444458,
"eval_runtime": 19.1301,
"eval_samples_per_second": 23.314,
"eval_steps_per_second": 0.732,
"step": 100
},
{
"epoch": 0.21560574948665298,
"grad_norm": 1.8656007051467896,
"learning_rate": 1.9204114087239806e-05,
"loss": 0.726,
"step": 105
},
{
"epoch": 0.22587268993839835,
"grad_norm": 1.8082348108291626,
"learning_rate": 1.9058019286061662e-05,
"loss": 0.7246,
"step": 110
},
{
"epoch": 0.23613963039014374,
"grad_norm": 2.0330450534820557,
"learning_rate": 1.890027576434677e-05,
"loss": 0.7142,
"step": 115
},
{
"epoch": 0.2464065708418891,
"grad_norm": 1.9140502214431763,
"learning_rate": 1.8731086382140228e-05,
"loss": 0.7093,
"step": 120
},
{
"epoch": 0.25667351129363447,
"grad_norm": 2.4732396602630615,
"learning_rate": 1.8550668718999873e-05,
"loss": 0.6693,
"step": 125
},
{
"epoch": 0.2669404517453799,
"grad_norm": 2.632209062576294,
"learning_rate": 1.8359254794186368e-05,
"loss": 0.7145,
"step": 130
},
{
"epoch": 0.27720739219712526,
"grad_norm": 2.557025909423828,
"learning_rate": 1.815709076828368e-05,
"loss": 0.6378,
"step": 135
},
{
"epoch": 0.2874743326488706,
"grad_norm": 1.9707200527191162,
"learning_rate": 1.7944436626633625e-05,
"loss": 0.5618,
"step": 140
},
{
"epoch": 0.29774127310061604,
"grad_norm": 13.473517417907715,
"learning_rate": 1.7721565844991643e-05,
"loss": 0.5915,
"step": 145
},
{
"epoch": 0.3080082135523614,
"grad_norm": 3.342193603515625,
"learning_rate": 1.748876503783373e-05,
"loss": 0.584,
"step": 150
},
{
"epoch": 0.3182751540041068,
"grad_norm": 1.8444448709487915,
"learning_rate": 1.7246333589766786e-05,
"loss": 0.5936,
"step": 155
},
{
"epoch": 0.32854209445585214,
"grad_norm": 1.684865117073059,
"learning_rate": 1.699458327051647e-05,
"loss": 0.5379,
"step": 160
},
{
"epoch": 0.33880903490759756,
"grad_norm": 2.2392663955688477,
"learning_rate": 1.6733837833987634e-05,
"loss": 0.533,
"step": 165
},
{
"epoch": 0.3490759753593429,
"grad_norm": 1.850683569908142,
"learning_rate": 1.6464432601912914e-05,
"loss": 0.5305,
"step": 170
},
{
"epoch": 0.3593429158110883,
"grad_norm": 1.9396766424179077,
"learning_rate": 1.6186714032625036e-05,
"loss": 0.4685,
"step": 175
},
{
"epoch": 0.36960985626283366,
"grad_norm": 1.9056674242019653,
"learning_rate": 1.5901039275507247e-05,
"loss": 0.4402,
"step": 180
},
{
"epoch": 0.3798767967145791,
"grad_norm": 1.9246410131454468,
"learning_rate": 1.560777571169498e-05,
"loss": 0.4796,
"step": 185
},
{
"epoch": 0.39014373716632444,
"grad_norm": 1.6989043951034546,
"learning_rate": 1.5307300481619334e-05,
"loss": 0.4851,
"step": 190
},
{
"epoch": 0.4004106776180698,
"grad_norm": 2.2395427227020264,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.436,
"step": 195
},
{
"epoch": 0.4106776180698152,
"grad_norm": 1.839769721031189,
"learning_rate": 1.4686269458911333e-05,
"loss": 0.4058,
"step": 200
},
{
"epoch": 0.4106776180698152,
"eval_loss": 1.28135347366333,
"eval_runtime": 19.6018,
"eval_samples_per_second": 22.753,
"eval_steps_per_second": 0.714,
"step": 200
},
{
"epoch": 0.4209445585215606,
"grad_norm": 2.4135115146636963,
"learning_rate": 1.4366512319560642e-05,
"loss": 0.3935,
"step": 205
},
{
"epoch": 0.43121149897330596,
"grad_norm": 1.8663309812545776,
"learning_rate": 1.4041139793432274e-05,
"loss": 0.3793,
"step": 210
},
{
"epoch": 0.4414784394250513,
"grad_norm": 1.6783833503723145,
"learning_rate": 1.3710570313464778e-05,
"loss": 0.3641,
"step": 215
},
{
"epoch": 0.4517453798767967,
"grad_norm": 1.7463020086288452,
"learning_rate": 1.3375228995941135e-05,
"loss": 0.366,
"step": 220
},
{
"epoch": 0.4620123203285421,
"grad_norm": 1.7100086212158203,
"learning_rate": 1.3035547093784187e-05,
"loss": 0.3338,
"step": 225
},
{
"epoch": 0.4722792607802875,
"grad_norm": 1.5141621828079224,
"learning_rate": 1.2691961441960238e-05,
"loss": 0.3076,
"step": 230
},
{
"epoch": 0.48254620123203285,
"grad_norm": 1.7006425857543945,
"learning_rate": 1.2344913895704099e-05,
"loss": 0.3131,
"step": 235
},
{
"epoch": 0.4928131416837782,
"grad_norm": 1.575813889503479,
"learning_rate": 1.1994850762287988e-05,
"loss": 0.2957,
"step": 240
},
{
"epoch": 0.5030800821355236,
"grad_norm": 1.5434437990188599,
"learning_rate": 1.164222222706509e-05,
"loss": 0.2995,
"step": 245
},
{
"epoch": 0.5133470225872689,
"grad_norm": 1.7136874198913574,
"learning_rate": 1.128748177452581e-05,
"loss": 0.2937,
"step": 250
},
{
"epoch": 0.5236139630390144,
"grad_norm": 1.378823161125183,
"learning_rate": 1.0931085605111354e-05,
"loss": 0.2605,
"step": 255
},
{
"epoch": 0.5338809034907598,
"grad_norm": 1.536054015159607,
"learning_rate": 1.0573492048534515e-05,
"loss": 0.27,
"step": 260
},
{
"epoch": 0.5441478439425051,
"grad_norm": 1.396335482597351,
"learning_rate": 1.0215160974362224e-05,
"loss": 0.2791,
"step": 265
},
{
"epoch": 0.5544147843942505,
"grad_norm": 3.4254772663116455,
"learning_rate": 9.856553200617805e-06,
"loss": 0.2783,
"step": 270
},
{
"epoch": 0.5646817248459959,
"grad_norm": 1.5541990995407104,
"learning_rate": 9.49812990116353e-06,
"loss": 0.269,
"step": 275
},
{
"epoch": 0.5749486652977412,
"grad_norm": 1.3364843130111694,
"learning_rate": 9.140352012625538e-06,
"loss": 0.225,
"step": 280
},
{
"epoch": 0.5852156057494866,
"grad_norm": 1.2611546516418457,
"learning_rate": 8.783679641623845e-06,
"loss": 0.209,
"step": 285
},
{
"epoch": 0.5954825462012321,
"grad_norm": 1.3054827451705933,
"learning_rate": 8.428571473069775e-06,
"loss": 0.1905,
"step": 290
},
{
"epoch": 0.6057494866529775,
"grad_norm": 1.3551859855651855,
"learning_rate": 8.075484180291702e-06,
"loss": 0.2282,
"step": 295
},
{
"epoch": 0.6160164271047228,
"grad_norm": 1.5083565711975098,
"learning_rate": 7.724871837747708e-06,
"loss": 0.2067,
"step": 300
},
{
"epoch": 0.6160164271047228,
"eval_loss": 1.3847053050994873,
"eval_runtime": 19.5099,
"eval_samples_per_second": 22.86,
"eval_steps_per_second": 0.718,
"step": 300
},
{
"epoch": 0.6262833675564682,
"grad_norm": 1.1781014204025269,
"learning_rate": 7.377185337080443e-06,
"loss": 0.1895,
"step": 305
},
{
"epoch": 0.6365503080082136,
"grad_norm": 1.429604411125183,
"learning_rate": 7.032871807265097e-06,
"loss": 0.1929,
"step": 310
},
{
"epoch": 0.6468172484599589,
"grad_norm": 1.2146339416503906,
"learning_rate": 6.692374039596241e-06,
"loss": 0.1842,
"step": 315
},
{
"epoch": 0.6570841889117043,
"grad_norm": 1.354224681854248,
"learning_rate": 6.356129918252927e-06,
"loss": 0.1798,
"step": 320
},
{
"epoch": 0.6673511293634496,
"grad_norm": 1.2704695463180542,
"learning_rate": 6.024571857174443e-06,
"loss": 0.1636,
"step": 325
},
{
"epoch": 0.6776180698151951,
"grad_norm": 1.0078154802322388,
"learning_rate": 5.698126243970845e-06,
"loss": 0.1472,
"step": 330
},
{
"epoch": 0.6878850102669405,
"grad_norm": 1.2621588706970215,
"learning_rate": 5.377212891583419e-06,
"loss": 0.1716,
"step": 335
},
{
"epoch": 0.6981519507186859,
"grad_norm": 1.163734793663025,
"learning_rate": 5.062244498400228e-06,
"loss": 0.1595,
"step": 340
},
{
"epoch": 0.7084188911704312,
"grad_norm": 1.1806224584579468,
"learning_rate": 4.753626117521103e-06,
"loss": 0.1674,
"step": 345
},
{
"epoch": 0.7186858316221766,
"grad_norm": 1.2119107246398926,
"learning_rate": 4.4517546358545175e-06,
"loss": 0.1453,
"step": 350
},
{
"epoch": 0.728952772073922,
"grad_norm": 1.1235816478729248,
"learning_rate": 4.1570182637163155e-06,
"loss": 0.15,
"step": 355
},
{
"epoch": 0.7392197125256673,
"grad_norm": 1.1243553161621094,
"learning_rate": 3.869796035586625e-06,
"loss": 0.1426,
"step": 360
},
{
"epoch": 0.7494866529774127,
"grad_norm": 1.1293926239013672,
"learning_rate": 3.590457322666997e-06,
"loss": 0.1401,
"step": 365
},
{
"epoch": 0.7597535934291582,
"grad_norm": 1.0441910028457642,
"learning_rate": 3.3193613578646633e-06,
"loss": 0.1359,
"step": 370
},
{
"epoch": 0.7700205338809035,
"grad_norm": 1.05973219871521,
"learning_rate": 3.0568567738147505e-06,
"loss": 0.1491,
"step": 375
},
{
"epoch": 0.7802874743326489,
"grad_norm": 1.1299043893814087,
"learning_rate": 2.8032811545345294e-06,
"loss": 0.1342,
"step": 380
},
{
"epoch": 0.7905544147843943,
"grad_norm": 1.086897373199463,
"learning_rate": 2.5589606012863968e-06,
"loss": 0.1379,
"step": 385
},
{
"epoch": 0.8008213552361396,
"grad_norm": 1.0586342811584473,
"learning_rate": 2.324209313207736e-06,
"loss": 0.1281,
"step": 390
},
{
"epoch": 0.811088295687885,
"grad_norm": 1.0056428909301758,
"learning_rate": 2.099329183247126e-06,
"loss": 0.1157,
"step": 395
},
{
"epoch": 0.8213552361396304,
"grad_norm": 0.9998786449432373,
"learning_rate": 1.8846094099263911e-06,
"loss": 0.1183,
"step": 400
},
{
"epoch": 0.8213552361396304,
"eval_loss": 1.4414706230163574,
"eval_runtime": 19.2041,
"eval_samples_per_second": 23.224,
"eval_steps_per_second": 0.729,
"step": 400
},
{
"epoch": 0.8316221765913757,
"grad_norm": 0.9717850089073181,
"learning_rate": 1.6803261254278635e-06,
"loss": 0.1143,
"step": 405
},
{
"epoch": 0.8418891170431212,
"grad_norm": 1.070147156715393,
"learning_rate": 1.4867420404851306e-06,
"loss": 0.1197,
"step": 410
},
{
"epoch": 0.8521560574948666,
"grad_norm": 0.9136067628860474,
"learning_rate": 1.30410610653389e-06,
"loss": 0.1166,
"step": 415
},
{
"epoch": 0.8624229979466119,
"grad_norm": 1.1148228645324707,
"learning_rate": 1.1326531955574526e-06,
"loss": 0.1182,
"step": 420
},
{
"epoch": 0.8726899383983573,
"grad_norm": 1.0000271797180176,
"learning_rate": 9.72603798038574e-07,
"loss": 0.1209,
"step": 425
},
{
"epoch": 0.8829568788501027,
"grad_norm": 0.8131821751594543,
"learning_rate": 8.241637394060619e-07,
"loss": 0.1107,
"step": 430
},
{
"epoch": 0.893223819301848,
"grad_norm": 0.8392049074172974,
"learning_rate": 6.875239153408541e-07,
"loss": 0.1053,
"step": 435
},
{
"epoch": 0.9034907597535934,
"grad_norm": 1.0311695337295532,
"learning_rate": 5.628600462818668e-07,
"loss": 0.1053,
"step": 440
},
{
"epoch": 0.9137577002053389,
"grad_norm": 0.8752442002296448,
"learning_rate": 4.503324514474483e-07,
"loss": 0.1013,
"step": 445
},
{
"epoch": 0.9240246406570842,
"grad_norm": 0.9375615119934082,
"learning_rate": 3.500858426629439e-07,
"loss": 0.1033,
"step": 450
},
{
"epoch": 0.9342915811088296,
"grad_norm": 0.8648166060447693,
"learning_rate": 2.6224913825956933e-07,
"loss": 0.1098,
"step": 455
},
{
"epoch": 0.944558521560575,
"grad_norm": 0.9131202697753906,
"learning_rate": 1.869352972839067e-07,
"loss": 0.1073,
"step": 460
},
{
"epoch": 0.9548254620123203,
"grad_norm": 0.9665296077728271,
"learning_rate": 1.242411742312233e-07,
"loss": 0.0966,
"step": 465
},
{
"epoch": 0.9650924024640657,
"grad_norm": 0.8326826691627502,
"learning_rate": 7.42473944894384e-08,
"loss": 0.1107,
"step": 470
},
{
"epoch": 0.9753593429158111,
"grad_norm": 0.8301469683647156,
"learning_rate": 3.701825065392184e-08,
"loss": 0.1123,
"step": 475
},
{
"epoch": 0.9856262833675564,
"grad_norm": 0.9057932496070862,
"learning_rate": 1.2601619846444035e-08,
"loss": 0.0985,
"step": 480
},
{
"epoch": 0.9958932238193019,
"grad_norm": 0.7701483964920044,
"learning_rate": 1.0289021446308057e-09,
"loss": 0.0917,
"step": 485
},
{
"epoch": 1.0,
"step": 487,
"total_flos": 1.4371645474744566e+18,
"train_loss": 0.45526161300328233,
"train_runtime": 3331.5803,
"train_samples_per_second": 4.672,
"train_steps_per_second": 0.146
}
],
"logging_steps": 5,
"max_steps": 487,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4371645474744566e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
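
For reference, a minimal sketch of how this log could be consumed downstream, assuming the file above is saved locally as trainer_state.json (the path is an assumption for illustration, not part of the checkpoint). It separates the periodic training-loss entries (logged every 5 steps here) from the evaluation entries (logged every 100 steps) using only the keys that appear in log_history above.

import json

# Path is an assumption; point it at wherever trainer_state.json lives.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training logs carry a "loss" key; evaluation logs carry "eval_loss".
# The final summary entry uses "train_loss"/"total_flos" and is skipped by both filters.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("last logged train loss:", train[-1])   # e.g. (485, 0.0917)
print("eval loss by step:", evals)            # e.g. [(100, 1.2018...), (200, 1.2813...), ...]

The same pattern extends to learning_rate or grad_norm if you want to inspect the schedule or gradient behavior over the run.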