reprover_random / trainer_state.json
{
"best_metric": 1.2138804197311401,
"best_model_checkpoint": "model_training/reprover/checkpoints-random-09-07-14-12/checkpoint-400",
"epoch": 6.49746192893401,
"eval_steps": 25,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08121827411167512,
"grad_norm": 25.242706298828125,
"learning_rate": 5.102040816326531e-05,
"loss": 12.5197,
"step": 5
},
{
"epoch": 0.16243654822335024,
"grad_norm": 8.962030410766602,
"learning_rate": 0.00010204081632653062,
"loss": 7.4254,
"step": 10
},
{
"epoch": 0.2436548223350254,
"grad_norm": 0.5921331644058228,
"learning_rate": 0.00015306122448979594,
"loss": 3.9799,
"step": 15
},
{
"epoch": 0.3248730964467005,
"grad_norm": 0.23172463476657867,
"learning_rate": 0.00020408163265306123,
"loss": 3.6019,
"step": 20
},
{
"epoch": 0.40609137055837563,
"grad_norm": 0.24555863440036774,
"learning_rate": 0.00025510204081632655,
"loss": 3.5033,
"step": 25
},
{
"epoch": 0.40609137055837563,
"eval_loss": 3.385397434234619,
"eval_runtime": 85.9926,
"eval_samples_per_second": 11.629,
"eval_steps_per_second": 1.454,
"step": 25
},
{
"epoch": 0.4873096446700508,
"grad_norm": 1.7724591493606567,
"learning_rate": 0.0003061224489795919,
"loss": 3.414,
"step": 30
},
{
"epoch": 0.5685279187817259,
"grad_norm": 1.4451329708099365,
"learning_rate": 0.00035714285714285714,
"loss": 2.5288,
"step": 35
},
{
"epoch": 0.649746192893401,
"grad_norm": 1.1171362400054932,
"learning_rate": 0.00040816326530612246,
"loss": 1.9554,
"step": 40
},
{
"epoch": 0.7309644670050761,
"grad_norm": 0.5590386986732483,
"learning_rate": 0.0004591836734693878,
"loss": 1.7407,
"step": 45
},
{
"epoch": 0.8121827411167513,
"grad_norm": 0.7381185293197632,
"learning_rate": 0.0004999935985425297,
"loss": 1.6238,
"step": 50
},
{
"epoch": 0.8121827411167513,
"eval_loss": 1.457683801651001,
"eval_runtime": 85.8152,
"eval_samples_per_second": 11.653,
"eval_steps_per_second": 1.457,
"step": 50
},
{
"epoch": 0.8934010152284264,
"grad_norm": 0.63067227602005,
"learning_rate": 0.0004997695819512612,
"loss": 1.5906,
"step": 55
},
{
"epoch": 0.9746192893401016,
"grad_norm": 0.35589519143104553,
"learning_rate": 0.0004992258202402822,
"loss": 1.5011,
"step": 60
},
{
"epoch": 1.0558375634517767,
"grad_norm": 0.1975679099559784,
"learning_rate": 0.0004983630095117843,
"loss": 1.4414,
"step": 65
},
{
"epoch": 1.1370558375634519,
"grad_norm": 0.23968252539634705,
"learning_rate": 0.0004971822543018662,
"loss": 1.3931,
"step": 70
},
{
"epoch": 1.218274111675127,
"grad_norm": 0.20230525732040405,
"learning_rate": 0.0004956850661665511,
"loss": 1.4338,
"step": 75
},
{
"epoch": 1.218274111675127,
"eval_loss": 1.2832187414169312,
"eval_runtime": 85.7925,
"eval_samples_per_second": 11.656,
"eval_steps_per_second": 1.457,
"step": 75
},
{
"epoch": 1.299492385786802,
"grad_norm": 0.17177316546440125,
"learning_rate": 0.0004938733617467517,
"loss": 1.4159,
"step": 80
},
{
"epoch": 1.380710659898477,
"grad_norm": 0.22775708138942719,
"learning_rate": 0.0004917494603146632,
"loss": 1.3703,
"step": 85
},
{
"epoch": 1.4619289340101522,
"grad_norm": 0.10171830654144287,
"learning_rate": 0.0004893160808047222,
"loss": 1.3801,
"step": 90
},
{
"epoch": 1.5431472081218274,
"grad_norm": 0.4525371193885803,
"learning_rate": 0.00048657633833293557,
"loss": 1.4181,
"step": 95
},
{
"epoch": 1.6243654822335025,
"grad_norm": 0.2127549946308136,
"learning_rate": 0.0004835337402090316,
"loss": 1.3681,
"step": 100
},
{
"epoch": 1.6243654822335025,
"eval_loss": 1.2635972499847412,
"eval_runtime": 85.8407,
"eval_samples_per_second": 11.649,
"eval_steps_per_second": 1.456,
"step": 100
},
{
"epoch": 1.7055837563451777,
"grad_norm": 0.3497621715068817,
"learning_rate": 0.0004801921814465414,
"loss": 1.3974,
"step": 105
},
{
"epoch": 1.7868020304568528,
"grad_norm": 0.33355796337127686,
"learning_rate": 0.00047655593977655674,
"loss": 1.3714,
"step": 110
},
{
"epoch": 1.868020304568528,
"grad_norm": 0.26004713773727417,
"learning_rate": 0.0004726296701715489,
"loss": 1.3513,
"step": 115
},
{
"epoch": 1.9492385786802031,
"grad_norm": 0.21243992447853088,
"learning_rate": 0.00046841839888625623,
"loss": 1.3371,
"step": 120
},
{
"epoch": 2.030456852791878,
"grad_norm": 0.1547948122024536,
"learning_rate": 0.0004639275170232734,
"loss": 1.364,
"step": 125
},
{
"epoch": 2.030456852791878,
"eval_loss": 1.2679340839385986,
"eval_runtime": 85.9259,
"eval_samples_per_second": 11.638,
"eval_steps_per_second": 1.455,
"step": 125
},
{
"epoch": 2.1116751269035534,
"grad_norm": 0.1696767359972,
"learning_rate": 0.0004591627736315743,
"loss": 1.3461,
"step": 130
},
{
"epoch": 2.1928934010152283,
"grad_norm": 0.608369767665863,
"learning_rate": 0.0004541302683468084,
"loss": 1.309,
"step": 135
},
{
"epoch": 2.2741116751269037,
"grad_norm": 0.47118842601776123,
"learning_rate": 0.0004488364435827881,
"loss": 1.3606,
"step": 140
},
{
"epoch": 2.3553299492385786,
"grad_norm": 0.32203948497772217,
"learning_rate": 0.00044328807628416644,
"loss": 1.3892,
"step": 145
},
{
"epoch": 2.436548223350254,
"grad_norm": 0.47044509649276733,
"learning_rate": 0.0004374922692508611,
"loss": 1.3706,
"step": 150
},
{
"epoch": 2.436548223350254,
"eval_loss": 1.2735475301742554,
"eval_runtime": 85.8193,
"eval_samples_per_second": 11.652,
"eval_steps_per_second": 1.457,
"step": 150
},
{
"epoch": 2.517766497461929,
"grad_norm": 0.34873417019844055,
"learning_rate": 0.0004314564420453311,
"loss": 1.3656,
"step": 155
},
{
"epoch": 2.598984771573604,
"grad_norm": 0.3404662609100342,
"learning_rate": 0.0004251883214943475,
"loss": 1.345,
"step": 160
},
{
"epoch": 2.6802030456852792,
"grad_norm": 0.34060540795326233,
"learning_rate": 0.0004186959317974155,
"loss": 1.3485,
"step": 165
},
{
"epoch": 2.761421319796954,
"grad_norm": 0.24678784608840942,
"learning_rate": 0.00041198758425451266,
"loss": 1.3189,
"step": 170
},
{
"epoch": 2.8426395939086295,
"grad_norm": 0.31498032808303833,
"learning_rate": 0.00040507186662629185,
"loss": 1.3653,
"step": 175
},
{
"epoch": 2.8426395939086295,
"eval_loss": 1.272367000579834,
"eval_runtime": 85.9187,
"eval_samples_per_second": 11.639,
"eval_steps_per_second": 1.455,
"step": 175
},
{
"epoch": 2.9238578680203045,
"grad_norm": 0.23488959670066833,
"learning_rate": 0.0003979576321403705,
"loss": 1.3121,
"step": 180
},
{
"epoch": 3.00507614213198,
"grad_norm": 0.41097673773765564,
"learning_rate": 0.0003906539881577793,
"loss": 1.366,
"step": 185
},
{
"epoch": 3.0862944162436547,
"grad_norm": 1.2468574047088623,
"learning_rate": 0.0003831702845140801,
"loss": 1.3558,
"step": 190
},
{
"epoch": 3.16751269035533,
"grad_norm": 0.4176631271839142,
"learning_rate": 0.00037551610155007613,
"loss": 1.3358,
"step": 195
},
{
"epoch": 3.248730964467005,
"grad_norm": 0.20121543109416962,
"learning_rate": 0.00036770123784744027,
"loss": 1.3063,
"step": 200
},
{
"epoch": 3.248730964467005,
"eval_loss": 1.2347419261932373,
"eval_runtime": 85.8995,
"eval_samples_per_second": 11.642,
"eval_steps_per_second": 1.455,
"step": 200
},
{
"epoch": 3.3299492385786804,
"grad_norm": 0.1536010503768921,
"learning_rate": 0.00035973569768495855,
"loss": 1.2936,
"step": 205
},
{
"epoch": 3.4111675126903553,
"grad_norm": 0.15390706062316895,
"learning_rate": 0.0003516296782314491,
"loss": 1.29,
"step": 210
},
{
"epoch": 3.4923857868020303,
"grad_norm": 0.15325894951820374,
"learning_rate": 0.00034339355649175095,
"loss": 1.3385,
"step": 215
},
{
"epoch": 3.5736040609137056,
"grad_norm": 0.11753768473863602,
"learning_rate": 0.00033503787602249364,
"loss": 1.2825,
"step": 220
},
{
"epoch": 3.6548223350253806,
"grad_norm": 0.0821477621793747,
"learning_rate": 0.00032657333343465356,
"loss": 1.281,
"step": 225
},
{
"epoch": 3.6548223350253806,
"eval_loss": 1.2228115797042847,
"eval_runtime": 85.9026,
"eval_samples_per_second": 11.641,
"eval_steps_per_second": 1.455,
"step": 225
},
{
"epoch": 3.736040609137056,
"grad_norm": 0.06417221575975418,
"learning_rate": 0.0003180107647001769,
"loss": 1.2609,
"step": 230
},
{
"epoch": 3.817258883248731,
"grad_norm": 0.3063823878765106,
"learning_rate": 0.0003093611312801979,
"loss": 1.2957,
"step": 235
},
{
"epoch": 3.8984771573604062,
"grad_norm": 0.28450486063957214,
"learning_rate": 0.00030063550609261025,
"loss": 1.2869,
"step": 240
},
{
"epoch": 3.979695431472081,
"grad_norm": 0.23690135776996613,
"learning_rate": 0.000291845059336957,
"loss": 1.3068,
"step": 245
},
{
"epoch": 4.060913705583756,
"grad_norm": 0.4273748993873596,
"learning_rate": 0.0002830010441947834,
"loss": 1.2819,
"step": 250
},
{
"epoch": 4.060913705583756,
"eval_loss": 1.2325246334075928,
"eval_runtime": 85.872,
"eval_samples_per_second": 11.645,
"eval_steps_per_second": 1.456,
"step": 250
},
{
"epoch": 4.1421319796954315,
"grad_norm": 0.47748294472694397,
"learning_rate": 0.00027411478242376017,
"loss": 1.3036,
"step": 255
},
{
"epoch": 4.223350253807107,
"grad_norm": 0.851790189743042,
"learning_rate": 0.00026519764986401774,
"loss": 1.2969,
"step": 260
},
{
"epoch": 4.304568527918782,
"grad_norm": 0.2811254560947418,
"learning_rate": 0.000256261061875247,
"loss": 1.2699,
"step": 265
},
{
"epoch": 4.385786802030457,
"grad_norm": 0.30236339569091797,
"learning_rate": 0.0002473164587232079,
"loss": 1.308,
"step": 270
},
{
"epoch": 4.467005076142132,
"grad_norm": 0.4765733480453491,
"learning_rate": 0.0002383752909343547,
"loss": 1.303,
"step": 275
},
{
"epoch": 4.467005076142132,
"eval_loss": 1.2213741540908813,
"eval_runtime": 85.8151,
"eval_samples_per_second": 11.653,
"eval_steps_per_second": 1.457,
"step": 275
},
{
"epoch": 4.548223350253807,
"grad_norm": 0.22984470427036285,
"learning_rate": 0.0002294490046373259,
"loss": 1.3021,
"step": 280
},
{
"epoch": 4.629441624365482,
"grad_norm": 0.216758593916893,
"learning_rate": 0.00022054902691006405,
"loss": 1.3071,
"step": 285
},
{
"epoch": 4.710659898477157,
"grad_norm": 0.16396842896938324,
"learning_rate": 0.00021168675115132315,
"loss": 1.2813,
"step": 290
},
{
"epoch": 4.791878172588833,
"grad_norm": 0.0981246754527092,
"learning_rate": 0.00020287352249529153,
"loss": 1.3091,
"step": 295
},
{
"epoch": 4.873096446700508,
"grad_norm": 0.10069616883993149,
"learning_rate": 0.00019412062328800044,
"loss": 1.2781,
"step": 300
},
{
"epoch": 4.873096446700508,
"eval_loss": 1.220151424407959,
"eval_runtime": 85.8774,
"eval_samples_per_second": 11.645,
"eval_steps_per_second": 1.456,
"step": 300
},
{
"epoch": 4.9543147208121825,
"grad_norm": 0.07661138474941254,
"learning_rate": 0.000185439258644112,
"loss": 1.2749,
"step": 305
},
{
"epoch": 5.035532994923858,
"grad_norm": 0.09922365099191666,
"learning_rate": 0.00017684054210257517,
"loss": 1.2464,
"step": 310
},
{
"epoch": 5.116751269035533,
"grad_norm": 0.08816192299127579,
"learning_rate": 0.00016833548139951395,
"loss": 1.2642,
"step": 315
},
{
"epoch": 5.197969543147208,
"grad_norm": 0.17599380016326904,
"learning_rate": 0.0001599349643765599,
"loss": 1.2967,
"step": 320
},
{
"epoch": 5.279187817258883,
"grad_norm": 0.1083323284983635,
"learning_rate": 0.0001516497450426686,
"loss": 1.3184,
"step": 325
},
{
"epoch": 5.279187817258883,
"eval_loss": 1.2185090780258179,
"eval_runtime": 85.8042,
"eval_samples_per_second": 11.654,
"eval_steps_per_second": 1.457,
"step": 325
},
{
"epoch": 5.3604060913705585,
"grad_norm": 0.0962451919913292,
"learning_rate": 0.00014349042980726362,
"loss": 1.2535,
"step": 330
},
{
"epoch": 5.441624365482234,
"grad_norm": 0.08135022222995758,
"learning_rate": 0.0001354674639023318,
"loss": 1.2894,
"step": 335
},
{
"epoch": 5.522842639593908,
"grad_norm": 0.05687703192234039,
"learning_rate": 0.00012759111801085066,
"loss": 1.2648,
"step": 340
},
{
"epoch": 5.604060913705584,
"grad_norm": 0.14364075660705566,
"learning_rate": 0.00011987147511866788,
"loss": 1.3106,
"step": 345
},
{
"epoch": 5.685279187817259,
"grad_norm": 0.0585949569940567,
"learning_rate": 0.00011231841760666186,
"loss": 1.2706,
"step": 350
},
{
"epoch": 5.685279187817259,
"eval_loss": 1.2204599380493164,
"eval_runtime": 85.8832,
"eval_samples_per_second": 11.644,
"eval_steps_per_second": 1.455,
"step": 350
},
{
"epoch": 5.7664974619289335,
"grad_norm": 0.06366579979658127,
"learning_rate": 0.0001049416145997094,
"loss": 1.2985,
"step": 355
},
{
"epoch": 5.847715736040609,
"grad_norm": 0.14090530574321747,
"learning_rate": 9.775050958865584e-05,
"loss": 1.3086,
"step": 360
},
{
"epoch": 5.928934010152284,
"grad_norm": 0.08585643023252487,
"learning_rate": 9.075430834113152e-05,
"loss": 1.2382,
"step": 365
},
{
"epoch": 6.01015228426396,
"grad_norm": 0.0822765976190567,
"learning_rate": 8.396196711669335e-05,
"loss": 1.2685,
"step": 370
},
{
"epoch": 6.091370558375634,
"grad_norm": 0.05758822336792946,
"learning_rate": 7.738218120137671e-05,
"loss": 1.2538,
"step": 375
},
{
"epoch": 6.091370558375634,
"eval_loss": 1.216223120689392,
"eval_runtime": 85.8934,
"eval_samples_per_second": 11.642,
"eval_steps_per_second": 1.455,
"step": 375
},
{
"epoch": 6.1725888324873095,
"grad_norm": 0.09431485831737518,
"learning_rate": 7.102337377633394e-05,
"loss": 1.2767,
"step": 380
},
{
"epoch": 6.253807106598985,
"grad_norm": 0.07947517186403275,
"learning_rate": 6.489368513481228e-05,
"loss": 1.283,
"step": 385
},
{
"epoch": 6.33502538071066,
"grad_norm": 0.07814743369817734,
"learning_rate": 5.9000962261273136e-05,
"loss": 1.2769,
"step": 390
},
{
"epoch": 6.416243654822335,
"grad_norm": 0.06358485668897629,
"learning_rate": 5.3352748785993164e-05,
"loss": 1.3046,
"step": 395
},
{
"epoch": 6.49746192893401,
"grad_norm": 0.04739053547382355,
"learning_rate": 4.795627532800806e-05,
"loss": 1.2765,
"step": 400
},
{
"epoch": 6.49746192893401,
"eval_loss": 1.2138804197311401,
"eval_runtime": 85.8771,
"eval_samples_per_second": 11.645,
"eval_steps_per_second": 1.456,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 488,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.7459851945508864e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}