deepseek_prover_base_no_err / trainer_state.json
{
"best_metric": 0.1424967348575592,
"best_model_checkpoint": "model_training/deepseek_prover_base_no_err/checkpoints-by_file-09-07-08-59/checkpoint-450",
"epoch": 3.6568527918781726,
"eval_steps": 50,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04060913705583756,
"grad_norm": 113.10508728027344,
"learning_rate": 1e-05,
"loss": 15.6138,
"step": 5
},
{
"epoch": 0.08121827411167512,
"grad_norm": 114.42933654785156,
"learning_rate": 2e-05,
"loss": 11.7959,
"step": 10
},
{
"epoch": 0.1218274111675127,
"grad_norm": 33.93031311035156,
"learning_rate": 3e-05,
"loss": 3.0499,
"step": 15
},
{
"epoch": 0.16243654822335024,
"grad_norm": 0.41336098313331604,
"learning_rate": 4e-05,
"loss": 0.1854,
"step": 20
},
{
"epoch": 0.20304568527918782,
"grad_norm": 0.4682806432247162,
"learning_rate": 5e-05,
"loss": 0.8857,
"step": 25
},
{
"epoch": 0.2436548223350254,
"grad_norm": 506.6198425292969,
"learning_rate": 6e-05,
"loss": 0.4888,
"step": 30
},
{
"epoch": 0.28426395939086296,
"grad_norm": 0.2869759202003479,
"learning_rate": 7e-05,
"loss": 0.2279,
"step": 35
},
{
"epoch": 0.3248730964467005,
"grad_norm": 0.18190807104110718,
"learning_rate": 8e-05,
"loss": 0.1661,
"step": 40
},
{
"epoch": 0.36548223350253806,
"grad_norm": 0.09090979397296906,
"learning_rate": 9e-05,
"loss": 0.1575,
"step": 45
},
{
"epoch": 0.40609137055837563,
"grad_norm": 0.14082124829292297,
"learning_rate": 0.0001,
"loss": 0.1569,
"step": 50
},
{
"epoch": 0.40609137055837563,
"eval_loss": 0.16598469018936157,
"eval_runtime": 571.0235,
"eval_samples_per_second": 1.751,
"eval_steps_per_second": 0.219,
"step": 50
},
{
"epoch": 0.4467005076142132,
"grad_norm": 1.0748934745788574,
"learning_rate": 9.996842891446092e-05,
"loss": 0.15,
"step": 55
},
{
"epoch": 0.4873096446700508,
"grad_norm": 0.09886147826910019,
"learning_rate": 9.987375552718133e-05,
"loss": 0.1498,
"step": 60
},
{
"epoch": 0.5279187817258884,
"grad_norm": 0.07677606493234634,
"learning_rate": 9.971609939582557e-05,
"loss": 0.1499,
"step": 65
},
{
"epoch": 0.5685279187817259,
"grad_norm": 0.26791003346443176,
"learning_rate": 9.9495659615402e-05,
"loss": 0.1631,
"step": 70
},
{
"epoch": 0.6091370558375635,
"grad_norm": 0.07558655738830566,
"learning_rate": 9.921271456683715e-05,
"loss": 0.1535,
"step": 75
},
{
"epoch": 0.649746192893401,
"grad_norm": 0.04132043197751045,
"learning_rate": 9.886762156542428e-05,
"loss": 0.1454,
"step": 80
},
{
"epoch": 0.6903553299492385,
"grad_norm": 0.03367358446121216,
"learning_rate": 9.846081640959007e-05,
"loss": 0.1372,
"step": 85
},
{
"epoch": 0.7309644670050761,
"grad_norm": 0.05775720626115799,
"learning_rate": 9.79928128305494e-05,
"loss": 0.1411,
"step": 90
},
{
"epoch": 0.7715736040609137,
"grad_norm": 0.05461897701025009,
"learning_rate": 9.746420184354334e-05,
"loss": 0.1369,
"step": 95
},
{
"epoch": 0.8121827411167513,
"grad_norm": 0.03982888534665108,
"learning_rate": 9.687565100147939e-05,
"loss": 0.1422,
"step": 100
},
{
"epoch": 0.8121827411167513,
"eval_loss": 0.15572066605091095,
"eval_runtime": 570.7773,
"eval_samples_per_second": 1.752,
"eval_steps_per_second": 0.219,
"step": 100
},
{
"epoch": 0.8527918781725888,
"grad_norm": 0.049296505749225616,
"learning_rate": 9.622790355191672e-05,
"loss": 0.1396,
"step": 105
},
{
"epoch": 0.8934010152284264,
"grad_norm": 0.04366493597626686,
"learning_rate": 9.552177749846083e-05,
"loss": 0.1466,
"step": 110
},
{
"epoch": 0.934010152284264,
"grad_norm": 0.0475134439766407,
"learning_rate": 9.475816456775313e-05,
"loss": 0.1421,
"step": 115
},
{
"epoch": 0.9746192893401016,
"grad_norm": 0.044564343988895416,
"learning_rate": 9.393802908335977e-05,
"loss": 0.137,
"step": 120
},
{
"epoch": 1.015228426395939,
"grad_norm": 0.04059167578816414,
"learning_rate": 9.306240674798203e-05,
"loss": 0.1442,
"step": 125
},
{
"epoch": 1.0558375634517767,
"grad_norm": 0.0424371100962162,
"learning_rate": 9.213240333552589e-05,
"loss": 0.1362,
"step": 130
},
{
"epoch": 1.0964467005076142,
"grad_norm": 0.059122174978256226,
"learning_rate": 9.114919329468282e-05,
"loss": 0.1351,
"step": 135
},
{
"epoch": 1.1370558375634519,
"grad_norm": 0.06440797448158264,
"learning_rate": 9.011401826578492e-05,
"loss": 0.1404,
"step": 140
},
{
"epoch": 1.1776649746192893,
"grad_norm": 0.044413380324840546,
"learning_rate": 8.902818551280758e-05,
"loss": 0.1258,
"step": 145
},
{
"epoch": 1.218274111675127,
"grad_norm": 0.06735623627901077,
"learning_rate": 8.789306627249985e-05,
"loss": 0.1238,
"step": 150
},
{
"epoch": 1.218274111675127,
"eval_loss": 0.15116116404533386,
"eval_runtime": 571.8633,
"eval_samples_per_second": 1.749,
"eval_steps_per_second": 0.219,
"step": 150
},
{
"epoch": 1.2588832487309645,
"grad_norm": 0.05769121274352074,
"learning_rate": 8.6710094022727e-05,
"loss": 0.1319,
"step": 155
},
{
"epoch": 1.299492385786802,
"grad_norm": 0.048896849155426025,
"learning_rate": 8.548076267221256e-05,
"loss": 0.1232,
"step": 160
},
{
"epoch": 1.3401015228426396,
"grad_norm": 0.0718092992901802,
"learning_rate": 8.420662467396547e-05,
"loss": 0.1397,
"step": 165
},
{
"epoch": 1.380710659898477,
"grad_norm": 0.054730452597141266,
"learning_rate": 8.288928906477496e-05,
"loss": 0.121,
"step": 170
},
{
"epoch": 1.4213197969543148,
"grad_norm": 0.08080842345952988,
"learning_rate": 8.15304194332491e-05,
"loss": 0.1358,
"step": 175
},
{
"epoch": 1.4619289340101522,
"grad_norm": 0.05518824979662895,
"learning_rate": 8.013173181896283e-05,
"loss": 0.1347,
"step": 180
},
{
"epoch": 1.50253807106599,
"grad_norm": 0.07823627442121506,
"learning_rate": 7.869499254536865e-05,
"loss": 0.1386,
"step": 185
},
{
"epoch": 1.5431472081218274,
"grad_norm": 0.08264634758234024,
"learning_rate": 7.722201598920673e-05,
"loss": 0.1261,
"step": 190
},
{
"epoch": 1.5837563451776648,
"grad_norm": 0.06334514170885086,
"learning_rate": 7.571466228923115e-05,
"loss": 0.1338,
"step": 195
},
{
"epoch": 1.6243654822335025,
"grad_norm": 0.06367149949073792,
"learning_rate": 7.417483499714589e-05,
"loss": 0.1306,
"step": 200
},
{
"epoch": 1.6243654822335025,
"eval_loss": 0.1474769562482834,
"eval_runtime": 570.894,
"eval_samples_per_second": 1.752,
"eval_steps_per_second": 0.219,
"step": 200
},
{
"epoch": 1.6649746192893402,
"grad_norm": 0.08058533072471619,
"learning_rate": 7.260447867371709e-05,
"loss": 0.1342,
"step": 205
},
{
"epoch": 1.7055837563451777,
"grad_norm": 0.07283538579940796,
"learning_rate": 7.100557643309732e-05,
"loss": 0.1247,
"step": 210
},
{
"epoch": 1.7461928934010151,
"grad_norm": 0.08428559452295303,
"learning_rate": 6.938014743846285e-05,
"loss": 0.1278,
"step": 215
},
{
"epoch": 1.7868020304568528,
"grad_norm": 0.08377284556627274,
"learning_rate": 6.773024435212678e-05,
"loss": 0.1312,
"step": 220
},
{
"epoch": 1.8274111675126905,
"grad_norm": 0.10298043489456177,
"learning_rate": 6.605795074334794e-05,
"loss": 0.1304,
"step": 225
},
{
"epoch": 1.868020304568528,
"grad_norm": 0.09377053380012512,
"learning_rate": 6.436537845710903e-05,
"loss": 0.1303,
"step": 230
},
{
"epoch": 1.9086294416243654,
"grad_norm": 0.11010950058698654,
"learning_rate": 6.265466494718732e-05,
"loss": 0.1163,
"step": 235
},
{
"epoch": 1.9492385786802031,
"grad_norm": 0.17498579621315002,
"learning_rate": 6.092797057688495e-05,
"loss": 0.1124,
"step": 240
},
{
"epoch": 1.9898477157360406,
"grad_norm": 0.09446436911821365,
"learning_rate": 5.918747589082853e-05,
"loss": 0.1193,
"step": 245
},
{
"epoch": 2.030456852791878,
"grad_norm": 0.09566423296928406,
"learning_rate": 5.7435378861282585e-05,
"loss": 0.1252,
"step": 250
},
{
"epoch": 2.030456852791878,
"eval_loss": 0.14643818140029907,
"eval_runtime": 570.7851,
"eval_samples_per_second": 1.752,
"eval_steps_per_second": 0.219,
"step": 250
},
{
"epoch": 2.0710659898477157,
"grad_norm": 0.10885192453861237,
"learning_rate": 5.567389211245485e-05,
"loss": 0.1152,
"step": 255
},
{
"epoch": 2.1116751269035534,
"grad_norm": 0.09853996336460114,
"learning_rate": 5.390524012629824e-05,
"loss": 0.1248,
"step": 260
},
{
"epoch": 2.152284263959391,
"grad_norm": 0.0892142653465271,
"learning_rate": 5.2131656433338506e-05,
"loss": 0.1207,
"step": 265
},
{
"epoch": 2.1928934010152283,
"grad_norm": 0.09116163849830627,
"learning_rate": 5.035538079207488e-05,
"loss": 0.1143,
"step": 270
},
{
"epoch": 2.233502538071066,
"grad_norm": 0.09667850285768509,
"learning_rate": 4.857865636051585e-05,
"loss": 0.1155,
"step": 275
},
{
"epoch": 2.2741116751269037,
"grad_norm": 0.1532243937253952,
"learning_rate": 4.6803726863421725e-05,
"loss": 0.1146,
"step": 280
},
{
"epoch": 2.314720812182741,
"grad_norm": 0.11655127257108688,
"learning_rate": 4.503283375883165e-05,
"loss": 0.111,
"step": 285
},
{
"epoch": 2.3553299492385786,
"grad_norm": 0.1412302553653717,
"learning_rate": 4.326821340745304e-05,
"loss": 0.1102,
"step": 290
},
{
"epoch": 2.3959390862944163,
"grad_norm": 0.10279002040624619,
"learning_rate": 4.151209424848819e-05,
"loss": 0.112,
"step": 295
},
{
"epoch": 2.436548223350254,
"grad_norm": 0.08668510615825653,
"learning_rate": 3.976669398546451e-05,
"loss": 0.1197,
"step": 300
},
{
"epoch": 2.436548223350254,
"eval_loss": 0.14324025809764862,
"eval_runtime": 570.6919,
"eval_samples_per_second": 1.752,
"eval_steps_per_second": 0.219,
"step": 300
},
{
"epoch": 2.4771573604060912,
"grad_norm": 0.0965413898229599,
"learning_rate": 3.803421678562213e-05,
"loss": 0.1169,
"step": 305
},
{
"epoch": 2.517766497461929,
"grad_norm": 0.07902427762746811,
"learning_rate": 3.631685049639586e-05,
"loss": 0.1084,
"step": 310
},
{
"epoch": 2.5583756345177666,
"grad_norm": 0.0779964029788971,
"learning_rate": 3.461676388250651e-05,
"loss": 0.1092,
"step": 315
},
{
"epoch": 2.598984771573604,
"grad_norm": 0.08506277203559875,
"learning_rate": 3.293610388715048e-05,
"loss": 0.1116,
"step": 320
},
{
"epoch": 2.6395939086294415,
"grad_norm": 0.1012549176812172,
"learning_rate": 3.127699292074683e-05,
"loss": 0.118,
"step": 325
},
{
"epoch": 2.6802030456852792,
"grad_norm": 0.13339918851852417,
"learning_rate": 2.964152618066508e-05,
"loss": 0.1043,
"step": 330
},
{
"epoch": 2.720812182741117,
"grad_norm": 0.10142350941896439,
"learning_rate": 2.8031769005319147e-05,
"loss": 0.1103,
"step": 335
},
{
"epoch": 2.761421319796954,
"grad_norm": 0.08831272274255753,
"learning_rate": 2.6449754265968264e-05,
"loss": 0.1125,
"step": 340
},
{
"epoch": 2.802030456852792,
"grad_norm": 0.07664326578378677,
"learning_rate": 2.4897479799518796e-05,
"loss": 0.1069,
"step": 345
},
{
"epoch": 2.8426395939086295,
"grad_norm": 0.08216122537851334,
"learning_rate": 2.3376905885569182e-05,
"loss": 0.1044,
"step": 350
},
{
"epoch": 2.8426395939086295,
"eval_loss": 0.1425444483757019,
"eval_runtime": 570.665,
"eval_samples_per_second": 1.752,
"eval_steps_per_second": 0.219,
"step": 350
},
{
"epoch": 2.885279187817259,
"grad_norm": 0.12189479172229767,
"learning_rate": 2.1889952770883643e-05,
"loss": 0.1051,
"step": 355
},
{
"epoch": 2.9258883248730965,
"grad_norm": 0.09821608662605286,
"learning_rate": 2.043849824442124e-05,
"loss": 0.1066,
"step": 360
},
{
"epoch": 2.966497461928934,
"grad_norm": 0.0908147543668747,
"learning_rate": 1.9024375265982384e-05,
"loss": 0.1076,
"step": 365
},
{
"epoch": 3.0071065989847714,
"grad_norm": 0.11563821136951447,
"learning_rate": 1.764936965146773e-05,
"loss": 0.1134,
"step": 370
},
{
"epoch": 3.047715736040609,
"grad_norm": 0.10024692863225937,
"learning_rate": 1.631521781767214e-05,
"loss": 0.1081,
"step": 375
},
{
"epoch": 3.0883248730964468,
"grad_norm": 0.11522582173347473,
"learning_rate": 1.502360458946232e-05,
"loss": 0.0996,
"step": 380
},
{
"epoch": 3.1289340101522845,
"grad_norm": 0.1005687341094017,
"learning_rate": 1.3776161072106702e-05,
"loss": 0.1155,
"step": 385
},
{
"epoch": 3.1695431472081217,
"grad_norm": 0.10551727563142776,
"learning_rate": 1.257446259144494e-05,
"loss": 0.1184,
"step": 390
},
{
"epoch": 3.2101522842639594,
"grad_norm": 0.09149405360221863,
"learning_rate": 1.1420026704498077e-05,
"loss": 0.1071,
"step": 395
},
{
"epoch": 3.250761421319797,
"grad_norm": 0.08743679523468018,
"learning_rate": 1.031431128303153e-05,
"loss": 0.0984,
"step": 400
},
{
"epoch": 3.250761421319797,
"eval_loss": 0.14250527322292328,
"eval_runtime": 575.342,
"eval_samples_per_second": 1.738,
"eval_steps_per_second": 0.217,
"step": 400
},
{
"epoch": 3.2913705583756343,
"grad_norm": 0.1152808740735054,
"learning_rate": 9.258712672491415e-06,
"loss": 0.1075,
"step": 405
},
{
"epoch": 3.331979695431472,
"grad_norm": 0.0870693102478981,
"learning_rate": 8.254563928638893e-06,
"loss": 0.1055,
"step": 410
},
{
"epoch": 3.3725888324873097,
"grad_norm": 0.1064496636390686,
"learning_rate": 7.3031331341093915e-06,
"loss": 0.1162,
"step": 415
},
{
"epoch": 3.4131979695431474,
"grad_norm": 0.10074327141046524,
"learning_rate": 6.405621797022848e-06,
"loss": 0.1032,
"step": 420
},
{
"epoch": 3.4538071065989846,
"grad_norm": 0.11636592447757721,
"learning_rate": 5.563163333667099e-06,
"loss": 0.1057,
"step": 425
},
{
"epoch": 3.4944162436548223,
"grad_norm": 0.08998982608318329,
"learning_rate": 4.776821637170526e-06,
"loss": 0.105,
"step": 430
},
{
"epoch": 3.53502538071066,
"grad_norm": 0.09578435868024826,
"learning_rate": 4.047589733971646e-06,
"loss": 0.1032,
"step": 435
},
{
"epoch": 3.575634517766497,
"grad_norm": 0.10539178550243378,
"learning_rate": 3.376388529782215e-06,
"loss": 0.1046,
"step": 440
},
{
"epoch": 3.616243654822335,
"grad_norm": 0.1038966253399849,
"learning_rate": 2.7640656466274782e-06,
"loss": 0.0981,
"step": 445
},
{
"epoch": 3.6568527918781726,
"grad_norm": 0.09623471647500992,
"learning_rate": 2.2113943524323167e-06,
"loss": 0.1014,
"step": 450
},
{
"epoch": 3.6568527918781726,
"eval_loss": 0.1424967348575592,
"eval_runtime": 575.2181,
"eval_samples_per_second": 1.738,
"eval_steps_per_second": 0.217,
"step": 450
}
],
"logging_steps": 5,
"max_steps": 492,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.8705510622704435e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
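
The JSON above is in the trainer_state.json format emitted by the Hugging Face transformers Trainer: training loss is logged every 5 steps (logging_steps), evaluation runs every 50 steps (eval_steps), and the best eval_loss of 0.1425 is reached at step 450, matching the best_metric and best_model_checkpoint fields at the top. Below is a minimal sketch, not part of the original file, of reading this log with only the Python standard library; the local filename "trainer_state.json" is an assumption.

import json

# Assumed local copy of the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} "
      f"at {state['best_model_checkpoint']}")
print(f"{len(train_log)} train points, {len(eval_log)} eval points")
for e in eval_log:
    print(f"step {e['step']:>3}  epoch {e['epoch']:.2f}  "
          f"eval_loss {e['eval_loss']:.4f}")

The same log_history list can be handed to pandas or matplotlib to plot the training and evaluation loss curves if a visual summary is preferred.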