Qwen2.5-14b-bnk-lora-10 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.788508830762352,
"eval_steps": 50,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017885088307623517,
"grad_norm": 0.27167433500289917,
"learning_rate": 5.9523809523809525e-06,
"loss": 1.9206,
"step": 10
},
{
"epoch": 0.035770176615247035,
"grad_norm": 0.2490677386522293,
"learning_rate": 1.1904761904761905e-05,
"loss": 1.8864,
"step": 20
},
{
"epoch": 0.05365526492287056,
"grad_norm": 0.23479315638542175,
"learning_rate": 1.785714285714286e-05,
"loss": 1.8092,
"step": 30
},
{
"epoch": 0.07154035323049407,
"grad_norm": 0.2516893446445465,
"learning_rate": 2.380952380952381e-05,
"loss": 1.6878,
"step": 40
},
{
"epoch": 0.0894254415381176,
"grad_norm": 0.30081456899642944,
"learning_rate": 2.9761904761904762e-05,
"loss": 1.4673,
"step": 50
},
{
"epoch": 0.0894254415381176,
"eval_loss": 1.3079078197479248,
"eval_runtime": 340.2145,
"eval_samples_per_second": 5.843,
"eval_steps_per_second": 2.922,
"step": 50
},
{
"epoch": 0.10731052984574112,
"grad_norm": 0.40885159373283386,
"learning_rate": 3.571428571428572e-05,
"loss": 1.1216,
"step": 60
},
{
"epoch": 0.12519561815336464,
"grad_norm": 0.34105631709098816,
"learning_rate": 4.166666666666667e-05,
"loss": 0.623,
"step": 70
},
{
"epoch": 0.14308070646098814,
"grad_norm": 0.08575189858675003,
"learning_rate": 4.761904761904762e-05,
"loss": 0.321,
"step": 80
},
{
"epoch": 0.16096579476861167,
"grad_norm": 0.06351594626903534,
"learning_rate": 5.3571428571428575e-05,
"loss": 0.2671,
"step": 90
},
{
"epoch": 0.1788508830762352,
"grad_norm": 0.044943634420633316,
"learning_rate": 5.9523809523809524e-05,
"loss": 0.2634,
"step": 100
},
{
"epoch": 0.1788508830762352,
"eval_loss": 0.2529277801513672,
"eval_runtime": 340.0791,
"eval_samples_per_second": 5.846,
"eval_steps_per_second": 2.923,
"step": 100
},
{
"epoch": 0.1967359713838587,
"grad_norm": 0.04525942727923393,
"learning_rate": 6.547619047619048e-05,
"loss": 0.2476,
"step": 110
},
{
"epoch": 0.21462105969148224,
"grad_norm": 0.03497561439871788,
"learning_rate": 7.142857142857143e-05,
"loss": 0.2364,
"step": 120
},
{
"epoch": 0.23250614799910574,
"grad_norm": 0.03806141018867493,
"learning_rate": 7.738095238095239e-05,
"loss": 0.2375,
"step": 130
},
{
"epoch": 0.25039123630672927,
"grad_norm": 0.03912373259663582,
"learning_rate": 8.333333333333334e-05,
"loss": 0.2458,
"step": 140
},
{
"epoch": 0.2682763246143528,
"grad_norm": 0.038528475910425186,
"learning_rate": 8.92857142857143e-05,
"loss": 0.2356,
"step": 150
},
{
"epoch": 0.2682763246143528,
"eval_loss": 0.2338634878396988,
"eval_runtime": 340.1092,
"eval_samples_per_second": 5.845,
"eval_steps_per_second": 2.923,
"step": 150
},
{
"epoch": 0.2861614129219763,
"grad_norm": 0.033717814832925797,
"learning_rate": 9.523809523809524e-05,
"loss": 0.2328,
"step": 160
},
{
"epoch": 0.3040465012295998,
"grad_norm": 0.034611135721206665,
"learning_rate": 9.98674618952949e-05,
"loss": 0.232,
"step": 170
},
{
"epoch": 0.32193158953722334,
"grad_norm": 0.03772770240902901,
"learning_rate": 9.920477137176939e-05,
"loss": 0.2321,
"step": 180
},
{
"epoch": 0.33981667784484687,
"grad_norm": 0.03764208406209946,
"learning_rate": 9.854208084824388e-05,
"loss": 0.2252,
"step": 190
},
{
"epoch": 0.3577017661524704,
"grad_norm": 0.04233132675290108,
"learning_rate": 9.787939032471835e-05,
"loss": 0.2255,
"step": 200
},
{
"epoch": 0.3577017661524704,
"eval_loss": 0.22211842238903046,
"eval_runtime": 340.2159,
"eval_samples_per_second": 5.843,
"eval_steps_per_second": 2.922,
"step": 200
},
{
"epoch": 0.3755868544600939,
"grad_norm": 0.04666154831647873,
"learning_rate": 9.721669980119285e-05,
"loss": 0.2221,
"step": 210
},
{
"epoch": 0.3934719427677174,
"grad_norm": 0.05546770244836807,
"learning_rate": 9.655400927766734e-05,
"loss": 0.2241,
"step": 220
},
{
"epoch": 0.41135703107534094,
"grad_norm": 0.04022465646266937,
"learning_rate": 9.589131875414182e-05,
"loss": 0.2144,
"step": 230
},
{
"epoch": 0.42924211938296447,
"grad_norm": 0.041572101414203644,
"learning_rate": 9.52286282306163e-05,
"loss": 0.2113,
"step": 240
},
{
"epoch": 0.44712720769058795,
"grad_norm": 0.039912398904561996,
"learning_rate": 9.45659377070908e-05,
"loss": 0.2143,
"step": 250
},
{
"epoch": 0.44712720769058795,
"eval_loss": 0.21348513662815094,
"eval_runtime": 339.9419,
"eval_samples_per_second": 5.848,
"eval_steps_per_second": 2.924,
"step": 250
},
{
"epoch": 0.4650122959982115,
"grad_norm": 0.04531717300415039,
"learning_rate": 9.390324718356528e-05,
"loss": 0.2078,
"step": 260
},
{
"epoch": 0.482897384305835,
"grad_norm": 0.05076432228088379,
"learning_rate": 9.324055666003977e-05,
"loss": 0.2105,
"step": 270
},
{
"epoch": 0.5007824726134585,
"grad_norm": 0.05077740550041199,
"learning_rate": 9.257786613651425e-05,
"loss": 0.2102,
"step": 280
},
{
"epoch": 0.518667560921082,
"grad_norm": 0.05294053629040718,
"learning_rate": 9.191517561298874e-05,
"loss": 0.2116,
"step": 290
},
{
"epoch": 0.5365526492287056,
"grad_norm": 0.04222816973924637,
"learning_rate": 9.125248508946323e-05,
"loss": 0.2004,
"step": 300
},
{
"epoch": 0.5365526492287056,
"eval_loss": 0.205940380692482,
"eval_runtime": 340.0032,
"eval_samples_per_second": 5.847,
"eval_steps_per_second": 2.924,
"step": 300
},
{
"epoch": 0.5544377375363291,
"grad_norm": 0.06563922017812729,
"learning_rate": 9.058979456593771e-05,
"loss": 0.2024,
"step": 310
},
{
"epoch": 0.5723228258439526,
"grad_norm": 0.06504207849502563,
"learning_rate": 8.99271040424122e-05,
"loss": 0.2076,
"step": 320
},
{
"epoch": 0.5902079141515761,
"grad_norm": 0.055658888071775436,
"learning_rate": 8.926441351888669e-05,
"loss": 0.2026,
"step": 330
},
{
"epoch": 0.6080930024591996,
"grad_norm": 0.0635204091668129,
"learning_rate": 8.860172299536117e-05,
"loss": 0.199,
"step": 340
},
{
"epoch": 0.6259780907668232,
"grad_norm": 0.06404862552881241,
"learning_rate": 8.793903247183566e-05,
"loss": 0.2048,
"step": 350
},
{
"epoch": 0.6259780907668232,
"eval_loss": 0.1987379640340805,
"eval_runtime": 339.7556,
"eval_samples_per_second": 5.851,
"eval_steps_per_second": 2.926,
"step": 350
},
{
"epoch": 0.6438631790744467,
"grad_norm": 0.061456065624952316,
"learning_rate": 8.727634194831014e-05,
"loss": 0.2002,
"step": 360
},
{
"epoch": 0.6617482673820702,
"grad_norm": 0.059758663177490234,
"learning_rate": 8.661365142478463e-05,
"loss": 0.1928,
"step": 370
},
{
"epoch": 0.6796333556896937,
"grad_norm": 0.07035389542579651,
"learning_rate": 8.595096090125912e-05,
"loss": 0.1982,
"step": 380
},
{
"epoch": 0.6975184439973172,
"grad_norm": 0.05884738266468048,
"learning_rate": 8.52882703777336e-05,
"loss": 0.199,
"step": 390
},
{
"epoch": 0.7154035323049408,
"grad_norm": 0.07666763663291931,
"learning_rate": 8.462557985420809e-05,
"loss": 0.1901,
"step": 400
},
{
"epoch": 0.7154035323049408,
"eval_loss": 0.19112130999565125,
"eval_runtime": 339.7085,
"eval_samples_per_second": 5.852,
"eval_steps_per_second": 2.926,
"step": 400
},
{
"epoch": 0.7332886206125643,
"grad_norm": 0.07783893495798111,
"learning_rate": 8.396288933068258e-05,
"loss": 0.1903,
"step": 410
},
{
"epoch": 0.7511737089201878,
"grad_norm": 0.06356322020292282,
"learning_rate": 8.330019880715706e-05,
"loss": 0.189,
"step": 420
},
{
"epoch": 0.7690587972278113,
"grad_norm": 0.06573337316513062,
"learning_rate": 8.263750828363155e-05,
"loss": 0.1921,
"step": 430
},
{
"epoch": 0.7869438855354348,
"grad_norm": 0.0807296559214592,
"learning_rate": 8.197481776010603e-05,
"loss": 0.1826,
"step": 440
},
{
"epoch": 0.8048289738430584,
"grad_norm": 0.07666200399398804,
"learning_rate": 8.131212723658052e-05,
"loss": 0.1835,
"step": 450
},
{
"epoch": 0.8048289738430584,
"eval_loss": 0.18313755095005035,
"eval_runtime": 340.1916,
"eval_samples_per_second": 5.844,
"eval_steps_per_second": 2.922,
"step": 450
},
{
"epoch": 0.8227140621506819,
"grad_norm": 0.09144561737775803,
"learning_rate": 8.064943671305501e-05,
"loss": 0.1813,
"step": 460
},
{
"epoch": 0.8405991504583054,
"grad_norm": 0.10572754591703415,
"learning_rate": 7.99867461895295e-05,
"loss": 0.1776,
"step": 470
},
{
"epoch": 0.8584842387659289,
"grad_norm": 0.08453221619129181,
"learning_rate": 7.932405566600398e-05,
"loss": 0.1809,
"step": 480
},
{
"epoch": 0.8763693270735524,
"grad_norm": 0.08854762464761734,
"learning_rate": 7.866136514247847e-05,
"loss": 0.176,
"step": 490
},
{
"epoch": 0.8942544153811759,
"grad_norm": 0.09418456256389618,
"learning_rate": 7.799867461895295e-05,
"loss": 0.1749,
"step": 500
},
{
"epoch": 0.8942544153811759,
"eval_loss": 0.17343606054782867,
"eval_runtime": 340.1945,
"eval_samples_per_second": 5.844,
"eval_steps_per_second": 2.922,
"step": 500
},
{
"epoch": 0.9121395036887995,
"grad_norm": 0.08916131407022476,
"learning_rate": 7.733598409542744e-05,
"loss": 0.1705,
"step": 510
},
{
"epoch": 0.930024591996423,
"grad_norm": 0.09915047138929367,
"learning_rate": 7.667329357190193e-05,
"loss": 0.166,
"step": 520
},
{
"epoch": 0.9479096803040465,
"grad_norm": 0.09700702130794525,
"learning_rate": 7.601060304837641e-05,
"loss": 0.1675,
"step": 530
},
{
"epoch": 0.96579476861167,
"grad_norm": 0.13390330970287323,
"learning_rate": 7.53479125248509e-05,
"loss": 0.1685,
"step": 540
},
{
"epoch": 0.9836798569192935,
"grad_norm": 0.10853017121553421,
"learning_rate": 7.468522200132538e-05,
"loss": 0.1682,
"step": 550
},
{
"epoch": 0.9836798569192935,
"eval_loss": 0.16290181875228882,
"eval_runtime": 340.1443,
"eval_samples_per_second": 5.845,
"eval_steps_per_second": 2.922,
"step": 550
},
{
"epoch": 1.001564945226917,
"grad_norm": 0.11536076664924622,
"learning_rate": 7.402253147779987e-05,
"loss": 0.1601,
"step": 560
},
{
"epoch": 1.0194500335345407,
"grad_norm": 0.13258640468120575,
"learning_rate": 7.335984095427436e-05,
"loss": 0.1572,
"step": 570
},
{
"epoch": 1.037335121842164,
"grad_norm": 0.12075567245483398,
"learning_rate": 7.269715043074884e-05,
"loss": 0.1502,
"step": 580
},
{
"epoch": 1.0552202101497876,
"grad_norm": 0.1262330263853073,
"learning_rate": 7.203445990722333e-05,
"loss": 0.1507,
"step": 590
},
{
"epoch": 1.0731052984574112,
"grad_norm": 0.13084138929843903,
"learning_rate": 7.137176938369782e-05,
"loss": 0.1507,
"step": 600
},
{
"epoch": 1.0731052984574112,
"eval_loss": 0.15283866226673126,
"eval_runtime": 340.005,
"eval_samples_per_second": 5.847,
"eval_steps_per_second": 2.923,
"step": 600
},
{
"epoch": 1.0909903867650346,
"grad_norm": 0.13570532202720642,
"learning_rate": 7.07090788601723e-05,
"loss": 0.1476,
"step": 610
},
{
"epoch": 1.1088754750726582,
"grad_norm": 0.12759168446063995,
"learning_rate": 7.004638833664679e-05,
"loss": 0.1455,
"step": 620
},
{
"epoch": 1.1267605633802817,
"grad_norm": 0.15715628862380981,
"learning_rate": 6.938369781312127e-05,
"loss": 0.1465,
"step": 630
},
{
"epoch": 1.144645651687905,
"grad_norm": 0.15863963961601257,
"learning_rate": 6.872100728959576e-05,
"loss": 0.1375,
"step": 640
},
{
"epoch": 1.1625307399955287,
"grad_norm": 0.14566145837306976,
"learning_rate": 6.805831676607025e-05,
"loss": 0.1378,
"step": 650
},
{
"epoch": 1.1625307399955287,
"eval_loss": 0.14257138967514038,
"eval_runtime": 340.2975,
"eval_samples_per_second": 5.842,
"eval_steps_per_second": 2.921,
"step": 650
},
{
"epoch": 1.1804158283031523,
"grad_norm": 0.14659343659877777,
"learning_rate": 6.739562624254473e-05,
"loss": 0.1389,
"step": 660
},
{
"epoch": 1.1983009166107759,
"grad_norm": 0.1562536507844925,
"learning_rate": 6.673293571901922e-05,
"loss": 0.1368,
"step": 670
},
{
"epoch": 1.2161860049183992,
"grad_norm": 0.16155236959457397,
"learning_rate": 6.60702451954937e-05,
"loss": 0.1367,
"step": 680
},
{
"epoch": 1.2340710932260228,
"grad_norm": 0.15700899064540863,
"learning_rate": 6.540755467196819e-05,
"loss": 0.133,
"step": 690
},
{
"epoch": 1.2519561815336462,
"grad_norm": 0.14563362300395966,
"learning_rate": 6.474486414844268e-05,
"loss": 0.1304,
"step": 700
},
{
"epoch": 1.2519561815336462,
"eval_loss": 0.1325962394475937,
"eval_runtime": 340.3546,
"eval_samples_per_second": 5.841,
"eval_steps_per_second": 2.92,
"step": 700
},
{
"epoch": 1.2698412698412698,
"grad_norm": 0.15527787804603577,
"learning_rate": 6.408217362491717e-05,
"loss": 0.1309,
"step": 710
},
{
"epoch": 1.2877263581488934,
"grad_norm": 0.17507752776145935,
"learning_rate": 6.341948310139165e-05,
"loss": 0.1276,
"step": 720
},
{
"epoch": 1.305611446456517,
"grad_norm": 0.18257690966129303,
"learning_rate": 6.275679257786614e-05,
"loss": 0.1294,
"step": 730
},
{
"epoch": 1.3234965347641405,
"grad_norm": 0.1934031993150711,
"learning_rate": 6.209410205434062e-05,
"loss": 0.1236,
"step": 740
},
{
"epoch": 1.341381623071764,
"grad_norm": 0.1760103851556778,
"learning_rate": 6.143141153081511e-05,
"loss": 0.122,
"step": 750
},
{
"epoch": 1.341381623071764,
"eval_loss": 0.12294474244117737,
"eval_runtime": 340.3675,
"eval_samples_per_second": 5.841,
"eval_steps_per_second": 2.92,
"step": 750
},
{
"epoch": 1.3592667113793875,
"grad_norm": 0.18259243667125702,
"learning_rate": 6.0768721007289597e-05,
"loss": 0.1191,
"step": 760
},
{
"epoch": 1.3771517996870108,
"grad_norm": 0.19982369244098663,
"learning_rate": 6.010603048376409e-05,
"loss": 0.1179,
"step": 770
},
{
"epoch": 1.3950368879946344,
"grad_norm": 0.19513551890850067,
"learning_rate": 5.9443339960238576e-05,
"loss": 0.1136,
"step": 780
},
{
"epoch": 1.412921976302258,
"grad_norm": 0.20091329514980316,
"learning_rate": 5.8780649436713056e-05,
"loss": 0.1151,
"step": 790
},
{
"epoch": 1.4308070646098816,
"grad_norm": 0.17873592674732208,
"learning_rate": 5.811795891318754e-05,
"loss": 0.1133,
"step": 800
},
{
"epoch": 1.4308070646098816,
"eval_loss": 0.11276984959840775,
"eval_runtime": 340.2802,
"eval_samples_per_second": 5.842,
"eval_steps_per_second": 2.921,
"step": 800
},
{
"epoch": 1.448692152917505,
"grad_norm": 0.1767304390668869,
"learning_rate": 5.745526838966203e-05,
"loss": 0.1113,
"step": 810
},
{
"epoch": 1.4665772412251286,
"grad_norm": 0.19418853521347046,
"learning_rate": 5.679257786613652e-05,
"loss": 0.1062,
"step": 820
},
{
"epoch": 1.4844623295327521,
"grad_norm": 0.18987976014614105,
"learning_rate": 5.6129887342611e-05,
"loss": 0.1067,
"step": 830
},
{
"epoch": 1.5023474178403755,
"grad_norm": 0.19089455902576447,
"learning_rate": 5.546719681908549e-05,
"loss": 0.1059,
"step": 840
},
{
"epoch": 1.520232506147999,
"grad_norm": 0.2054363191127777,
"learning_rate": 5.480450629555997e-05,
"loss": 0.1052,
"step": 850
},
{
"epoch": 1.520232506147999,
"eval_loss": 0.10367103666067123,
"eval_runtime": 340.2498,
"eval_samples_per_second": 5.843,
"eval_steps_per_second": 2.921,
"step": 850
},
{
"epoch": 1.5381175944556227,
"grad_norm": 0.2290847897529602,
"learning_rate": 5.4141815772034466e-05,
"loss": 0.0994,
"step": 860
},
{
"epoch": 1.5560026827632463,
"grad_norm": 0.20180433988571167,
"learning_rate": 5.3479125248508946e-05,
"loss": 0.1002,
"step": 870
},
{
"epoch": 1.5738877710708696,
"grad_norm": 0.2535998225212097,
"learning_rate": 5.281643472498343e-05,
"loss": 0.0962,
"step": 880
},
{
"epoch": 1.5917728593784932,
"grad_norm": 0.2272896021604538,
"learning_rate": 5.215374420145792e-05,
"loss": 0.0937,
"step": 890
},
{
"epoch": 1.6096579476861166,
"grad_norm": 0.2379264086484909,
"learning_rate": 5.149105367793241e-05,
"loss": 0.0984,
"step": 900
},
{
"epoch": 1.6096579476861166,
"eval_loss": 0.09358672797679901,
"eval_runtime": 340.1031,
"eval_samples_per_second": 5.845,
"eval_steps_per_second": 2.923,
"step": 900
},
{
"epoch": 1.6275430359937402,
"grad_norm": 0.20866374671459198,
"learning_rate": 5.08283631544069e-05,
"loss": 0.092,
"step": 910
},
{
"epoch": 1.6454281243013638,
"grad_norm": 0.23577432334423065,
"learning_rate": 5.016567263088138e-05,
"loss": 0.0921,
"step": 920
},
{
"epoch": 1.6633132126089873,
"grad_norm": 0.22456888854503632,
"learning_rate": 4.9502982107355864e-05,
"loss": 0.0888,
"step": 930
},
{
"epoch": 1.681198300916611,
"grad_norm": 0.2216615080833435,
"learning_rate": 4.884029158383036e-05,
"loss": 0.0874,
"step": 940
},
{
"epoch": 1.6990833892242343,
"grad_norm": 0.23125343024730682,
"learning_rate": 4.8177601060304836e-05,
"loss": 0.0842,
"step": 950
},
{
"epoch": 1.6990833892242343,
"eval_loss": 0.08562839031219482,
"eval_runtime": 339.6325,
"eval_samples_per_second": 5.853,
"eval_steps_per_second": 2.927,
"step": 950
},
{
"epoch": 1.7169684775318577,
"grad_norm": 0.22048653662204742,
"learning_rate": 4.751491053677933e-05,
"loss": 0.087,
"step": 960
},
{
"epoch": 1.7348535658394812,
"grad_norm": 0.23853205144405365,
"learning_rate": 4.685222001325381e-05,
"loss": 0.0789,
"step": 970
},
{
"epoch": 1.7527386541471048,
"grad_norm": 0.23902659118175507,
"learning_rate": 4.61895294897283e-05,
"loss": 0.0771,
"step": 980
},
{
"epoch": 1.7706237424547284,
"grad_norm": 0.2581934332847595,
"learning_rate": 4.552683896620278e-05,
"loss": 0.0782,
"step": 990
},
{
"epoch": 1.788508830762352,
"grad_norm": 0.23264268040657043,
"learning_rate": 4.4864148442677275e-05,
"loss": 0.0767,
"step": 1000
},
{
"epoch": 1.788508830762352,
"eval_loss": 0.07814358174800873,
"eval_runtime": 339.6457,
"eval_samples_per_second": 5.853,
"eval_steps_per_second": 2.927,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 1677,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.142888239316599e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
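
The state above follows the usual Hugging Face `Trainer` checkpoint layout: `log_history` interleaves training entries (every `logging_steps` = 10 steps, carrying `loss`, `grad_norm`, and `learning_rate`) with evaluation entries (every `eval_steps` = 50 steps, carrying `eval_loss` and throughput figures). A minimal sketch for inspecting the file, assuming a local copy named `trainer_state.json` and `matplotlib` installed (neither is part of the upload itself):

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state and split log_history into training-loss entries
# (those with a "loss" key) and evaluation entries (those with "eval_loss").
with open("trainer_state.json") as f:  # assumed local path
    state = json.load(f)

history = state["log_history"]
train = [(e["step"], e["loss"]) for e in history if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in history if "eval_loss" in e]

# Plot both curves against the global step.
plt.plot(*zip(*train), label="train loss (every 10 steps)")
plt.plot(*zip(*evals), marker="o", label="eval loss (every 50 steps)")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()
```

With this snapshot (1000 of 1677 steps, epoch ≈ 1.79 of 3), both curves decrease monotonically after warmup, ending at a training loss of 0.0767 and an eval loss of 0.0781 at step 1000.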