{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7711654268508679,
"eval_steps": 500,
"global_step": 10000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017711654268508677,
"grad_norm": 4225.109375,
"learning_rate": 1.0000000000000002e-06,
"loss": 308.9427,
"step": 100
},
{
"epoch": 0.035423308537017355,
"grad_norm": 27728.119140625,
"learning_rate": 2.0000000000000003e-06,
"loss": 355.2513,
"step": 200
},
{
"epoch": 0.053134962805526036,
"grad_norm": 830.27294921875,
"learning_rate": 3e-06,
"loss": 161.2664,
"step": 300
},
{
"epoch": 0.07084661707403471,
"grad_norm": 127.9210205078125,
"learning_rate": 4.000000000000001e-06,
"loss": 110.5367,
"step": 400
},
{
"epoch": 0.08855827134254339,
"grad_norm": 306.7760314941406,
"learning_rate": 5e-06,
"loss": 164.8705,
"step": 500
},
{
"epoch": 0.10626992561105207,
"grad_norm": 297.30364990234375,
"learning_rate": 6e-06,
"loss": 297.7952,
"step": 600
},
{
"epoch": 0.12398157987956075,
"grad_norm": 1049.6143798828125,
"learning_rate": 7e-06,
"loss": 98.5203,
"step": 700
},
{
"epoch": 0.14169323414806942,
"grad_norm": 376.938232421875,
"learning_rate": 8.000000000000001e-06,
"loss": 168.0427,
"step": 800
},
{
"epoch": 0.1594048884165781,
"grad_norm": 503.4874267578125,
"learning_rate": 9e-06,
"loss": 185.119,
"step": 900
},
{
"epoch": 0.17711654268508678,
"grad_norm": 799.0496826171875,
"learning_rate": 1e-05,
"loss": 70.9623,
"step": 1000
},
{
"epoch": 0.19482819695359546,
"grad_norm": 293.7897033691406,
"learning_rate": 9.99695413509548e-06,
"loss": 149.6974,
"step": 1100
},
{
"epoch": 0.21253985122210414,
"grad_norm": 940.9512939453125,
"learning_rate": 9.987820251299121e-06,
"loss": 86.5666,
"step": 1200
},
{
"epoch": 0.23025150549061282,
"grad_norm": 900.4419555664062,
"learning_rate": 9.972609476841368e-06,
"loss": 97.8882,
"step": 1300
},
{
"epoch": 0.2479631597591215,
"grad_norm": 484.7297058105469,
"learning_rate": 9.951340343707852e-06,
"loss": 139.5794,
"step": 1400
},
{
"epoch": 0.26567481402763016,
"grad_norm": 466.8398742675781,
"learning_rate": 9.924038765061042e-06,
"loss": 135.3206,
"step": 1500
},
{
"epoch": 0.28338646829613884,
"grad_norm": 860.2133178710938,
"learning_rate": 9.890738003669029e-06,
"loss": 89.5288,
"step": 1600
},
{
"epoch": 0.3010981225646475,
"grad_norm": 1099.1881103515625,
"learning_rate": 9.851478631379982e-06,
"loss": 91.6384,
"step": 1700
},
{
"epoch": 0.3188097768331562,
"grad_norm": 250.72775268554688,
"learning_rate": 9.806308479691595e-06,
"loss": 154.9306,
"step": 1800
},
{
"epoch": 0.3365214311016649,
"grad_norm": 12806.482421875,
"learning_rate": 9.755282581475769e-06,
"loss": 191.4907,
"step": 1900
},
{
"epoch": 0.35423308537017356,
"grad_norm": 248.52146911621094,
"learning_rate": 9.698463103929542e-06,
"loss": 99.3807,
"step": 2000
},
{
"epoch": 0.37194473963868224,
"grad_norm": 591.319091796875,
"learning_rate": 9.635919272833938e-06,
"loss": 107.2973,
"step": 2100
},
{
"epoch": 0.3896563939071909,
"grad_norm": 1257.0123291015625,
"learning_rate": 9.567727288213005e-06,
"loss": 121.5465,
"step": 2200
},
{
"epoch": 0.4073680481756996,
"grad_norm": 501.8513488769531,
"learning_rate": 9.493970231495836e-06,
"loss": 130.1157,
"step": 2300
},
{
"epoch": 0.4250797024442083,
"grad_norm": 4555.56787109375,
"learning_rate": 9.414737964294636e-06,
"loss": 61.3746,
"step": 2400
},
{
"epoch": 0.44279135671271697,
"grad_norm": 1030.1964111328125,
"learning_rate": 9.330127018922195e-06,
"loss": 123.0796,
"step": 2500
},
{
"epoch": 0.46050301098122565,
"grad_norm": 788.5494384765625,
"learning_rate": 9.24024048078213e-06,
"loss": 92.3144,
"step": 2600
},
{
"epoch": 0.4782146652497343,
"grad_norm": 486.0594177246094,
"learning_rate": 9.145187862775208e-06,
"loss": 137.4813,
"step": 2700
},
{
"epoch": 0.495926319518243,
"grad_norm": 648.8170776367188,
"learning_rate": 9.045084971874738e-06,
"loss": 106.6566,
"step": 2800
},
{
"epoch": 0.5136379737867517,
"grad_norm": 1567.68505859375,
"learning_rate": 8.94005376803361e-06,
"loss": 90.1854,
"step": 2900
},
{
"epoch": 0.5313496280552603,
"grad_norm": 631.2234497070312,
"learning_rate": 8.83022221559489e-06,
"loss": 143.8329,
"step": 3000
},
{
"epoch": 0.549061282323769,
"grad_norm": 327.2228088378906,
"learning_rate": 8.715724127386971e-06,
"loss": 126.8167,
"step": 3100
},
{
"epoch": 0.5667729365922777,
"grad_norm": 524.2578125,
"learning_rate": 8.596699001693257e-06,
"loss": 135.6904,
"step": 3200
},
{
"epoch": 0.5844845908607864,
"grad_norm": 587.7288208007812,
"learning_rate": 8.473291852294986e-06,
"loss": 144.2552,
"step": 3300
},
{
"epoch": 0.602196245129295,
"grad_norm": 292.5152893066406,
"learning_rate": 8.345653031794292e-06,
"loss": 107.0582,
"step": 3400
},
{
"epoch": 0.6199078993978038,
"grad_norm": 2563.22216796875,
"learning_rate": 8.213938048432697e-06,
"loss": 107.5712,
"step": 3500
},
{
"epoch": 0.6376195536663124,
"grad_norm": 2963.86279296875,
"learning_rate": 8.078307376628292e-06,
"loss": 75.9044,
"step": 3600
},
{
"epoch": 0.6553312079348211,
"grad_norm": 216.5026397705078,
"learning_rate": 7.938926261462366e-06,
"loss": 79.0156,
"step": 3700
},
{
"epoch": 0.6730428622033298,
"grad_norm": 674.3363037109375,
"learning_rate": 7.795964517353734e-06,
"loss": 67.8441,
"step": 3800
},
{
"epoch": 0.6907545164718385,
"grad_norm": 284.2720947265625,
"learning_rate": 7.649596321166024e-06,
"loss": 77.3812,
"step": 3900
},
{
"epoch": 0.7084661707403471,
"grad_norm": 4744.10791015625,
"learning_rate": 7.500000000000001e-06,
"loss": 102.2295,
"step": 4000
},
{
"epoch": 0.7261778250088559,
"grad_norm": 3887.924560546875,
"learning_rate": 7.347357813929455e-06,
"loss": 119.0388,
"step": 4100
},
{
"epoch": 0.7438894792773645,
"grad_norm": 6186.1201171875,
"learning_rate": 7.191855733945388e-06,
"loss": 96.2807,
"step": 4200
},
{
"epoch": 0.7616011335458732,
"grad_norm": 229.56248474121094,
"learning_rate": 7.033683215379002e-06,
"loss": 101.2205,
"step": 4300
},
{
"epoch": 0.7793127878143818,
"grad_norm": 306.1286926269531,
"learning_rate": 6.873032967079562e-06,
"loss": 77.5637,
"step": 4400
},
{
"epoch": 0.7970244420828906,
"grad_norm": 549.1785888671875,
"learning_rate": 6.710100716628345e-06,
"loss": 72.9524,
"step": 4500
},
{
"epoch": 0.8147360963513992,
"grad_norm": 560.0545654296875,
"learning_rate": 6.545084971874738e-06,
"loss": 96.7195,
"step": 4600
},
{
"epoch": 0.832447750619908,
"grad_norm": 492.44091796875,
"learning_rate": 6.378186779084996e-06,
"loss": 100.4574,
"step": 4700
},
{
"epoch": 0.8501594048884166,
"grad_norm": 471.7704162597656,
"learning_rate": 6.209609477998339e-06,
"loss": 97.3029,
"step": 4800
},
{
"epoch": 0.8678710591569253,
"grad_norm": 266.0693359375,
"learning_rate": 6.039558454088796e-06,
"loss": 84.8428,
"step": 4900
},
{
"epoch": 0.8855827134254339,
"grad_norm": 863.2924194335938,
"learning_rate": 5.8682408883346535e-06,
"loss": 67.9786,
"step": 5000
},
{
"epoch": 0.9032943676939427,
"grad_norm": 1584.0009765625,
"learning_rate": 5.695865504800328e-06,
"loss": 100.1995,
"step": 5100
},
{
"epoch": 0.9210060219624513,
"grad_norm": 2196.005126953125,
"learning_rate": 5.522642316338268e-06,
"loss": 90.8572,
"step": 5200
},
{
"epoch": 0.9387176762309599,
"grad_norm": 598.8994140625,
"learning_rate": 5.348782368720627e-06,
"loss": 66.768,
"step": 5300
},
{
"epoch": 0.9564293304994687,
"grad_norm": 696.6085815429688,
"learning_rate": 5.174497483512506e-06,
"loss": 78.7763,
"step": 5400
},
{
"epoch": 0.9741409847679773,
"grad_norm": 273.0209655761719,
"learning_rate": 5e-06,
"loss": 235.4915,
"step": 5500
},
{
"epoch": 0.991852639036486,
"grad_norm": 1731.2950439453125,
"learning_rate": 4.825502516487497e-06,
"loss": 82.1385,
"step": 5600
},
{
"epoch": 1.0,
"eval_loss": 226.7201690673828,
"eval_runtime": 21.734,
"eval_samples_per_second": 115.487,
"eval_steps_per_second": 28.895,
"step": 5646
},
{
"epoch": 1.0095642933049946,
"grad_norm": 538.6300659179688,
"learning_rate": 4.651217631279374e-06,
"loss": 56.6562,
"step": 5700
},
{
"epoch": 1.0272759475735034,
"grad_norm": 1225.543212890625,
"learning_rate": 4.477357683661734e-06,
"loss": 82.8729,
"step": 5800
},
{
"epoch": 1.0449876018420121,
"grad_norm": 652.039794921875,
"learning_rate": 4.304134495199675e-06,
"loss": 72.214,
"step": 5900
},
{
"epoch": 1.0626992561105206,
"grad_norm": 248.68992614746094,
"learning_rate": 4.131759111665349e-06,
"loss": 142.3135,
"step": 6000
},
{
"epoch": 1.0804109103790294,
"grad_norm": 457.009033203125,
"learning_rate": 3.960441545911205e-06,
"loss": 66.6412,
"step": 6100
},
{
"epoch": 1.098122564647538,
"grad_norm": 817.4805297851562,
"learning_rate": 3.790390522001662e-06,
"loss": 97.1646,
"step": 6200
},
{
"epoch": 1.1158342189160468,
"grad_norm": 1199.6221923828125,
"learning_rate": 3.6218132209150047e-06,
"loss": 88.5047,
"step": 6300
},
{
"epoch": 1.1335458731845554,
"grad_norm": 553.9771728515625,
"learning_rate": 3.4549150281252635e-06,
"loss": 101.1097,
"step": 6400
},
{
"epoch": 1.151257527453064,
"grad_norm": 814.3031616210938,
"learning_rate": 3.289899283371657e-06,
"loss": 82.8931,
"step": 6500
},
{
"epoch": 1.1689691817215728,
"grad_norm": 274.961181640625,
"learning_rate": 3.12696703292044e-06,
"loss": 92.692,
"step": 6600
},
{
"epoch": 1.1866808359900816,
"grad_norm": 1075.9053955078125,
"learning_rate": 2.966316784621e-06,
"loss": 102.8753,
"step": 6700
},
{
"epoch": 1.20439249025859,
"grad_norm": 117.70056915283203,
"learning_rate": 2.8081442660546126e-06,
"loss": 87.495,
"step": 6800
},
{
"epoch": 1.2221041445270988,
"grad_norm": 457.9502258300781,
"learning_rate": 2.6526421860705474e-06,
"loss": 57.6712,
"step": 6900
},
{
"epoch": 1.2398157987956075,
"grad_norm": 1044.0828857421875,
"learning_rate": 2.5000000000000015e-06,
"loss": 100.2862,
"step": 7000
},
{
"epoch": 1.257527453064116,
"grad_norm": 454.90802001953125,
"learning_rate": 2.3504036788339763e-06,
"loss": 153.9108,
"step": 7100
},
{
"epoch": 1.2752391073326248,
"grad_norm": 275.4113464355469,
"learning_rate": 2.204035482646267e-06,
"loss": 133.3204,
"step": 7200
},
{
"epoch": 1.2929507616011335,
"grad_norm": 323.5784606933594,
"learning_rate": 2.061073738537635e-06,
"loss": 59.3089,
"step": 7300
},
{
"epoch": 1.3106624158696423,
"grad_norm": 2257.790771484375,
"learning_rate": 1.9216926233717087e-06,
"loss": 152.4049,
"step": 7400
},
{
"epoch": 1.328374070138151,
"grad_norm": 503.2570495605469,
"learning_rate": 1.7860619515673034e-06,
"loss": 96.8615,
"step": 7500
},
{
"epoch": 1.3460857244066595,
"grad_norm": 3797.780517578125,
"learning_rate": 1.6543469682057105e-06,
"loss": 61.9906,
"step": 7600
},
{
"epoch": 1.3637973786751683,
"grad_norm": 1134.3297119140625,
"learning_rate": 1.5267081477050132e-06,
"loss": 72.4744,
"step": 7700
},
{
"epoch": 1.381509032943677,
"grad_norm": 187.08917236328125,
"learning_rate": 1.4033009983067454e-06,
"loss": 99.3734,
"step": 7800
},
{
"epoch": 1.3992206872121855,
"grad_norm": 617.7765502929688,
"learning_rate": 1.2842758726130283e-06,
"loss": 143.0522,
"step": 7900
},
{
"epoch": 1.4169323414806942,
"grad_norm": 1724.1121826171875,
"learning_rate": 1.1697777844051105e-06,
"loss": 88.8854,
"step": 8000
},
{
"epoch": 1.434643995749203,
"grad_norm": 1899.291259765625,
"learning_rate": 1.0599462319663906e-06,
"loss": 60.2541,
"step": 8100
},
{
"epoch": 1.4523556500177117,
"grad_norm": 775.2619018554688,
"learning_rate": 9.549150281252633e-07,
"loss": 60.3232,
"step": 8200
},
{
"epoch": 1.4700673042862205,
"grad_norm": 522.8294067382812,
"learning_rate": 8.54812137224792e-07,
"loss": 68.5027,
"step": 8300
},
{
"epoch": 1.487778958554729,
"grad_norm": 804.1600341796875,
"learning_rate": 7.597595192178702e-07,
"loss": 133.9367,
"step": 8400
},
{
"epoch": 1.5054906128232377,
"grad_norm": 582.5191040039062,
"learning_rate": 6.698729810778065e-07,
"loss": 55.4999,
"step": 8500
},
{
"epoch": 1.5232022670917464,
"grad_norm": 406.82830810546875,
"learning_rate": 5.852620357053651e-07,
"loss": 55.5675,
"step": 8600
},
{
"epoch": 1.540913921360255,
"grad_norm": 523.9061889648438,
"learning_rate": 5.06029768504166e-07,
"loss": 74.5364,
"step": 8700
},
{
"epoch": 1.5586255756287637,
"grad_norm": 397.27508544921875,
"learning_rate": 4.322727117869951e-07,
"loss": 95.4718,
"step": 8800
},
{
"epoch": 1.5763372298972724,
"grad_norm": 827.7362670898438,
"learning_rate": 3.6408072716606346e-07,
"loss": 61.7358,
"step": 8900
},
{
"epoch": 1.594048884165781,
"grad_norm": 8013.48486328125,
"learning_rate": 3.015368960704584e-07,
"loss": 67.585,
"step": 9000
},
{
"epoch": 1.61176053843429,
"grad_norm": 4467.837890625,
"learning_rate": 2.447174185242324e-07,
"loss": 108.6042,
"step": 9100
},
{
"epoch": 1.6294721927027984,
"grad_norm": 1124.7557373046875,
"learning_rate": 1.9369152030840553e-07,
"loss": 88.8729,
"step": 9200
},
{
"epoch": 1.6471838469713072,
"grad_norm": 461.0145263671875,
"learning_rate": 1.4852136862001766e-07,
"loss": 106.0446,
"step": 9300
},
{
"epoch": 1.664895501239816,
"grad_norm": 1302.7359619140625,
"learning_rate": 1.0926199633097156e-07,
"loss": 52.0522,
"step": 9400
},
{
"epoch": 1.6826071555083244,
"grad_norm": 570.8071899414062,
"learning_rate": 7.59612349389599e-08,
"loss": 66.1858,
"step": 9500
},
{
"epoch": 1.7003188097768331,
"grad_norm": 791.3758544921875,
"learning_rate": 4.865965629214819e-08,
"loss": 91.2349,
"step": 9600
},
{
"epoch": 1.7180304640453419,
"grad_norm": 1101.4031982421875,
"learning_rate": 2.7390523158633552e-08,
"loss": 102.8052,
"step": 9700
},
{
"epoch": 1.7357421183138504,
"grad_norm": 1016.6182250976562,
"learning_rate": 1.2179748700879013e-08,
"loss": 132.7038,
"step": 9800
},
{
"epoch": 1.7534537725823593,
"grad_norm": 265.1811218261719,
"learning_rate": 3.0458649045211897e-09,
"loss": 116.7223,
"step": 9900
},
{
"epoch": 1.7711654268508679,
"grad_norm": 1466.9412841796875,
"learning_rate": 0.0,
"loss": 103.9828,
"step": 10000
}
],
"logging_steps": 100,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}