fr-3B-annealing / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500.0,
"global_step": 2078,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02406159769008662,
"grad_norm": 0.12731152772903442,
"learning_rate": 0.0001997144304734381,
"loss": 1.4321,
"step": 50
},
{
"epoch": 0.04812319538017324,
"grad_norm": 0.17785188555717468,
"learning_rate": 0.00019885935289284243,
"loss": 1.1646,
"step": 100
},
{
"epoch": 0.07218479307025986,
"grad_norm": 0.20939375460147858,
"learning_rate": 0.00019743965094021025,
"loss": 1.1437,
"step": 150
},
{
"epoch": 0.09624639076034648,
"grad_norm": 0.16228704154491425,
"learning_rate": 0.00019546343308783103,
"loss": 1.1293,
"step": 200
},
{
"epoch": 0.12030798845043311,
"grad_norm": 0.17629307508468628,
"learning_rate": 0.00019294198628763448,
"loss": 1.1407,
"step": 250
},
{
"epoch": 0.14436958614051973,
"grad_norm": 0.21964997053146362,
"learning_rate": 0.0001898897115070003,
"loss": 1.1248,
"step": 300
},
{
"epoch": 0.16843118383060635,
"grad_norm": 0.14089353382587433,
"learning_rate": 0.00018632404147920934,
"loss": 1.1263,
"step": 350
},
{
"epoch": 0.19249278152069296,
"grad_norm": 15.953330039978027,
"learning_rate": 0.00018226534113829578,
"loss": 1.1168,
"step": 400
},
{
"epoch": 0.2165543792107796,
"grad_norm": 0.1421365886926651,
"learning_rate": 0.00017773679130695595,
"loss": 1.1209,
"step": 450
},
{
"epoch": 0.24061597690086622,
"grad_norm": 0.12633951008319855,
"learning_rate": 0.0001727642563018167,
"loss": 1.1136,
"step": 500
},
{
"epoch": 0.2646775745909528,
"grad_norm": 0.1551765352487564,
"learning_rate": 0.0001673761362122227,
"loss": 1.1031,
"step": 550
},
{
"epoch": 0.28873917228103946,
"grad_norm": 0.10191167891025543,
"learning_rate": 0.00016160320469623483,
"loss": 1.1027,
"step": 600
},
{
"epoch": 0.3128007699711261,
"grad_norm": 0.10823129862546921,
"learning_rate": 0.00015547843322025095,
"loss": 1.0963,
"step": 650
},
{
"epoch": 0.3368623676612127,
"grad_norm": 0.08364494144916534,
"learning_rate": 0.00014903680274608503,
"loss": 1.0993,
"step": 700
},
{
"epoch": 0.36092396535129934,
"grad_norm": 0.12315221130847931,
"learning_rate": 0.0001423151039410329,
"loss": 1.0921,
"step": 750
},
{
"epoch": 0.3849855630413859,
"grad_norm": 0.1051948294043541,
"learning_rate": 0.00013535172705200364,
"loss": 1.0916,
"step": 800
},
{
"epoch": 0.40904716073147257,
"grad_norm": 0.10586602240800858,
"learning_rate": 0.00012818644264382662,
"loss": 1.0871,
"step": 850
},
{
"epoch": 0.4331087584215592,
"grad_norm": 0.10873878002166748,
"learning_rate": 0.0001208601744540244,
"loss": 1.0819,
"step": 900
},
{
"epoch": 0.4571703561116458,
"grad_norm": 0.08816947042942047,
"learning_rate": 0.00011341476566136547,
"loss": 1.0789,
"step": 950
},
{
"epoch": 0.48123195380173245,
"grad_norm": 0.09611134976148605,
"learning_rate": 0.00010589273990312944,
"loss": 1.0814,
"step": 1000
},
{
"epoch": 0.5052935514918191,
"grad_norm": 0.08588535338640213,
"learning_rate": 9.833705840600763e-05,
"loss": 1.0734,
"step": 1050
},
{
"epoch": 0.5293551491819056,
"grad_norm": 0.07991491258144379,
"learning_rate": 9.07908746177598e-05,
"loss": 1.0722,
"step": 1100
},
{
"epoch": 0.5534167468719923,
"grad_norm": 0.07974735647439957,
"learning_rate": 8.3297287741021e-05,
"loss": 1.0769,
"step": 1150
},
{
"epoch": 0.5774783445620789,
"grad_norm": 0.08054530620574951,
"learning_rate": 7.589909657692416e-05,
"loss": 1.0801,
"step": 1200
},
{
"epoch": 0.6015399422521656,
"grad_norm": 0.08244979381561279,
"learning_rate": 6.863855508443216e-05,
"loss": 1.0704,
"step": 1250
},
{
"epoch": 0.6256015399422522,
"grad_norm": 0.07688669115304947,
"learning_rate": 6.155713105147687e-05,
"loss": 1.0649,
"step": 1300
},
{
"epoch": 0.6496631376323387,
"grad_norm": 0.06365044414997101,
"learning_rate": 5.469526925622783e-05,
"loss": 1.061,
"step": 1350
},
{
"epoch": 0.6737247353224254,
"grad_norm": 0.061256468296051025,
"learning_rate": 4.809216047116902e-05,
"loss": 1.0646,
"step": 1400
},
{
"epoch": 0.697786333012512,
"grad_norm": 0.06060384586453438,
"learning_rate": 4.178551762929218e-05,
"loss": 1.0626,
"step": 1450
},
{
"epoch": 0.7218479307025987,
"grad_norm": 0.06605063378810883,
"learning_rate": 3.581136043080835e-05,
"loss": 1.0643,
"step": 1500
},
{
"epoch": 0.7459095283926853,
"grad_norm": 0.05938854441046715,
"learning_rate": 3.0203809620573042e-05,
"loss": 1.0551,
"step": 1550
},
{
"epoch": 0.7699711260827719,
"grad_norm": 0.059756290167570114,
"learning_rate": 2.499489211118732e-05,
"loss": 1.0601,
"step": 1600
},
{
"epoch": 0.7940327237728585,
"grad_norm": 0.05975335091352463,
"learning_rate": 2.0214358064792172e-05,
"loss": 1.0499,
"step": 1650
},
{
"epoch": 0.8180943214629451,
"grad_norm": 0.05708390101790428,
"learning_rate": 1.5889510978274558e-05,
"loss": 1.0557,
"step": 1700
},
{
"epoch": 0.8421559191530318,
"grad_norm": 0.05504734441637993,
"learning_rate": 1.2045051742333412e-05,
"loss": 1.0549,
"step": 1750
},
{
"epoch": 0.8662175168431184,
"grad_norm": 0.055338677018880844,
"learning_rate": 8.702937565046632e-06,
"loss": 1.0611,
"step": 1800
},
{
"epoch": 0.890279114533205,
"grad_norm": 0.05398984253406525,
"learning_rate": 5.882256565680689e-06,
"loss": 1.0554,
"step": 1850
},
{
"epoch": 0.9143407122232916,
"grad_norm": 0.05350427329540253,
"learning_rate": 3.5991187549870077e-06,
"loss": 1.0526,
"step": 1900
},
{
"epoch": 0.9384023099133783,
"grad_norm": 0.05145389959216118,
"learning_rate": 1.8665640246390836e-06,
"loss": 1.0552,
"step": 1950
},
{
"epoch": 0.9624639076034649,
"grad_norm": 0.05064576491713524,
"learning_rate": 6.944876713186777e-07,
"loss": 1.0546,
"step": 2000
},
{
"epoch": 0.9865255052935515,
"grad_norm": 0.05046650767326355,
"learning_rate": 8.958388081204971e-08,
"loss": 1.053,
"step": 2050
}
],
"logging_steps": 50,
"max_steps": 2078,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4740522235991163e+20,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
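
A minimal sketch (assuming the file above is saved locally as trainer_state.json; the path and the printed summary are illustrative, not part of the original file) of how the log_history entries could be parsed to inspect the loss curve:

import json

# Load the trainer state written by the Hugging Face Trainer.
# The path is an assumption; point it at wherever this file is stored.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry holds epoch, grad_norm, learning_rate, loss, and step.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

print(f"logged every {state['logging_steps']} steps, "
      f"{len(steps)} entries up to global step {state['global_step']}")
print(f"loss went from {losses[0]:.4f} at step {steps[0]} "
      f"to {losses[-1]:.4f} at step {steps[-1]}")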