fr-8B-annealing / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500.0,
"global_step": 2078,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02406159769008662,
"grad_norm": 0.2745775878429413,
"learning_rate": 0.0001997144304734381,
"loss": 1.4399,
"step": 50
},
{
"epoch": 0.04812319538017324,
"grad_norm": 0.3128788471221924,
"learning_rate": 0.00019885935289284243,
"loss": 1.0761,
"step": 100
},
{
"epoch": 0.07218479307025986,
"grad_norm": 0.30212661623954773,
"learning_rate": 0.00019743965094021025,
"loss": 1.0595,
"step": 150
},
{
"epoch": 0.09624639076034648,
"grad_norm": 0.19943593442440033,
"learning_rate": 0.00019546343308783103,
"loss": 1.047,
"step": 200
},
{
"epoch": 0.12030798845043311,
"grad_norm": 0.33598530292510986,
"learning_rate": 0.00019294198628763448,
"loss": 1.0584,
"step": 250
},
{
"epoch": 0.14436958614051973,
"grad_norm": 0.21461080014705658,
"learning_rate": 0.0001898897115070003,
"loss": 1.0463,
"step": 300
},
{
"epoch": 0.16843118383060635,
"grad_norm": 0.2849867641925812,
"learning_rate": 0.00018632404147920934,
"loss": 1.0414,
"step": 350
},
{
"epoch": 0.19249278152069296,
"grad_norm": 0.16972823441028595,
"learning_rate": 0.00018226534113829578,
"loss": 1.0332,
"step": 400
},
{
"epoch": 0.2165543792107796,
"grad_norm": 0.2295919805765152,
"learning_rate": 0.00017773679130695595,
"loss": 1.0346,
"step": 450
},
{
"epoch": 0.24061597690086622,
"grad_norm": 0.2062266767024994,
"learning_rate": 0.0001727642563018167,
"loss": 1.0341,
"step": 500
},
{
"epoch": 0.2646775745909528,
"grad_norm": 0.2998173236846924,
"learning_rate": 0.0001673761362122227,
"loss": 1.0241,
"step": 550
},
{
"epoch": 0.28873917228103946,
"grad_norm": 0.11832105368375778,
"learning_rate": 0.00016160320469623483,
"loss": 1.0226,
"step": 600
},
{
"epoch": 0.3128007699711261,
"grad_norm": 0.1334855556488037,
"learning_rate": 0.00015547843322025095,
"loss": 1.0185,
"step": 650
},
{
"epoch": 0.3368623676612127,
"grad_norm": 0.13620489835739136,
"learning_rate": 0.00014903680274608503,
"loss": 1.0213,
"step": 700
},
{
"epoch": 0.36092396535129934,
"grad_norm": 0.13768567144870758,
"learning_rate": 0.0001423151039410329,
"loss": 1.0149,
"step": 750
},
{
"epoch": 0.3849855630413859,
"grad_norm": 0.11512122303247452,
"learning_rate": 0.00013535172705200364,
"loss": 1.0131,
"step": 800
},
{
"epoch": 0.40904716073147257,
"grad_norm": 0.1289459615945816,
"learning_rate": 0.00012818644264382662,
"loss": 1.0093,
"step": 850
},
{
"epoch": 0.4331087584215592,
"grad_norm": 0.10245691239833832,
"learning_rate": 0.0001208601744540244,
"loss": 1.0044,
"step": 900
},
{
"epoch": 0.4571703561116458,
"grad_norm": 0.09356267005205154,
"learning_rate": 0.00011341476566136547,
"loss": 1.0008,
"step": 950
},
{
"epoch": 0.48123195380173245,
"grad_norm": 0.0957803875207901,
"learning_rate": 0.00010589273990312944,
"loss": 1.0035,
"step": 1000
},
{
"epoch": 0.5052935514918191,
"grad_norm": 0.09853529930114746,
"learning_rate": 9.833705840600763e-05,
"loss": 0.9965,
"step": 1050
},
{
"epoch": 0.5293551491819056,
"grad_norm": 0.1084134429693222,
"learning_rate": 9.07908746177598e-05,
"loss": 0.9947,
"step": 1100
},
{
"epoch": 0.5534167468719923,
"grad_norm": 0.09273923188447952,
"learning_rate": 8.3297287741021e-05,
"loss": 0.9988,
"step": 1150
},
{
"epoch": 0.5774783445620789,
"grad_norm": 0.09696114808320999,
"learning_rate": 7.589909657692416e-05,
"loss": 1.0023,
"step": 1200
},
{
"epoch": 0.6015399422521656,
"grad_norm": 0.07934155315160751,
"learning_rate": 6.863855508443216e-05,
"loss": 0.9919,
"step": 1250
},
{
"epoch": 0.6256015399422522,
"grad_norm": 0.09847329556941986,
"learning_rate": 6.155713105147687e-05,
"loss": 0.9873,
"step": 1300
},
{
"epoch": 0.6496631376323387,
"grad_norm": 0.07471206784248352,
"learning_rate": 5.469526925622783e-05,
"loss": 0.9842,
"step": 1350
},
{
"epoch": 0.6737247353224254,
"grad_norm": 0.07969838380813599,
"learning_rate": 4.809216047116902e-05,
"loss": 0.9868,
"step": 1400
},
{
"epoch": 0.697786333012512,
"grad_norm": 0.07660534232854843,
"learning_rate": 4.178551762929218e-05,
"loss": 0.9846,
"step": 1450
},
{
"epoch": 0.7218479307025987,
"grad_norm": 0.08437798917293549,
"learning_rate": 3.581136043080835e-05,
"loss": 0.9861,
"step": 1500
},
{
"epoch": 0.7459095283926853,
"grad_norm": 0.06967335939407349,
"learning_rate": 3.0203809620573042e-05,
"loss": 0.9772,
"step": 1550
},
{
"epoch": 0.7699711260827719,
"grad_norm": 0.06973665207624435,
"learning_rate": 2.499489211118732e-05,
"loss": 0.9813,
"step": 1600
},
{
"epoch": 0.7940327237728585,
"grad_norm": 0.06934628635644913,
"learning_rate": 2.0214358064792172e-05,
"loss": 0.9719,
"step": 1650
},
{
"epoch": 0.8180943214629451,
"grad_norm": 0.07001589238643646,
"learning_rate": 1.5889510978274558e-05,
"loss": 0.977,
"step": 1700
},
{
"epoch": 0.8421559191530318,
"grad_norm": 0.06061296537518501,
"learning_rate": 1.2045051742333412e-05,
"loss": 0.9764,
"step": 1750
},
{
"epoch": 0.8662175168431184,
"grad_norm": 0.06056005507707596,
"learning_rate": 8.702937565046632e-06,
"loss": 0.9827,
"step": 1800
},
{
"epoch": 0.890279114533205,
"grad_norm": 0.05652038753032684,
"learning_rate": 5.882256565680689e-06,
"loss": 0.9769,
"step": 1850
},
{
"epoch": 0.9143407122232916,
"grad_norm": 0.055849190801382065,
"learning_rate": 3.5991187549870077e-06,
"loss": 0.9741,
"step": 1900
},
{
"epoch": 0.9384023099133783,
"grad_norm": 0.053746215999126434,
"learning_rate": 1.8665640246390836e-06,
"loss": 0.9771,
"step": 1950
},
{
"epoch": 0.9624639076034649,
"grad_norm": 0.05398011952638626,
"learning_rate": 6.944876713186777e-07,
"loss": 0.9755,
"step": 2000
},
{
"epoch": 0.9865255052935515,
"grad_norm": 0.05222630873322487,
"learning_rate": 8.958388081204971e-08,
"loss": 0.9742,
"step": 2050
}
],
"logging_steps": 50,
"max_steps": 2078,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.924669146551204e+20,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
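
Each entry in "log_history" above records the epoch, gradient norm, learning rate, and training loss at the optimizer step where it was logged (every 50 steps, per "logging_steps"). The following is a minimal sketch for inspecting the run, not part of the checkpoint itself: it assumes the file has been downloaded locally as trainer_state.json and that matplotlib is installed; adjust the path to match your checkout.

# Sketch: load trainer_state.json and plot the logged training loss and
# learning-rate schedule against the optimizer step.
# Assumption: the file is in the current directory as "trainer_state.json".
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Pull the per-log-step values out of log_history.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
lrs = [entry["learning_rate"] for entry in state["log_history"]]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()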