{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.575107296137339,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08583690987124463,
      "grad_norm": 0.13883593678474426,
      "learning_rate": 0.0001998960663781063,
      "loss": 0.829,
      "step": 100
    },
    {
      "epoch": 0.17167381974248927,
      "grad_norm": 0.13720104098320007,
      "learning_rate": 0.00019906589321760313,
      "loss": 0.648,
      "step": 200
    },
    {
      "epoch": 0.2575107296137339,
      "grad_norm": 0.1670483946800232,
      "learning_rate": 0.00019741244594178857,
      "loss": 0.587,
      "step": 300
    },
    {
      "epoch": 0.34334763948497854,
      "grad_norm": 0.16993218660354614,
      "learning_rate": 0.00019494946530743566,
      "loss": 0.5547,
      "step": 400
    },
    {
      "epoch": 0.4291845493562232,
      "grad_norm": 0.16408105194568634,
      "learning_rate": 0.00019169741959214142,
      "loss": 0.5319,
      "step": 500
    },
    {
      "epoch": 0.5150214592274678,
      "grad_norm": 0.1872694492340088,
      "learning_rate": 0.0001876833344953899,
      "loss": 0.5147,
      "step": 600
    },
    {
      "epoch": 0.6008583690987125,
      "grad_norm": 0.18225158751010895,
      "learning_rate": 0.0001829405685450202,
      "loss": 0.5009,
      "step": 700
    },
    {
      "epoch": 0.6866952789699571,
      "grad_norm": 0.1850147843360901,
      "learning_rate": 0.00017750853587555535,
      "loss": 0.4866,
      "step": 800
    },
    {
      "epoch": 0.7725321888412017,
      "grad_norm": 0.20939144492149353,
      "learning_rate": 0.00017143237868220553,
      "loss": 0.4721,
      "step": 900
    },
    {
      "epoch": 0.8583690987124464,
      "grad_norm": 0.21306931972503662,
      "learning_rate": 0.00016476259207257407,
      "loss": 0.4626,
      "step": 1000
    },
    {
      "epoch": 0.944206008583691,
      "grad_norm": 0.13636501133441925,
      "learning_rate": 0.0001575546044336872,
      "loss": 0.4505,
      "step": 1100
    },
    {
      "epoch": 1.0300429184549356,
      "grad_norm": 0.21951957046985626,
      "learning_rate": 0.00014986831680165167,
      "loss": 0.4371,
      "step": 1200
    },
    {
      "epoch": 1.1158798283261802,
      "grad_norm": 0.183538556098938,
      "learning_rate": 0.00014176760506194906,
      "loss": 0.4241,
      "step": 1300
    },
    {
      "epoch": 1.201716738197425,
      "grad_norm": 0.27745020389556885,
      "learning_rate": 0.00013331978911726523,
      "loss": 0.4247,
      "step": 1400
    },
    {
      "epoch": 1.2875536480686696,
      "grad_norm": 0.16314753890037537,
      "learning_rate": 0.00012459507343426653,
      "loss": 0.4142,
      "step": 1500
    },
    {
      "epoch": 1.3733905579399142,
      "grad_norm": 0.13504645228385925,
      "learning_rate": 0.00011566596361858548,
      "loss": 0.3982,
      "step": 1600
    },
    {
      "epoch": 1.4592274678111588,
      "grad_norm": 0.21324139833450317,
      "learning_rate": 0.0001066066638664925,
      "loss": 0.4026,
      "step": 1700
    },
    {
      "epoch": 1.5450643776824036,
      "grad_norm": 0.3690386116504669,
      "learning_rate": 9.749246030065306e-05,
      "loss": 0.3991,
      "step": 1800
    },
    {
      "epoch": 1.6309012875536482,
      "grad_norm": 0.32246139645576477,
      "learning_rate": 8.839909531467737e-05,
      "loss": 0.3949,
      "step": 1900
    },
    {
      "epoch": 1.7167381974248928,
      "grad_norm": 0.14840121567249298,
      "learning_rate": 7.940213812589018e-05,
      "loss": 0.3874,
      "step": 2000
    },
    {
      "epoch": 1.8025751072961373,
      "grad_norm": 0.16993016004562378,
      "learning_rate": 7.057635676725945e-05,
      "loss": 0.3886,
      "step": 2100
    },
    {
      "epoch": 1.888412017167382,
      "grad_norm": 0.21142154932022095,
      "learning_rate": 6.199509673746246e-05,
      "loss": 0.3868,
      "step": 2200
    },
    {
      "epoch": 1.9742489270386265,
      "grad_norm": 0.17925000190734863,
      "learning_rate": 5.372967147273683e-05,
      "loss": 0.381,
      "step": 2300
    },
    {
      "epoch": 2.060085836909871,
      "grad_norm": 0.20707811415195465,
      "learning_rate": 4.584876970591957e-05,
      "loss": 0.3713,
      "step": 2400
    },
    {
      "epoch": 2.1459227467811157,
      "grad_norm": 0.22432269155979156,
      "learning_rate": 3.841788463774003e-05,
      "loss": 0.366,
      "step": 2500
    },
    {
      "epoch": 2.2317596566523603,
      "grad_norm": 0.26082319021224976,
      "learning_rate": 3.149876966416321e-05,
      "loss": 0.3681,
      "step": 2600
    },
    {
      "epoch": 2.317596566523605,
      "grad_norm": 0.16953982412815094,
      "learning_rate": 2.514892518288988e-05,
      "loss": 0.3684,
      "step": 2700
    },
    {
      "epoch": 2.40343347639485,
      "grad_norm": 0.24221323430538177,
      "learning_rate": 1.9421120743841902e-05,
      "loss": 0.3694,
      "step": 2800
    },
    {
      "epoch": 2.4892703862660945,
      "grad_norm": 0.14716067910194397,
      "learning_rate": 1.436295651473667e-05,
      "loss": 0.37,
      "step": 2900
    },
    {
      "epoch": 2.575107296137339,
      "grad_norm": 0.2028035819530487,
      "learning_rate": 1.0016467706135135e-05,
      "loss": 0.367,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3495,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1064164824802918e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}