{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1,
"eval_steps": 2000,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.9697519540786743,
"learning_rate": 1e-06,
"loss": 0.1629,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 1.3848124742507935,
"learning_rate": 9.898989898989898e-07,
"loss": 0.14,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 0.9986572265625,
"learning_rate": 9.797979797979797e-07,
"loss": 0.1354,
"step": 300
},
{
"epoch": 0.02,
"grad_norm": 0.11438798904418945,
"learning_rate": 9.696969696969698e-07,
"loss": 0.1182,
"step": 400
},
{
"epoch": 0.03,
"grad_norm": 0.8548241257667542,
"learning_rate": 9.595959595959596e-07,
"loss": 0.1192,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 1.5312464237213135,
"learning_rate": 9.494949494949495e-07,
"loss": 0.0997,
"step": 600
},
{
"epoch": 0.04,
"grad_norm": 0.9692059755325317,
"learning_rate": 9.393939393939395e-07,
"loss": 0.102,
"step": 700
},
{
"epoch": 0.04,
"grad_norm": 0.42864611744880676,
"learning_rate": 9.292929292929292e-07,
"loss": 0.0901,
"step": 800
},
{
"epoch": 0.04,
"grad_norm": 0.852543830871582,
"learning_rate": 9.191919191919192e-07,
"loss": 0.0937,
"step": 900
},
{
"epoch": 0.05,
"grad_norm": 0.5718303322792053,
"learning_rate": 9.09090909090909e-07,
"loss": 0.093,
"step": 1000
},
{
"epoch": 0.06,
"grad_norm": 0.9396565556526184,
"learning_rate": 8.98989898989899e-07,
"loss": 0.0892,
"step": 1100
},
{
"epoch": 0.06,
"grad_norm": 0.08157779276371002,
"learning_rate": 8.888888888888888e-07,
"loss": 0.0934,
"step": 1200
},
{
"epoch": 0.07,
"grad_norm": 0.8076322078704834,
"learning_rate": 8.787878787878787e-07,
"loss": 0.0725,
"step": 1300
},
{
"epoch": 0.07,
"grad_norm": 1.5076119899749756,
"learning_rate": 8.686868686868687e-07,
"loss": 0.0835,
"step": 1400
},
{
"epoch": 0.07,
"grad_norm": 1.1567238569259644,
"learning_rate": 8.585858585858586e-07,
"loss": 0.0747,
"step": 1500
},
{
"epoch": 0.08,
"grad_norm": 0.6817927956581116,
"learning_rate": 8.484848484848484e-07,
"loss": 0.0903,
"step": 1600
},
{
"epoch": 0.09,
"grad_norm": 0.6467050313949585,
"learning_rate": 8.383838383838383e-07,
"loss": 0.0721,
"step": 1700
},
{
"epoch": 0.09,
"grad_norm": 1.8435570001602173,
"learning_rate": 8.282828282828283e-07,
"loss": 0.0847,
"step": 1800
},
{
"epoch": 0.1,
"grad_norm": 0.6265794634819031,
"learning_rate": 8.181818181818182e-07,
"loss": 0.0687,
"step": 1900
},
{
"epoch": 0.1,
"grad_norm": 1.360060453414917,
"learning_rate": 8.08080808080808e-07,
"loss": 0.0748,
"step": 2000
},
{
"epoch": 0.1,
"eval_loss": 0.06745574623346329,
"eval_runtime": 304.7718,
"eval_samples_per_second": 3.281,
"eval_steps_per_second": 0.82,
"step": 2000
}
],
"logging_steps": 100,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"total_flos": 1.63205502468096e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}