{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 18342,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 4.863700795987351e-05,
      "loss": 2.8121,
      "step": 500
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.7274015919747035e-05,
      "loss": 2.3962,
      "step": 1000
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.5911023879620545e-05,
      "loss": 2.2275,
      "step": 1500
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.454803183949406e-05,
      "loss": 2.1283,
      "step": 2000
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.318503979936757e-05,
      "loss": 2.0421,
      "step": 2500
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.182204775924109e-05,
      "loss": 1.9646,
      "step": 3000
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.0459055719114605e-05,
      "loss": 1.9383,
      "step": 3500
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.9096063678988114e-05,
      "loss": 1.8862,
      "step": 4000
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.773307163886163e-05,
      "loss": 1.8524,
      "step": 4500
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.637007959873515e-05,
      "loss": 1.8164,
      "step": 5000
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.500708755860866e-05,
      "loss": 1.7536,
      "step": 5500
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.3644095518482174e-05,
      "loss": 1.7631,
      "step": 6000
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.228110347835569e-05,
      "loss": 1.4821,
      "step": 6500
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.09181114382292e-05,
      "loss": 1.4501,
      "step": 7000
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.9555119398102717e-05,
      "loss": 1.4364,
      "step": 7500
    },
    {
      "epoch": 1.31,
      "learning_rate": 2.8192127357976233e-05,
      "loss": 1.4028,
      "step": 8000
    },
    {
      "epoch": 1.39,
      "learning_rate": 2.6829135317849746e-05,
      "loss": 1.3865,
      "step": 8500
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.546614327772326e-05,
      "loss": 1.4069,
      "step": 9000
    },
    {
      "epoch": 1.55,
      "learning_rate": 2.4103151237596773e-05,
      "loss": 1.3883,
      "step": 9500
    },
    {
      "epoch": 1.64,
      "learning_rate": 2.2740159197470286e-05,
      "loss": 1.3654,
      "step": 10000
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.1377167157343802e-05,
      "loss": 1.3411,
      "step": 10500
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.0014175117217316e-05,
      "loss": 1.3359,
      "step": 11000
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.8651183077090832e-05,
      "loss": 1.379,
      "step": 11500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.7288191036964345e-05,
      "loss": 1.3316,
      "step": 12000
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.592519899683786e-05,
      "loss": 1.2242,
      "step": 12500
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.4562206956711375e-05,
      "loss": 1.0537,
      "step": 13000
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.3199214916584887e-05,
      "loss": 1.0697,
      "step": 13500
    },
    {
      "epoch": 2.29,
      "learning_rate": 1.1836222876458403e-05,
      "loss": 1.0704,
      "step": 14000
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.0473230836331916e-05,
      "loss": 1.0918,
      "step": 14500
    },
    {
      "epoch": 2.45,
      "learning_rate": 9.110238796205431e-06,
      "loss": 1.0878,
      "step": 15000
    },
    {
      "epoch": 2.54,
      "learning_rate": 7.747246756078944e-06,
      "loss": 1.0506,
      "step": 15500
    },
    {
      "epoch": 2.62,
      "learning_rate": 6.384254715952459e-06,
      "loss": 1.0557,
      "step": 16000
    },
    {
      "epoch": 2.7,
      "learning_rate": 5.021262675825973e-06,
      "loss": 1.0325,
      "step": 16500
    },
    {
      "epoch": 2.78,
      "learning_rate": 3.658270635699488e-06,
      "loss": 1.0784,
      "step": 17000
    },
    {
      "epoch": 2.86,
      "learning_rate": 2.295278595573002e-06,
      "loss": 1.0239,
      "step": 17500
    },
    {
      "epoch": 2.94,
      "learning_rate": 9.322865554465163e-07,
      "loss": 1.0211,
      "step": 18000
    },
    {
      "epoch": 3.0,
      "step": 18342,
      "total_flos": 2.148457555862323e+16,
      "train_loss": 1.4955534830489579,
      "train_runtime": 15709.331,
      "train_samples_per_second": 9.34,
      "train_steps_per_second": 1.168
    }
  ],
  "max_steps": 18342,
  "num_train_epochs": 3,
  "total_flos": 2.148457555862323e+16,
  "trial_name": null,
  "trial_params": null
}