{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.0,
  "global_step": 44765,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 0.00029664916787668934,
      "loss": 3.2895,
      "step": 500
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0002932983357533787,
      "loss": 2.7517,
      "step": 1000
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00028994750363006813,
      "loss": 2.6498,
      "step": 1500
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0002865966715067575,
      "loss": 2.5331,
      "step": 2000
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00028324583938344687,
      "loss": 2.4775,
      "step": 2500
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00027989500726013623,
      "loss": 2.4187,
      "step": 3000
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00027654417513682566,
      "loss": 2.402,
      "step": 3500
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.000273193343013515,
      "loss": 2.3717,
      "step": 4000
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0002698425108902044,
      "loss": 2.3738,
      "step": 4500
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00026649167876689376,
      "loss": 2.3227,
      "step": 5000
    },
    {
      "epoch": 0.78,
      "eval_gen_len": 13.4213,
      "eval_loss": 2.011871576309204,
      "eval_rouge1": 35.4228,
      "eval_rouge2": 16.8454,
      "eval_rougeL": 33.0039,
      "eval_rougeLsum": 33.0042,
      "eval_runtime": 109.3685,
      "eval_samples_per_second": 27.43,
      "eval_steps_per_second": 3.429,
      "step": 5000
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0002631408466435831,
      "loss": 2.2981,
      "step": 5500
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00025979001452027255,
      "loss": 2.2863,
      "step": 6000
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002564391823969619,
      "loss": 2.2597,
      "step": 6500
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00025308835027365123,
      "loss": 2.0983,
      "step": 7000
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00024973751815034065,
      "loss": 2.0933,
      "step": 7500
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00024638668602703,
      "loss": 2.1024,
      "step": 8000
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.0002430358539037194,
      "loss": 2.0818,
      "step": 8500
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00023968502178040878,
      "loss": 2.0917,
      "step": 9000
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00023633418965709815,
      "loss": 2.0658,
      "step": 9500
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.00023298335753378752,
      "loss": 2.0517,
      "step": 10000
    },
    {
      "epoch": 1.56,
      "eval_gen_len": 12.7787,
      "eval_loss": 1.900165319442749,
      "eval_rouge1": 36.7771,
      "eval_rouge2": 18.1217,
      "eval_rougeL": 34.4954,
      "eval_rougeLsum": 34.4605,
      "eval_runtime": 106.6009,
      "eval_samples_per_second": 28.142,
      "eval_steps_per_second": 3.518,
      "step": 10000
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.0002296325254104769,
      "loss": 2.0494,
      "step": 10500
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00022628169328716628,
      "loss": 2.0672,
      "step": 11000
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00022293086116385567,
      "loss": 2.0591,
      "step": 11500
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00021958002904054504,
      "loss": 2.0515,
      "step": 12000
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00021622919691723444,
      "loss": 2.0628,
      "step": 12500
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.0002128783647939238,
      "loss": 1.959,
      "step": 13000
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00020952753267061317,
      "loss": 1.8223,
      "step": 13500
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.00020617670054730257,
      "loss": 1.8548,
      "step": 14000
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.00020282586842399193,
      "loss": 1.8462,
      "step": 14500
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.00019947503630068133,
      "loss": 1.8388,
      "step": 15000
    },
    {
      "epoch": 2.35,
      "eval_gen_len": 13.2803,
      "eval_loss": 1.8676202297210693,
      "eval_rouge1": 38.3396,
      "eval_rouge2": 19.4592,
      "eval_rougeL": 35.8451,
      "eval_rougeLsum": 35.8358,
      "eval_runtime": 111.5416,
      "eval_samples_per_second": 26.896,
      "eval_steps_per_second": 3.362,
      "step": 15000
    },
    {
      "epoch": 2.42,
      "learning_rate": 0.0001961242041773707,
      "loss": 1.8675,
      "step": 15500
    },
    {
      "epoch": 2.5,
      "learning_rate": 0.0001927733720540601,
      "loss": 1.8656,
      "step": 16000
    },
    {
      "epoch": 2.58,
      "learning_rate": 0.00018942253993074946,
      "loss": 1.8836,
      "step": 16500
    },
    {
      "epoch": 2.66,
      "learning_rate": 0.00018607170780743885,
      "loss": 1.8646,
      "step": 17000
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.00018272087568412822,
      "loss": 1.8718,
      "step": 17500
    },
    {
      "epoch": 2.81,
      "learning_rate": 0.0001793700435608176,
      "loss": 1.8641,
      "step": 18000
    },
    {
      "epoch": 2.89,
      "learning_rate": 0.00017601921143750698,
      "loss": 1.8633,
      "step": 18500
    },
    {
      "epoch": 2.97,
      "learning_rate": 0.00017266837931419635,
      "loss": 1.8435,
      "step": 19000
    },
    {
      "epoch": 3.05,
      "learning_rate": 0.00016931754719088575,
      "loss": 1.7436,
      "step": 19500
    },
    {
      "epoch": 3.13,
      "learning_rate": 0.0001659667150675751,
      "loss": 1.6942,
      "step": 20000
    },
    {
      "epoch": 3.13,
      "eval_gen_len": 13.0213,
      "eval_loss": 1.8757922649383545,
      "eval_rouge1": 39.0889,
      "eval_rouge2": 20.3841,
      "eval_rougeL": 36.655,
      "eval_rougeLsum": 36.6291,
      "eval_runtime": 103.7035,
      "eval_samples_per_second": 28.929,
      "eval_steps_per_second": 3.616,
      "step": 20000
    },
    {
      "epoch": 3.21,
      "learning_rate": 0.00016261588294426446,
      "loss": 1.691,
      "step": 20500
    },
    {
      "epoch": 3.28,
      "learning_rate": 0.00015926505082095385,
      "loss": 1.6786,
      "step": 21000
    },
    {
      "epoch": 3.36,
      "learning_rate": 0.00015591421869764322,
      "loss": 1.7014,
      "step": 21500
    },
    {
      "epoch": 3.44,
      "learning_rate": 0.0001525633865743326,
      "loss": 1.72,
      "step": 22000
    },
    {
      "epoch": 3.52,
      "learning_rate": 0.000149212554451022,
      "loss": 1.7038,
      "step": 22500
    },
    {
      "epoch": 3.6,
      "learning_rate": 0.00014586172232771135,
      "loss": 1.7212,
      "step": 23000
    },
    {
      "epoch": 3.67,
      "learning_rate": 0.00014251089020440074,
      "loss": 1.6839,
      "step": 23500
    },
    {
      "epoch": 3.75,
      "learning_rate": 0.0001391600580810901,
      "loss": 1.6908,
      "step": 24000
    },
    {
      "epoch": 3.83,
      "learning_rate": 0.0001358092259577795,
      "loss": 1.6973,
      "step": 24500
    },
    {
      "epoch": 3.91,
      "learning_rate": 0.00013245839383446887,
      "loss": 1.7123,
      "step": 25000
    },
    {
      "epoch": 3.91,
      "eval_gen_len": 13.1837,
      "eval_loss": 1.8253004550933838,
      "eval_rouge1": 39.6282,
      "eval_rouge2": 20.9321,
      "eval_rougeL": 37.1541,
      "eval_rougeLsum": 37.1195,
      "eval_runtime": 103.6467,
      "eval_samples_per_second": 28.944,
      "eval_steps_per_second": 3.618,
      "step": 25000
    },
    {
      "epoch": 3.99,
      "learning_rate": 0.00012910756171115827,
      "loss": 1.7076,
      "step": 25500
    },
    {
      "epoch": 4.07,
      "learning_rate": 0.00012575672958784764,
      "loss": 1.5729,
      "step": 26000
    },
    {
      "epoch": 4.14,
      "learning_rate": 0.00012240589746453703,
      "loss": 1.5568,
      "step": 26500
    },
    {
      "epoch": 4.22,
      "learning_rate": 0.0001190550653412264,
      "loss": 1.5458,
      "step": 27000
    },
    {
      "epoch": 4.3,
      "learning_rate": 0.00011570423321791578,
      "loss": 1.5678,
      "step": 27500
    },
    {
      "epoch": 4.38,
      "learning_rate": 0.00011235340109460516,
      "loss": 1.5979,
      "step": 28000
    },
    {
      "epoch": 4.46,
      "learning_rate": 0.00010900256897129453,
      "loss": 1.5604,
      "step": 28500
    },
    {
      "epoch": 4.53,
      "learning_rate": 0.00010565173684798391,
      "loss": 1.5539,
      "step": 29000
    },
    {
      "epoch": 4.61,
      "learning_rate": 0.00010230090472467328,
      "loss": 1.5681,
      "step": 29500
    },
    {
      "epoch": 4.69,
      "learning_rate": 9.895007260136266e-05,
      "loss": 1.5719,
      "step": 30000
    },
    {
      "epoch": 4.69,
      "eval_gen_len": 13.3723,
      "eval_loss": 1.8311357498168945,
      "eval_rouge1": 39.7541,
      "eval_rouge2": 21.1663,
      "eval_rougeL": 37.3503,
      "eval_rougeLsum": 37.3096,
      "eval_runtime": 106.3723,
      "eval_samples_per_second": 28.203,
      "eval_steps_per_second": 3.525,
      "step": 30000
    },
    {
      "epoch": 4.77,
      "learning_rate": 9.559924047805204e-05,
      "loss": 1.5787,
      "step": 30500
    },
    {
      "epoch": 4.85,
      "learning_rate": 9.224840835474142e-05,
      "loss": 1.5805,
      "step": 31000
    },
    {
      "epoch": 4.93,
      "learning_rate": 8.889757623143079e-05,
      "loss": 1.5646,
      "step": 31500
    },
    {
      "epoch": 5.0,
      "learning_rate": 8.554674410812017e-05,
      "loss": 1.5593,
      "step": 32000
    },
    {
      "epoch": 5.08,
      "learning_rate": 8.219591198480955e-05,
      "loss": 1.4606,
      "step": 32500
    },
    {
      "epoch": 5.16,
      "learning_rate": 7.884507986149893e-05,
      "loss": 1.4393,
      "step": 33000
    },
    {
      "epoch": 5.24,
      "learning_rate": 7.549424773818831e-05,
      "loss": 1.4826,
      "step": 33500
    },
    {
      "epoch": 5.32,
      "learning_rate": 7.21434156148777e-05,
      "loss": 1.4668,
      "step": 34000
    },
    {
      "epoch": 5.39,
      "learning_rate": 6.879258349156706e-05,
      "loss": 1.4599,
      "step": 34500
    },
    {
      "epoch": 5.47,
      "learning_rate": 6.544175136825644e-05,
      "loss": 1.4763,
      "step": 35000
    },
    {
      "epoch": 5.47,
      "eval_gen_len": 13.2783,
      "eval_loss": 1.8474199771881104,
      "eval_rouge1": 39.8798,
      "eval_rouge2": 21.3044,
      "eval_rougeL": 37.4297,
      "eval_rougeLsum": 37.4135,
      "eval_runtime": 104.818,
      "eval_samples_per_second": 28.621,
      "eval_steps_per_second": 3.578,
      "step": 35000
    },
    {
      "epoch": 5.55,
      "learning_rate": 6.209091924494583e-05,
      "loss": 1.4651,
      "step": 35500
    },
    {
      "epoch": 5.63,
      "learning_rate": 5.87400871216352e-05,
      "loss": 1.475,
      "step": 36000
    },
    {
      "epoch": 5.71,
      "learning_rate": 5.538925499832458e-05,
      "loss": 1.4734,
      "step": 36500
    },
    {
      "epoch": 5.79,
      "learning_rate": 5.203842287501396e-05,
      "loss": 1.4487,
      "step": 37000
    },
    {
      "epoch": 5.86,
      "learning_rate": 4.868759075170333e-05,
      "loss": 1.4408,
      "step": 37500
    },
    {
      "epoch": 5.94,
      "learning_rate": 4.533675862839271e-05,
      "loss": 1.4643,
      "step": 38000
    },
    {
      "epoch": 6.02,
      "learning_rate": 4.198592650508209e-05,
      "loss": 1.4441,
      "step": 38500
    },
    {
      "epoch": 6.1,
      "learning_rate": 3.863509438177147e-05,
      "loss": 1.3854,
      "step": 39000
    },
    {
      "epoch": 6.18,
      "learning_rate": 3.528426225846085e-05,
      "loss": 1.3919,
      "step": 39500
    },
    {
      "epoch": 6.25,
      "learning_rate": 3.193343013515022e-05,
      "loss": 1.3963,
      "step": 40000
    },
    {
      "epoch": 6.25,
      "eval_gen_len": 13.4713,
      "eval_loss": 1.8532978296279907,
      "eval_rouge1": 40.1839,
      "eval_rouge2": 21.4959,
      "eval_rougeL": 37.5371,
      "eval_rougeLsum": 37.5414,
      "eval_runtime": 113.6133,
      "eval_samples_per_second": 26.405,
      "eval_steps_per_second": 3.301,
      "step": 40000
    },
    {
      "epoch": 6.33,
      "learning_rate": 2.8582598011839604e-05,
      "loss": 1.379,
      "step": 40500
    },
    {
      "epoch": 6.41,
      "learning_rate": 2.5231765888528982e-05,
      "loss": 1.3968,
      "step": 41000
    },
    {
      "epoch": 6.49,
      "learning_rate": 2.188093376521836e-05,
      "loss": 1.3814,
      "step": 41500
    },
    {
      "epoch": 6.57,
      "learning_rate": 1.8530101641907737e-05,
      "loss": 1.3891,
      "step": 42000
    },
    {
      "epoch": 6.65,
      "learning_rate": 1.5179269518597117e-05,
      "loss": 1.3787,
      "step": 42500
    },
    {
      "epoch": 6.72,
      "learning_rate": 1.1828437395286495e-05,
      "loss": 1.386,
      "step": 43000
    },
    {
      "epoch": 6.8,
      "learning_rate": 8.477605271975873e-06,
      "loss": 1.3927,
      "step": 43500
    },
    {
      "epoch": 6.88,
      "learning_rate": 5.126773148665251e-06,
      "loss": 1.368,
      "step": 44000
    },
    {
      "epoch": 6.96,
      "learning_rate": 1.7759410253546295e-06,
      "loss": 1.375,
      "step": 44500
    },
    {
      "epoch": 7.0,
      "step": 44765,
      "total_flos": 7.978118776281907e+16,
      "train_loss": 1.7918549016882,
      "train_runtime": 13775.2907,
      "train_samples_per_second": 25.997,
      "train_steps_per_second": 3.25
    }
  ],
  "max_steps": 44765,
  "num_train_epochs": 7,
  "total_flos": 7.978118776281907e+16,
  "trial_name": null,
  "trial_params": null
}