{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.591919191919192,
  "global_step": 320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 4.9457994579945803e-05,
      "loss": 4.3763,
      "step": 5
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.878048780487805e-05,
      "loss": 3.8394,
      "step": 10
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.81029810298103e-05,
      "loss": 3.639,
      "step": 15
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.7425474254742554e-05,
      "loss": 3.7962,
      "step": 20
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.6747967479674795e-05,
      "loss": 3.5152,
      "step": 25
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.607046070460705e-05,
      "loss": 3.5752,
      "step": 30
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.53929539295393e-05,
      "loss": 3.4463,
      "step": 35
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.4715447154471546e-05,
      "loss": 3.5218,
      "step": 40
    },
    {
      "epoch": 0.32,
      "eval_loss": 3.2419066429138184,
      "eval_rouge2_fmeasure": 0.0,
      "eval_rouge2_precision": 0.0,
      "eval_rouge2_recall": 0.0,
      "eval_runtime": 180.4791,
      "eval_samples_per_second": 0.15,
      "eval_steps_per_second": 0.078,
      "step": 40
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.4037940379403794e-05,
      "loss": 3.5264,
      "step": 45
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.336043360433605e-05,
      "loss": 3.6286,
      "step": 50
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.26829268292683e-05,
      "loss": 3.4611,
      "step": 55
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.2005420054200545e-05,
      "loss": 3.9319,
      "step": 60
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.132791327913279e-05,
      "loss": 3.7751,
      "step": 65
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.065040650406504e-05,
      "loss": 3.6241,
      "step": 70
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.9972899728997295e-05,
      "loss": 3.3906,
      "step": 75
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.9295392953929537e-05,
      "loss": 3.1815,
      "step": 80
    },
    {
      "epoch": 0.65,
      "eval_loss": 3.189028739929199,
      "eval_rouge2_fmeasure": 0.0,
      "eval_rouge2_precision": 0.0,
      "eval_rouge2_recall": 0.0,
      "eval_runtime": 265.9332,
      "eval_samples_per_second": 0.102,
      "eval_steps_per_second": 0.053,
      "step": 80
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.861788617886179e-05,
      "loss": 3.3673,
      "step": 85
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.794037940379404e-05,
      "loss": 3.3792,
      "step": 90
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.726287262872629e-05,
      "loss": 3.5732,
      "step": 95
    },
    {
      "epoch": 0.81,
      "learning_rate": 3.6585365853658535e-05,
      "loss": 3.2772,
      "step": 100
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.6043360433604336e-05,
      "loss": 3.3803,
      "step": 105
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.5365853658536584e-05,
      "loss": 3.2597,
      "step": 110
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.468834688346884e-05,
      "loss": 3.4085,
      "step": 115
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.401084010840109e-05,
      "loss": 3.2319,
      "step": 120
    },
    {
      "epoch": 0.97,
      "eval_loss": 3.166618824005127,
      "eval_rouge2_fmeasure": 0.0,
      "eval_rouge2_precision": 0.0,
      "eval_rouge2_recall": 0.0,
      "eval_runtime": 433.3435,
      "eval_samples_per_second": 0.062,
      "eval_steps_per_second": 0.032,
      "step": 120
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 3.6139,
      "step": 125
    },
    {
      "epoch": 1.05,
      "learning_rate": 3.265582655826558e-05,
      "loss": 3.5873,
      "step": 130
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.197831978319784e-05,
      "loss": 3.3622,
      "step": 135
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.130081300813008e-05,
      "loss": 3.4854,
      "step": 140
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.0623306233062334e-05,
      "loss": 3.4429,
      "step": 145
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.9945799457994585e-05,
      "loss": 3.1618,
      "step": 150
    },
    {
      "epoch": 1.25,
      "learning_rate": 2.926829268292683e-05,
      "loss": 3.428,
      "step": 155
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.859078590785908e-05,
      "loss": 3.2305,
      "step": 160
    },
    {
      "epoch": 1.29,
      "eval_loss": 3.152249336242676,
      "eval_rouge2_fmeasure": 0.0037,
      "eval_rouge2_precision": 0.0062,
      "eval_rouge2_recall": 0.0026,
      "eval_runtime": 372.2318,
      "eval_samples_per_second": 0.073,
      "eval_steps_per_second": 0.038,
      "step": 160
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.7913279132791332e-05,
      "loss": 3.0798,
      "step": 165
    },
    {
      "epoch": 1.37,
      "learning_rate": 2.7235772357723577e-05,
      "loss": 3.2169,
      "step": 170
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.6558265582655828e-05,
      "loss": 3.4364,
      "step": 175
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.588075880758808e-05,
      "loss": 3.2353,
      "step": 180
    },
    {
      "epoch": 1.49,
      "learning_rate": 2.5203252032520324e-05,
      "loss": 3.4389,
      "step": 185
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.4525745257452575e-05,
      "loss": 3.2847,
      "step": 190
    },
    {
      "epoch": 1.58,
      "learning_rate": 2.3848238482384823e-05,
      "loss": 3.0442,
      "step": 195
    },
    {
      "epoch": 1.62,
      "learning_rate": 2.3170731707317075e-05,
      "loss": 3.2043,
      "step": 200
    },
    {
      "epoch": 1.62,
      "eval_loss": 3.1420483589172363,
      "eval_rouge2_fmeasure": 0.0032,
      "eval_rouge2_precision": 0.0041,
      "eval_rouge2_recall": 0.0026,
      "eval_runtime": 417.3311,
      "eval_samples_per_second": 0.065,
      "eval_steps_per_second": 0.034,
      "step": 200
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.2493224932249323e-05,
      "loss": 3.4208,
      "step": 205
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.181571815718157e-05,
      "loss": 3.7157,
      "step": 210
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.1138211382113822e-05,
      "loss": 3.0534,
      "step": 215
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.046070460704607e-05,
      "loss": 3.2667,
      "step": 220
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.978319783197832e-05,
      "loss": 3.3489,
      "step": 225
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.9105691056910573e-05,
      "loss": 3.3139,
      "step": 230
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.842818428184282e-05,
      "loss": 3.3155,
      "step": 235
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.775067750677507e-05,
      "loss": 3.603,
      "step": 240
    },
    {
      "epoch": 1.94,
      "eval_loss": 3.135263681411743,
      "eval_rouge2_fmeasure": 0.0,
      "eval_rouge2_precision": 0.0,
      "eval_rouge2_recall": 0.0,
      "eval_runtime": 517.2838,
      "eval_samples_per_second": 0.052,
      "eval_steps_per_second": 0.027,
      "step": 240
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.707317073170732e-05,
      "loss": 3.4609,
      "step": 245
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.6395663956639568e-05,
      "loss": 3.2715,
      "step": 250
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.5718157181571816e-05,
      "loss": 3.91,
      "step": 255
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.5040650406504067e-05,
      "loss": 3.0947,
      "step": 260
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.4498644986449867e-05,
      "loss": 3.2667,
      "step": 265
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.3821138211382115e-05,
      "loss": 3.5289,
      "step": 270
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.3143631436314363e-05,
      "loss": 3.3016,
      "step": 275
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.2466124661246612e-05,
      "loss": 3.2669,
      "step": 280
    },
    {
      "epoch": 2.27,
      "eval_loss": 3.130876064300537,
      "eval_rouge2_fmeasure": 0.0,
      "eval_rouge2_precision": 0.0,
      "eval_rouge2_recall": 0.0,
      "eval_runtime": 441.1701,
      "eval_samples_per_second": 0.061,
      "eval_steps_per_second": 0.032,
      "step": 280
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.1788617886178862e-05,
      "loss": 3.0182,
      "step": 285
    },
    {
      "epoch": 2.35,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 3.4221,
      "step": 290
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.0433604336043361e-05,
      "loss": 3.1717,
      "step": 295
    },
    {
      "epoch": 2.43,
      "learning_rate": 9.756097560975611e-06,
      "loss": 3.309,
      "step": 300
    },
    {
      "epoch": 2.47,
      "learning_rate": 9.078590785907859e-06,
      "loss": 3.215,
      "step": 305
    },
    {
      "epoch": 2.51,
      "learning_rate": 8.401084010840109e-06,
      "loss": 3.2693,
      "step": 310
    },
    {
      "epoch": 2.55,
      "learning_rate": 7.723577235772358e-06,
      "loss": 3.3973,
      "step": 315
    },
    {
      "epoch": 2.59,
      "learning_rate": 7.046070460704607e-06,
      "loss": 3.3138,
      "step": 320
    },
    {
      "epoch": 2.59,
      "eval_loss": 3.127232074737549,
      "eval_rouge2_fmeasure": 0.0,
      "eval_rouge2_precision": 0.0,
      "eval_rouge2_recall": 0.0,
      "eval_runtime": 495.8047,
      "eval_samples_per_second": 0.054,
      "eval_steps_per_second": 0.028,
      "step": 320
    }
  ],
  "max_steps": 369,
  "num_train_epochs": 3,
  "total_flos": 1.248720939122688e+16,
  "trial_name": null,
  "trial_params": null
}