|
{
  "best_metric": 0.04831862449645996,
  "best_model_checkpoint": "./model/new-14/checkpoint-4410",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 4410,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.1337868480725626e-05,
      "loss": 0.4789,
      "step": 100
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.267573696145125e-05,
      "loss": 0.2222,
      "step": 200
    },
    {
      "epoch": 0.07,
      "learning_rate": 3.401360544217687e-05,
      "loss": 0.109,
      "step": 300
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.53514739229025e-05,
      "loss": 0.072,
      "step": 400
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.964792934717747e-05,
      "loss": 0.0751,
      "step": 500
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.905119942713928e-05,
      "loss": 0.0525,
      "step": 600
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.8454469507101085e-05,
      "loss": 0.0604,
      "step": 700
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.78577395870629e-05,
      "loss": 0.0512,
      "step": 800
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.726100966702471e-05,
      "loss": 0.0659,
      "step": 900
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.6664279746986514e-05,
      "loss": 0.0771,
      "step": 1000
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6067549826948325e-05,
      "loss": 0.0599,
      "step": 1100
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.547081990691014e-05,
      "loss": 0.054,
      "step": 1200
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.487408998687194e-05,
      "loss": 0.0655,
      "step": 1300
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.4277360066833754e-05,
      "loss": 0.0838,
      "step": 1400
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.3680630146795565e-05,
      "loss": 0.0633,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.308390022675737e-05,
      "loss": 0.0608,
      "step": 1600
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.248717030671918e-05,
      "loss": 0.0577,
      "step": 1700
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.1890440386680994e-05,
      "loss": 0.0543,
      "step": 1800
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.12937104666428e-05,
      "loss": 0.0464,
      "step": 1900
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.069698054660461e-05,
      "loss": 0.0422,
      "step": 2000
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.0100250626566415e-05,
      "loss": 0.0642,
      "step": 2100
    },
    {
      "epoch": 0.5,
      "learning_rate": 3.950352070652823e-05,
      "loss": 0.0528,
      "step": 2200
    },
    {
      "epoch": 0.52,
      "learning_rate": 3.890679078649004e-05,
      "loss": 0.0511,
      "step": 2300
    },
    {
      "epoch": 0.54,
      "learning_rate": 3.831006086645185e-05,
      "loss": 0.0706,
      "step": 2400
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.7713330946413655e-05,
      "loss": 0.0474,
      "step": 2500
    },
    {
      "epoch": 0.59,
      "learning_rate": 3.711660102637547e-05,
      "loss": 0.0566,
      "step": 2600
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.651987110633727e-05,
      "loss": 0.0557,
      "step": 2700
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.592314118629908e-05,
      "loss": 0.0537,
      "step": 2800
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.532641126626089e-05,
      "loss": 0.0646,
      "step": 2900
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.4729681346222707e-05,
      "loss": 0.07,
      "step": 3000
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.413295142618451e-05,
      "loss": 0.0508,
      "step": 3100
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.353622150614632e-05,
      "loss": 0.0521,
      "step": 3200
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.293949158610813e-05,
      "loss": 0.0636,
      "step": 3300
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.234276166606994e-05,
      "loss": 0.0657,
      "step": 3400
    },
    {
      "epoch": 0.79,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 0.0532,
      "step": 3500
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.1149301825993556e-05,
      "loss": 0.0574,
      "step": 3600
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.055257190595537e-05,
      "loss": 0.0425,
      "step": 3700
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.9955841985917176e-05,
      "loss": 0.0545,
      "step": 3800
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.9359112065878985e-05,
      "loss": 0.0624,
      "step": 3900
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.8762382145840793e-05,
      "loss": 0.0488,
      "step": 4000
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.81656522258026e-05,
      "loss": 0.0462,
      "step": 4100
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.756892230576441e-05,
      "loss": 0.0531,
      "step": 4200
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.6972192385726218e-05,
      "loss": 0.0476,
      "step": 4300
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.6375462465688033e-05,
      "loss": 0.0537,
      "step": 4400
    },
    {
      "epoch": 1.0,
      "eval_f1_1": {
        "f1": 0.9362880886426593
      },
      "eval_f1_2": {
        "f1": 0.9555555555555556
      },
      "eval_loss": 0.04831862449645996,
      "eval_precision_1": {
        "precision": 0.9548022598870056
      },
      "eval_precision_2": {
        "precision": 0.9666424945612763
      },
      "eval_recall_1": {
        "recall": 0.9184782608695652
      },
      "eval_recall_2": {
        "recall": 0.9447200566973778
      },
      "eval_runtime": 173.0776,
      "eval_samples_per_second": 58.24,
      "eval_steps_per_second": 3.64,
      "step": 4410
    }
  ],
  "logging_steps": 100,
  "max_steps": 8820,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}
|
|