{
  "best_metric": 1.0,
  "best_model_checkpoint": "distilbert-base-uncased_finetuned_text_2_disease_cel_v2/checkpoint-501",
  "epoch": 3.0,
  "global_step": 501,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 3.920159680638723e-05,
      "loss": 3.208,
      "step": 10
    },
    {
      "epoch": 0.12,
      "learning_rate": 3.840319361277446e-05,
      "loss": 3.1566,
      "step": 20
    },
    {
      "epoch": 0.18,
      "learning_rate": 3.7604790419161676e-05,
      "loss": 3.0327,
      "step": 30
    },
    {
      "epoch": 0.24,
      "learning_rate": 3.680638722554891e-05,
      "loss": 2.8296,
      "step": 40
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.600798403193613e-05,
      "loss": 2.5667,
      "step": 50
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.5209580838323356e-05,
      "loss": 2.3494,
      "step": 60
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.441117764471058e-05,
      "loss": 2.0569,
      "step": 70
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.3612774451097804e-05,
      "loss": 1.7982,
      "step": 80
    },
    {
      "epoch": 0.54,
      "learning_rate": 3.2814371257485035e-05,
      "loss": 1.577,
      "step": 90
    },
    {
      "epoch": 0.6,
      "learning_rate": 3.201596806387226e-05,
      "loss": 1.3064,
      "step": 100
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.1217564870259484e-05,
      "loss": 1.2182,
      "step": 110
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.041916167664671e-05,
      "loss": 0.9891,
      "step": 120
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.9620758483033936e-05,
      "loss": 0.872,
      "step": 130
    },
    {
      "epoch": 0.84,
      "learning_rate": 2.8822355289421164e-05,
      "loss": 0.7503,
      "step": 140
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.8023952095808385e-05,
      "loss": 0.6134,
      "step": 150
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.7225548902195612e-05,
      "loss": 0.5425,
      "step": 160
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.963963963963964,
      "eval_f1": 0.9630884536155822,
      "eval_loss": 0.36853650212287903,
      "eval_precision": 0.9698101430349508,
      "eval_recall": 0.963963963963964,
      "eval_runtime": 3.8045,
      "eval_samples_per_second": 175.058,
      "eval_steps_per_second": 5.52,
      "step": 167
    },
    {
      "epoch": 1.02,
      "learning_rate": 2.6427145708582837e-05,
      "loss": 0.4085,
      "step": 170
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.5628742514970064e-05,
      "loss": 0.3705,
      "step": 180
    },
    {
      "epoch": 1.14,
      "learning_rate": 2.4830339321357285e-05,
      "loss": 0.317,
      "step": 190
    },
    {
      "epoch": 1.2,
      "learning_rate": 2.4031936127744513e-05,
      "loss": 0.25,
      "step": 200
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.323353293413174e-05,
      "loss": 0.2564,
      "step": 210
    },
    {
      "epoch": 1.32,
      "learning_rate": 2.2435129740518965e-05,
      "loss": 0.2334,
      "step": 220
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.163672654690619e-05,
      "loss": 0.231,
      "step": 230
    },
    {
      "epoch": 1.44,
      "learning_rate": 2.0838323353293413e-05,
      "loss": 0.1942,
      "step": 240
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.003992015968064e-05,
      "loss": 0.1782,
      "step": 250
    },
    {
      "epoch": 1.56,
      "learning_rate": 1.9241516966067865e-05,
      "loss": 0.1353,
      "step": 260
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.8443113772455093e-05,
      "loss": 0.1027,
      "step": 270
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.7644710578842317e-05,
      "loss": 0.1048,
      "step": 280
    },
    {
      "epoch": 1.74,
      "learning_rate": 1.684630738522954e-05,
      "loss": 0.1052,
      "step": 290
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.604790419161677e-05,
      "loss": 0.0849,
      "step": 300
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.5249500998003994e-05,
      "loss": 0.0974,
      "step": 310
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.4451097804391218e-05,
      "loss": 0.075,
      "step": 320
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.3652694610778446e-05,
      "loss": 0.0887,
      "step": 330
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9984984984984985,
      "eval_f1": 0.9984973421631103,
      "eval_loss": 0.042379751801490784,
      "eval_precision": 0.9985541096652208,
      "eval_recall": 0.9984984984984985,
      "eval_runtime": 3.9115,
      "eval_samples_per_second": 170.269,
      "eval_steps_per_second": 5.369,
      "step": 334
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.285429141716567e-05,
      "loss": 0.0749,
      "step": 340
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.2055888223552896e-05,
      "loss": 0.0532,
      "step": 350
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.125748502994012e-05,
      "loss": 0.0807,
      "step": 360
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.0459081836327348e-05,
      "loss": 0.0712,
      "step": 370
    },
    {
      "epoch": 2.28,
      "learning_rate": 9.660678642714572e-06,
      "loss": 0.0575,
      "step": 380
    },
    {
      "epoch": 2.34,
      "learning_rate": 8.862275449101796e-06,
      "loss": 0.0533,
      "step": 390
    },
    {
      "epoch": 2.4,
      "learning_rate": 8.063872255489022e-06,
      "loss": 0.0612,
      "step": 400
    },
    {
      "epoch": 2.46,
      "learning_rate": 7.2654690618762484e-06,
      "loss": 0.0482,
      "step": 410
    },
    {
      "epoch": 2.51,
      "learning_rate": 6.4670658682634736e-06,
      "loss": 0.0539,
      "step": 420
    },
    {
      "epoch": 2.57,
      "learning_rate": 5.668662674650699e-06,
      "loss": 0.0655,
      "step": 430
    },
    {
      "epoch": 2.63,
      "learning_rate": 4.870259481037925e-06,
      "loss": 0.0646,
      "step": 440
    },
    {
      "epoch": 2.69,
      "learning_rate": 4.07185628742515e-06,
      "loss": 0.0642,
      "step": 450
    },
    {
      "epoch": 2.75,
      "learning_rate": 3.2734530938123754e-06,
      "loss": 0.0448,
      "step": 460
    },
    {
      "epoch": 2.81,
      "learning_rate": 2.475049900199601e-06,
      "loss": 0.0394,
      "step": 470
    },
    {
      "epoch": 2.87,
      "learning_rate": 1.6766467065868263e-06,
      "loss": 0.0657,
      "step": 480
    },
    {
      "epoch": 2.93,
      "learning_rate": 8.78243512974052e-07,
      "loss": 0.0769,
      "step": 490
    },
    {
      "epoch": 2.99,
      "learning_rate": 7.984031936127744e-08,
      "loss": 0.0399,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.026059171184897423,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 3.7333,
      "eval_samples_per_second": 178.395,
      "eval_steps_per_second": 5.625,
      "step": 501
    },
    {
      "epoch": 3.0,
      "step": 501,
      "total_flos": 2118227382190080.0,
      "train_loss": 0.661047323973117,
      "train_runtime": 349.1831,
      "train_samples_per_second": 45.775,
      "train_steps_per_second": 1.435
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9981231231231231,
      "eval_f1": 0.9981237024802164,
      "eval_loss": 0.036395490169525146,
      "eval_precision": 0.9981332041882663,
      "eval_recall": 0.9981231231231231,
      "eval_runtime": 34.4871,
      "eval_samples_per_second": 154.492,
      "eval_steps_per_second": 4.842,
      "step": 501
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.026059171184897423,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 4.0142,
      "eval_samples_per_second": 165.91,
      "eval_steps_per_second": 5.231,
      "step": 501
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.025151288136839867,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 3.787,
      "eval_samples_per_second": 176.129,
      "eval_steps_per_second": 5.545,
      "step": 501
    }
  ],
  "max_steps": 501,
  "num_train_epochs": 3,
  "total_flos": 2118227382190080.0,
  "trial_name": null,
  "trial_params": null
}