{
  "best_metric": 0.9936204146730463,
  "best_model_checkpoint": "Brain_Tumor_Class_swin/checkpoint-660",
  "epoch": 2.997732426303855,
  "global_step": 660,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 1.4262,
      "step": 10
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 1.1655,
      "step": 20
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.8214,
      "step": 30
    },
    {
      "epoch": 0.18,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.5529,
      "step": 40
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.4809,
      "step": 50
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.3331,
      "step": 60
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.966329966329967e-05,
      "loss": 0.2898,
      "step": 70
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.882154882154882e-05,
      "loss": 0.2294,
      "step": 80
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.2239,
      "step": 90
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.713804713804714e-05,
      "loss": 0.2285,
      "step": 100
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.1986,
      "step": 110
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.2205,
      "step": 120
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.4612794612794616e-05,
      "loss": 0.2039,
      "step": 130
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.3771043771043774e-05,
      "loss": 0.1516,
      "step": 140
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.1656,
      "step": 150
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.208754208754209e-05,
      "loss": 0.2107,
      "step": 160
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.124579124579125e-05,
      "loss": 0.1502,
      "step": 170
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.1566,
      "step": 180
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.956228956228956e-05,
      "loss": 0.1468,
      "step": 190
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.872053872053872e-05,
      "loss": 0.1507,
      "step": 200
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.1334,
      "step": 210
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.1248,
      "step": 220
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9767145135566189,
      "eval_f1": 0.9767145135566189,
      "eval_loss": 0.060983214527368546,
      "eval_precision": 0.9767145135566189,
      "eval_recall": 0.9767145135566189,
      "eval_runtime": 420.9001,
      "eval_samples_per_second": 7.448,
      "eval_steps_per_second": 0.233,
      "step": 220
    },
    {
      "epoch": 1.05,
      "learning_rate": 3.61952861952862e-05,
      "loss": 0.1459,
      "step": 230
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.1481,
      "step": 240
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.451178451178451e-05,
      "loss": 0.1443,
      "step": 250
    },
    {
      "epoch": 1.18,
      "learning_rate": 3.3670033670033675e-05,
      "loss": 0.1318,
      "step": 260
    },
    {
      "epoch": 1.23,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.1296,
      "step": 270
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.198653198653199e-05,
      "loss": 0.094,
      "step": 280
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.114478114478115e-05,
      "loss": 0.096,
      "step": 290
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.1289,
      "step": 300
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.946127946127946e-05,
      "loss": 0.0931,
      "step": 310
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.8619528619528618e-05,
      "loss": 0.1031,
      "step": 320
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.0982,
      "step": 330
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.6936026936026937e-05,
      "loss": 0.11,
      "step": 340
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.6094276094276095e-05,
      "loss": 0.0974,
      "step": 350
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.0739,
      "step": 360
    },
    {
      "epoch": 1.68,
      "learning_rate": 2.441077441077441e-05,
      "loss": 0.0712,
      "step": 370
    },
    {
      "epoch": 1.73,
      "learning_rate": 2.356902356902357e-05,
      "loss": 0.1139,
      "step": 380
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.0882,
      "step": 390
    },
    {
      "epoch": 1.82,
      "learning_rate": 2.1885521885521887e-05,
      "loss": 0.1168,
      "step": 400
    },
    {
      "epoch": 1.86,
      "learning_rate": 2.1043771043771045e-05,
      "loss": 0.0851,
      "step": 410
    },
    {
      "epoch": 1.91,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.0816,
      "step": 420
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.936026936026936e-05,
      "loss": 0.0786,
      "step": 430
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.0887,
      "step": 440
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9920255183413078,
      "eval_f1": 0.9920255183413078,
      "eval_loss": 0.03002375364303589,
      "eval_precision": 0.9920255183413078,
      "eval_recall": 0.9920255183413078,
      "eval_runtime": 377.2212,
      "eval_samples_per_second": 8.311,
      "eval_steps_per_second": 0.26,
      "step": 440
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.0823,
      "step": 450
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.6835016835016837e-05,
      "loss": 0.0786,
      "step": 460
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.5993265993265995e-05,
      "loss": 0.0641,
      "step": 470
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.0808,
      "step": 480
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.4309764309764309e-05,
      "loss": 0.0761,
      "step": 490
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.3468013468013468e-05,
      "loss": 0.0761,
      "step": 500
    },
    {
      "epoch": 2.32,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.0721,
      "step": 510
    },
    {
      "epoch": 2.36,
      "learning_rate": 1.1784511784511786e-05,
      "loss": 0.0712,
      "step": 520
    },
    {
      "epoch": 2.41,
      "learning_rate": 1.0942760942760944e-05,
      "loss": 0.0659,
      "step": 530
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.0699,
      "step": 540
    },
    {
      "epoch": 2.5,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.0569,
      "step": 550
    },
    {
      "epoch": 2.54,
      "learning_rate": 8.417508417508419e-06,
      "loss": 0.0582,
      "step": 560
    },
    {
      "epoch": 2.59,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.0712,
      "step": 570
    },
    {
      "epoch": 2.63,
      "learning_rate": 6.734006734006734e-06,
      "loss": 0.0586,
      "step": 580
    },
    {
      "epoch": 2.68,
      "learning_rate": 5.892255892255893e-06,
      "loss": 0.0751,
      "step": 590
    },
    {
      "epoch": 2.73,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.052,
      "step": 600
    },
    {
      "epoch": 2.77,
      "learning_rate": 4.208754208754209e-06,
      "loss": 0.0764,
      "step": 610
    },
    {
      "epoch": 2.82,
      "learning_rate": 3.367003367003367e-06,
      "loss": 0.0568,
      "step": 620
    },
    {
      "epoch": 2.86,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.0573,
      "step": 630
    },
    {
      "epoch": 2.91,
      "learning_rate": 1.6835016835016836e-06,
      "loss": 0.052,
      "step": 640
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.417508417508418e-07,
      "loss": 0.0571,
      "step": 650
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0,
      "loss": 0.0449,
      "step": 660
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9936204146730463,
      "eval_f1": 0.9936204146730463,
      "eval_loss": 0.022031184285879135,
      "eval_precision": 0.9936204146730463,
      "eval_recall": 0.9936204146730463,
      "eval_runtime": 377.6762,
      "eval_samples_per_second": 8.301,
      "eval_steps_per_second": 0.259,
      "step": 660
    },
    {
      "epoch": 3.0,
      "step": 660,
      "total_flos": 6.627407168486523e+18,
      "train_loss": 0.17479625046253205,
      "train_runtime": 48488.9343,
      "train_samples_per_second": 1.746,
      "train_steps_per_second": 0.014
    }
  ],
  "max_steps": 660,
  "num_train_epochs": 3,
  "total_flos": 6.627407168486523e+18,
  "trial_name": null,
  "trial_params": null
}