{
  "best_metric": 0.8314176245210728,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-vit0/checkpoint-296",
  "epoch": 19.45945945945946,
  "eval_steps": 500,
  "global_step": 360,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.54,
      "learning_rate": 1.388888888888889e-05,
      "loss": 1.13,
      "step": 10
    },
    {
      "epoch": 0.97,
      "eval_accuracy": 0.4329501915708812,
      "eval_loss": 1.0297268629074097,
      "eval_runtime": 147.8115,
      "eval_samples_per_second": 1.766,
      "eval_steps_per_second": 0.061,
      "step": 18
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.777777777777778e-05,
      "loss": 1.0527,
      "step": 20
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.9066,
      "step": 30
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6590038314176245,
      "eval_loss": 0.8349204659461975,
      "eval_runtime": 13.2219,
      "eval_samples_per_second": 19.74,
      "eval_steps_per_second": 0.681,
      "step": 37
    },
    {
      "epoch": 2.16,
      "learning_rate": 4.938271604938271e-05,
      "loss": 0.8065,
      "step": 40
    },
    {
      "epoch": 2.7,
      "learning_rate": 4.783950617283951e-05,
      "loss": 0.7157,
      "step": 50
    },
    {
      "epoch": 2.97,
      "eval_accuracy": 0.6743295019157088,
      "eval_loss": 0.8049836754798889,
      "eval_runtime": 13.1267,
      "eval_samples_per_second": 19.883,
      "eval_steps_per_second": 0.686,
      "step": 55
    },
    {
      "epoch": 3.24,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.6831,
      "step": 60
    },
    {
      "epoch": 3.78,
      "learning_rate": 4.4753086419753084e-05,
      "loss": 0.6446,
      "step": 70
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7164750957854407,
      "eval_loss": 0.6934032440185547,
      "eval_runtime": 13.1723,
      "eval_samples_per_second": 19.814,
      "eval_steps_per_second": 0.683,
      "step": 74
    },
    {
      "epoch": 4.32,
      "learning_rate": 4.3209876543209875e-05,
      "loss": 0.5756,
      "step": 80
    },
    {
      "epoch": 4.86,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.5707,
      "step": 90
    },
    {
      "epoch": 4.97,
      "eval_accuracy": 0.7432950191570882,
      "eval_loss": 0.6323675513267517,
      "eval_runtime": 13.1405,
      "eval_samples_per_second": 19.862,
      "eval_steps_per_second": 0.685,
      "step": 92
    },
    {
      "epoch": 5.41,
      "learning_rate": 4.012345679012346e-05,
      "loss": 0.5341,
      "step": 100
    },
    {
      "epoch": 5.95,
      "learning_rate": 3.8580246913580246e-05,
      "loss": 0.5042,
      "step": 110
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.735632183908046,
      "eval_loss": 0.6155727505683899,
      "eval_runtime": 12.7114,
      "eval_samples_per_second": 20.533,
      "eval_steps_per_second": 0.708,
      "step": 111
    },
    {
      "epoch": 6.49,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.4714,
      "step": 120
    },
    {
      "epoch": 6.97,
      "eval_accuracy": 0.7241379310344828,
      "eval_loss": 0.6825360655784607,
      "eval_runtime": 13.3632,
      "eval_samples_per_second": 19.531,
      "eval_steps_per_second": 0.673,
      "step": 129
    },
    {
      "epoch": 7.03,
      "learning_rate": 3.5493827160493834e-05,
      "loss": 0.4471,
      "step": 130
    },
    {
      "epoch": 7.57,
      "learning_rate": 3.395061728395062e-05,
      "loss": 0.4225,
      "step": 140
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7624521072796935,
      "eval_loss": 0.5692493915557861,
      "eval_runtime": 12.4031,
      "eval_samples_per_second": 21.043,
      "eval_steps_per_second": 0.726,
      "step": 148
    },
    {
      "epoch": 8.11,
      "learning_rate": 3.240740740740741e-05,
      "loss": 0.4066,
      "step": 150
    },
    {
      "epoch": 8.65,
      "learning_rate": 3.08641975308642e-05,
      "loss": 0.3912,
      "step": 160
    },
    {
      "epoch": 8.97,
      "eval_accuracy": 0.7586206896551724,
      "eval_loss": 0.615011990070343,
      "eval_runtime": 13.0398,
      "eval_samples_per_second": 20.016,
      "eval_steps_per_second": 0.69,
      "step": 166
    },
    {
      "epoch": 9.19,
      "learning_rate": 2.9320987654320992e-05,
      "loss": 0.3342,
      "step": 170
    },
    {
      "epoch": 9.73,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.3442,
      "step": 180
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8007662835249042,
      "eval_loss": 0.4901227355003357,
      "eval_runtime": 12.6622,
      "eval_samples_per_second": 20.613,
      "eval_steps_per_second": 0.711,
      "step": 185
    },
    {
      "epoch": 10.27,
      "learning_rate": 2.623456790123457e-05,
      "loss": 0.3381,
      "step": 190
    },
    {
      "epoch": 10.81,
      "learning_rate": 2.4691358024691357e-05,
      "loss": 0.289,
      "step": 200
    },
    {
      "epoch": 10.97,
      "eval_accuracy": 0.7739463601532567,
      "eval_loss": 0.5579754114151001,
      "eval_runtime": 13.0103,
      "eval_samples_per_second": 20.061,
      "eval_steps_per_second": 0.692,
      "step": 203
    },
    {
      "epoch": 11.35,
      "learning_rate": 2.314814814814815e-05,
      "loss": 0.2695,
      "step": 210
    },
    {
      "epoch": 11.89,
      "learning_rate": 2.1604938271604937e-05,
      "loss": 0.2827,
      "step": 220
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7969348659003831,
      "eval_loss": 0.5307646989822388,
      "eval_runtime": 12.5136,
      "eval_samples_per_second": 20.857,
      "eval_steps_per_second": 0.719,
      "step": 222
    },
    {
      "epoch": 12.43,
      "learning_rate": 2.006172839506173e-05,
      "loss": 0.2516,
      "step": 230
    },
    {
      "epoch": 12.97,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.2375,
      "step": 240
    },
    {
      "epoch": 12.97,
      "eval_accuracy": 0.8045977011494253,
      "eval_loss": 0.5273889899253845,
      "eval_runtime": 12.9936,
      "eval_samples_per_second": 20.087,
      "eval_steps_per_second": 0.693,
      "step": 240
    },
    {
      "epoch": 13.51,
      "learning_rate": 1.697530864197531e-05,
      "loss": 0.2493,
      "step": 250
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.8045977011494253,
      "eval_loss": 0.5433180332183838,
      "eval_runtime": 12.7166,
      "eval_samples_per_second": 20.524,
      "eval_steps_per_second": 0.708,
      "step": 259
    },
    {
      "epoch": 14.05,
      "learning_rate": 1.54320987654321e-05,
      "loss": 0.2301,
      "step": 260
    },
    {
      "epoch": 14.59,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.2309,
      "step": 270
    },
    {
      "epoch": 14.97,
      "eval_accuracy": 0.7931034482758621,
      "eval_loss": 0.5354548692703247,
      "eval_runtime": 13.0668,
      "eval_samples_per_second": 19.974,
      "eval_steps_per_second": 0.689,
      "step": 277
    },
    {
      "epoch": 15.14,
      "learning_rate": 1.2345679012345678e-05,
      "loss": 0.2187,
      "step": 280
    },
    {
      "epoch": 15.68,
      "learning_rate": 1.0802469135802469e-05,
      "loss": 0.1963,
      "step": 290
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.8314176245210728,
      "eval_loss": 0.483632355928421,
      "eval_runtime": 13.0481,
      "eval_samples_per_second": 20.003,
      "eval_steps_per_second": 0.69,
      "step": 296
    },
    {
      "epoch": 16.22,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.2209,
      "step": 300
    },
    {
      "epoch": 16.76,
      "learning_rate": 7.71604938271605e-06,
      "loss": 0.2162,
      "step": 310
    },
    {
      "epoch": 16.97,
      "eval_accuracy": 0.8237547892720306,
      "eval_loss": 0.4972710609436035,
      "eval_runtime": 13.0793,
      "eval_samples_per_second": 19.955,
      "eval_steps_per_second": 0.688,
      "step": 314
    },
    {
      "epoch": 17.3,
      "learning_rate": 6.172839506172839e-06,
      "loss": 0.1948,
      "step": 320
    },
    {
      "epoch": 17.84,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 0.2256,
      "step": 330
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.8275862068965517,
      "eval_loss": 0.49177083373069763,
      "eval_runtime": 13.1863,
      "eval_samples_per_second": 19.793,
      "eval_steps_per_second": 0.683,
      "step": 333
    },
    {
      "epoch": 18.38,
      "learning_rate": 3.0864197530864196e-06,
      "loss": 0.2093,
      "step": 340
    },
    {
      "epoch": 18.92,
      "learning_rate": 1.5432098765432098e-06,
      "loss": 0.2124,
      "step": 350
    },
    {
      "epoch": 18.97,
      "eval_accuracy": 0.8160919540229885,
      "eval_loss": 0.5070743560791016,
      "eval_runtime": 13.1883,
      "eval_samples_per_second": 19.79,
      "eval_steps_per_second": 0.682,
      "step": 351
    },
    {
      "epoch": 19.46,
      "learning_rate": 0.0,
      "loss": 0.1797,
      "step": 360
    },
    {
      "epoch": 19.46,
      "eval_accuracy": 0.8199233716475096,
      "eval_loss": 0.4984593987464905,
      "eval_runtime": 12.9999,
      "eval_samples_per_second": 20.077,
      "eval_steps_per_second": 0.692,
      "step": 360
    },
    {
      "epoch": 19.46,
      "step": 360,
      "total_flos": 1.133589052022999e+18,
      "train_loss": 0.422045146756702,
      "train_runtime": 3802.191,
      "train_samples_per_second": 12.324,
      "train_steps_per_second": 0.095
    }
  ],
  "logging_steps": 10,
  "max_steps": 360,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 1.133589052022999e+18,
  "trial_name": null,
  "trial_params": null
}