{
  "best_metric": 0.9592,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-cifar10/checkpoint-264",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 264,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 2.3771,
      "step": 10
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 2.1143,
      "step": 20
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.936708860759494e-05,
      "loss": 1.5311,
      "step": 30
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.725738396624473e-05,
      "loss": 1.0618,
      "step": 40
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.5147679324894514e-05,
      "loss": 0.8814,
      "step": 50
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.3037974683544305e-05,
      "loss": 0.7527,
      "step": 60
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.0928270042194096e-05,
      "loss": 0.6469,
      "step": 70
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.881856540084388e-05,
      "loss": 0.6304,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.93,
      "eval_loss": 0.21966831386089325,
      "eval_runtime": 27.3393,
      "eval_samples_per_second": 182.887,
      "eval_steps_per_second": 1.463,
      "step": 88
    },
    {
      "epoch": 1.02,
      "learning_rate": 3.670886075949367e-05,
      "loss": 0.5971,
      "step": 90
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.459915611814346e-05,
      "loss": 0.5672,
      "step": 100
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.248945147679325e-05,
      "loss": 0.5286,
      "step": 110
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.0379746835443042e-05,
      "loss": 0.5397,
      "step": 120
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.8270042194092826e-05,
      "loss": 0.5127,
      "step": 130
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.616033755274262e-05,
      "loss": 0.4767,
      "step": 140
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.4050632911392405e-05,
      "loss": 0.4775,
      "step": 150
    },
    {
      "epoch": 1.82,
      "learning_rate": 2.1940928270042196e-05,
      "loss": 0.4686,
      "step": 160
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.9831223628691984e-05,
      "loss": 0.4802,
      "step": 170
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9558,
      "eval_loss": 0.14049997925758362,
      "eval_runtime": 27.3436,
      "eval_samples_per_second": 182.858,
      "eval_steps_per_second": 1.463,
      "step": 176
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.7721518987341772e-05,
      "loss": 0.4383,
      "step": 180
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.5611814345991563e-05,
      "loss": 0.4394,
      "step": 190
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.3502109704641349e-05,
      "loss": 0.4426,
      "step": 200
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.139240506329114e-05,
      "loss": 0.444,
      "step": 210
    },
    {
      "epoch": 2.5,
      "learning_rate": 9.28270042194093e-06,
      "loss": 0.4171,
      "step": 220
    },
    {
      "epoch": 2.61,
      "learning_rate": 7.1729957805907175e-06,
      "loss": 0.4329,
      "step": 230
    },
    {
      "epoch": 2.73,
      "learning_rate": 5.063291139240506e-06,
      "loss": 0.4367,
      "step": 240
    },
    {
      "epoch": 2.84,
      "learning_rate": 2.9535864978902956e-06,
      "loss": 0.4151,
      "step": 250
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.438818565400843e-07,
      "loss": 0.4114,
      "step": 260
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9592,
      "eval_loss": 0.1276138424873352,
      "eval_runtime": 27.1749,
      "eval_samples_per_second": 183.993,
      "eval_steps_per_second": 1.472,
      "step": 264
    },
    {
      "epoch": 3.0,
      "step": 264,
      "total_flos": 3.35630861217792e+18,
      "train_loss": 0.7078906785358082,
      "train_runtime": 1898.5865,
      "train_samples_per_second": 71.106,
      "train_steps_per_second": 0.139
    }
  ],
  "logging_steps": 10,
  "max_steps": 264,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.35630861217792e+18,
  "trial_name": null,
  "trial_params": null
}