{
  "best_metric": 0.9612,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-cifar10/checkpoint-264",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 264,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 2.3155,
      "step": 10
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 2.0565,
      "step": 20
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.936708860759494e-05,
      "loss": 1.4845,
      "step": 30
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.725738396624473e-05,
      "loss": 1.0076,
      "step": 40
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.5147679324894514e-05,
      "loss": 0.8139,
      "step": 50
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.3037974683544305e-05,
      "loss": 0.7133,
      "step": 60
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.0928270042194096e-05,
      "loss": 0.6384,
      "step": 70
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.881856540084388e-05,
      "loss": 0.6141,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.931,
      "eval_loss": 0.20727095007896423,
      "eval_runtime": 32.7794,
      "eval_samples_per_second": 152.535,
      "eval_steps_per_second": 1.22,
      "step": 88
    },
    {
      "epoch": 1.02,
      "learning_rate": 3.670886075949367e-05,
      "loss": 0.5816,
      "step": 90
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.459915611814346e-05,
      "loss": 0.5254,
      "step": 100
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.248945147679325e-05,
      "loss": 0.5173,
      "step": 110
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.0379746835443042e-05,
      "loss": 0.5545,
      "step": 120
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.8270042194092826e-05,
      "loss": 0.5089,
      "step": 130
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.616033755274262e-05,
      "loss": 0.4906,
      "step": 140
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.4050632911392405e-05,
      "loss": 0.4969,
      "step": 150
    },
    {
      "epoch": 1.82,
      "learning_rate": 2.1940928270042196e-05,
      "loss": 0.471,
      "step": 160
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.9831223628691984e-05,
      "loss": 0.4767,
      "step": 170
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.959,
      "eval_loss": 0.13500115275382996,
      "eval_runtime": 33.1717,
      "eval_samples_per_second": 150.731,
      "eval_steps_per_second": 1.206,
      "step": 176
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.7721518987341772e-05,
      "loss": 0.424,
      "step": 180
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.5611814345991563e-05,
      "loss": 0.4367,
      "step": 190
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.3502109704641349e-05,
      "loss": 0.4228,
      "step": 200
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.139240506329114e-05,
      "loss": 0.4556,
      "step": 210
    },
    {
      "epoch": 2.5,
      "learning_rate": 9.28270042194093e-06,
      "loss": 0.4401,
      "step": 220
    },
    {
      "epoch": 2.61,
      "learning_rate": 7.1729957805907175e-06,
      "loss": 0.445,
      "step": 230
    },
    {
      "epoch": 2.73,
      "learning_rate": 5.063291139240506e-06,
      "loss": 0.4204,
      "step": 240
    },
    {
      "epoch": 2.84,
      "learning_rate": 2.9535864978902956e-06,
      "loss": 0.423,
      "step": 250
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.438818565400843e-07,
      "loss": 0.4153,
      "step": 260
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9612,
      "eval_loss": 0.12175925076007843,
      "eval_runtime": 32.5962,
      "eval_samples_per_second": 153.392,
      "eval_steps_per_second": 1.227,
      "step": 264
    },
    {
      "epoch": 3.0,
      "step": 264,
      "total_flos": 3.35630861217792e+18,
      "train_loss": 0.6934980005025864,
      "train_runtime": 2145.9759,
      "train_samples_per_second": 62.908,
      "train_steps_per_second": 0.123
    }
  ],
  "logging_steps": 10,
  "max_steps": 264,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.35630861217792e+18,
  "trial_name": null,
  "trial_params": null
}