{
  "best_metric": 0.8620689655172413,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-noh\\checkpoint-92",
  "epoch": 9.577777777777778,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 12.336226463317871,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.7914,
      "step": 10
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 8.176236152648926,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.5322,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7619047619047619,
      "eval_loss": 0.46465402841567993,
      "eval_runtime": 64.6197,
      "eval_samples_per_second": 9.424,
      "eval_steps_per_second": 0.604,
      "step": 23
    },
    {
      "epoch": 1.3111111111111111,
      "grad_norm": 12.591184616088867,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.4748,
      "step": 30
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 11.170787811279297,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.4535,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.819376026272578,
      "eval_loss": 0.4359256625175476,
      "eval_runtime": 64.0615,
      "eval_samples_per_second": 9.506,
      "eval_steps_per_second": 0.609,
      "step": 46
    },
    {
      "epoch": 2.1777777777777776,
      "grad_norm": 13.398783683776855,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.323,
      "step": 50
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 14.519952774047852,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.3854,
      "step": 60
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8538587848932676,
      "eval_loss": 0.3514404892921448,
      "eval_runtime": 64.0412,
      "eval_samples_per_second": 9.51,
      "eval_steps_per_second": 0.609,
      "step": 69
    },
    {
      "epoch": 3.0444444444444443,
      "grad_norm": 9.444785118103027,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.3279,
      "step": 70
    },
    {
      "epoch": 3.488888888888889,
      "grad_norm": 25.448450088500977,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.3003,
      "step": 80
    },
    {
      "epoch": 3.9333333333333336,
      "grad_norm": 14.186548233032227,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.302,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8620689655172413,
      "eval_loss": 0.4349357783794403,
      "eval_runtime": 64.2242,
      "eval_samples_per_second": 9.482,
      "eval_steps_per_second": 0.607,
      "step": 92
    },
    {
      "epoch": 4.355555555555555,
      "grad_norm": 10.739775657653809,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.2932,
      "step": 100
    },
    {
      "epoch": 4.8,
      "grad_norm": 25.793798446655273,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.2571,
      "step": 110
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8095238095238095,
      "eval_loss": 0.5112400054931641,
      "eval_runtime": 64.0219,
      "eval_samples_per_second": 9.512,
      "eval_steps_per_second": 0.609,
      "step": 115
    },
    {
      "epoch": 5.222222222222222,
      "grad_norm": 14.213773727416992,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.2101,
      "step": 120
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 48.91055679321289,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.2104,
      "step": 130
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.825944170771757,
      "eval_loss": 0.445329487323761,
      "eval_runtime": 64.6759,
      "eval_samples_per_second": 9.416,
      "eval_steps_per_second": 0.603,
      "step": 138
    },
    {
      "epoch": 6.088888888888889,
      "grad_norm": 17.380577087402344,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.2352,
      "step": 140
    },
    {
      "epoch": 6.533333333333333,
      "grad_norm": 14.665043830871582,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.2105,
      "step": 150
    },
    {
      "epoch": 6.977777777777778,
      "grad_norm": 16.076261520385742,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.1702,
      "step": 160
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7832512315270936,
      "eval_loss": 0.554968535900116,
      "eval_runtime": 64.6137,
      "eval_samples_per_second": 9.425,
      "eval_steps_per_second": 0.604,
      "step": 161
    },
    {
      "epoch": 7.4,
      "grad_norm": 20.720272064208984,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.1453,
      "step": 170
    },
    {
      "epoch": 7.844444444444444,
      "grad_norm": 15.213704109191895,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.1682,
      "step": 180
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7947454844006568,
      "eval_loss": 0.5312520265579224,
      "eval_runtime": 65.466,
      "eval_samples_per_second": 9.303,
      "eval_steps_per_second": 0.596,
      "step": 184
    },
    {
      "epoch": 8.266666666666667,
      "grad_norm": 32.07905197143555,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.123,
      "step": 190
    },
    {
      "epoch": 8.71111111111111,
      "grad_norm": 12.448338508605957,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.136,
      "step": 200
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8275862068965517,
      "eval_loss": 0.5452161431312561,
      "eval_runtime": 64.3054,
      "eval_samples_per_second": 9.47,
      "eval_steps_per_second": 0.606,
      "step": 207
    },
    {
      "epoch": 9.133333333333333,
      "grad_norm": 24.97603416442871,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.1358,
      "step": 210
    },
    {
      "epoch": 9.577777777777778,
      "grad_norm": 27.814254760742188,
      "learning_rate": 0.0,
      "loss": 0.1415,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "eval_accuracy": 0.8210180623973727,
      "eval_loss": 0.5352054834365845,
      "eval_runtime": 66.8384,
      "eval_samples_per_second": 9.112,
      "eval_steps_per_second": 0.583,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "step": 220,
      "total_flos": 3.4259009619205325e+17,
      "train_loss": 0.2875923281366175,
      "train_runtime": 2347.1095,
      "train_samples_per_second": 6.131,
      "train_steps_per_second": 0.094
    }
  ],
  "logging_steps": 10,
  "max_steps": 220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.4259009619205325e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}