{
"best_metric": 0.9737037037037037,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-285",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 285,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10526315789473684,
"grad_norm": 5.289836406707764,
"learning_rate": 1.7241379310344828e-05,
"loss": 2.2434,
"step": 10
},
{
"epoch": 0.21052631578947367,
"grad_norm": 5.927619457244873,
"learning_rate": 3.4482758620689657e-05,
"loss": 1.7979,
"step": 20
},
{
"epoch": 0.3157894736842105,
"grad_norm": 8.896160125732422,
"learning_rate": 4.9804687500000004e-05,
"loss": 1.032,
"step": 30
},
{
"epoch": 0.42105263157894735,
"grad_norm": 22.719310760498047,
"learning_rate": 4.78515625e-05,
"loss": 0.5576,
"step": 40
},
{
"epoch": 0.5263157894736842,
"grad_norm": 13.316530227661133,
"learning_rate": 4.58984375e-05,
"loss": 0.4798,
"step": 50
},
{
"epoch": 0.631578947368421,
"grad_norm": 13.046772003173828,
"learning_rate": 4.3945312500000005e-05,
"loss": 0.3895,
"step": 60
},
{
"epoch": 0.7368421052631579,
"grad_norm": 10.398356437683105,
"learning_rate": 4.1992187500000003e-05,
"loss": 0.342,
"step": 70
},
{
"epoch": 0.8421052631578947,
"grad_norm": 11.918193817138672,
"learning_rate": 4.00390625e-05,
"loss": 0.3263,
"step": 80
},
{
"epoch": 0.9473684210526315,
"grad_norm": 7.351226329803467,
"learning_rate": 3.80859375e-05,
"loss": 0.281,
"step": 90
},
{
"epoch": 1.0,
"eval_accuracy": 0.9562962962962963,
"eval_loss": 0.15288086235523224,
"eval_runtime": 19.6267,
"eval_samples_per_second": 137.568,
"eval_steps_per_second": 2.191,
"step": 95
},
{
"epoch": 1.0526315789473684,
"grad_norm": 8.774168968200684,
"learning_rate": 3.6132812500000005e-05,
"loss": 0.2871,
"step": 100
},
{
"epoch": 1.1578947368421053,
"grad_norm": 8.237377166748047,
"learning_rate": 3.41796875e-05,
"loss": 0.2664,
"step": 110
},
{
"epoch": 1.263157894736842,
"grad_norm": 12.695280075073242,
"learning_rate": 3.22265625e-05,
"loss": 0.2378,
"step": 120
},
{
"epoch": 1.368421052631579,
"grad_norm": 8.310792922973633,
"learning_rate": 3.02734375e-05,
"loss": 0.2349,
"step": 130
},
{
"epoch": 1.4736842105263157,
"grad_norm": 10.032830238342285,
"learning_rate": 2.83203125e-05,
"loss": 0.2248,
"step": 140
},
{
"epoch": 1.5789473684210527,
"grad_norm": 7.90118932723999,
"learning_rate": 2.63671875e-05,
"loss": 0.2076,
"step": 150
},
{
"epoch": 1.6842105263157894,
"grad_norm": 15.789655685424805,
"learning_rate": 2.44140625e-05,
"loss": 0.1953,
"step": 160
},
{
"epoch": 1.7894736842105263,
"grad_norm": 8.423171043395996,
"learning_rate": 2.24609375e-05,
"loss": 0.1949,
"step": 170
},
{
"epoch": 1.8947368421052633,
"grad_norm": 10.328082084655762,
"learning_rate": 2.05078125e-05,
"loss": 0.1943,
"step": 180
},
{
"epoch": 2.0,
"grad_norm": 14.849194526672363,
"learning_rate": 1.85546875e-05,
"loss": 0.1976,
"step": 190
},
{
"epoch": 2.0,
"eval_accuracy": 0.9722222222222222,
"eval_loss": 0.09291564673185349,
"eval_runtime": 13.9085,
"eval_samples_per_second": 194.125,
"eval_steps_per_second": 3.092,
"step": 190
},
{
"epoch": 2.1052631578947367,
"grad_norm": 9.324716567993164,
"learning_rate": 1.66015625e-05,
"loss": 0.1966,
"step": 200
},
{
"epoch": 2.2105263157894735,
"grad_norm": 8.505390167236328,
"learning_rate": 1.4648437500000001e-05,
"loss": 0.2039,
"step": 210
},
{
"epoch": 2.3157894736842106,
"grad_norm": 6.697166919708252,
"learning_rate": 1.2695312500000001e-05,
"loss": 0.1835,
"step": 220
},
{
"epoch": 2.4210526315789473,
"grad_norm": 14.402400016784668,
"learning_rate": 1.0742187500000001e-05,
"loss": 0.1704,
"step": 230
},
{
"epoch": 2.526315789473684,
"grad_norm": 8.63730239868164,
"learning_rate": 8.789062500000001e-06,
"loss": 0.1799,
"step": 240
},
{
"epoch": 2.6315789473684212,
"grad_norm": 6.670950412750244,
"learning_rate": 6.8359375e-06,
"loss": 0.1442,
"step": 250
},
{
"epoch": 2.736842105263158,
"grad_norm": 8.355303764343262,
"learning_rate": 4.8828125e-06,
"loss": 0.1708,
"step": 260
},
{
"epoch": 2.8421052631578947,
"grad_norm": 6.931336879730225,
"learning_rate": 2.9296875e-06,
"loss": 0.1881,
"step": 270
},
{
"epoch": 2.9473684210526314,
"grad_norm": 6.851834297180176,
"learning_rate": 9.765625e-07,
"loss": 0.1805,
"step": 280
},
{
"epoch": 3.0,
"eval_accuracy": 0.9737037037037037,
"eval_loss": 0.07975314557552338,
"eval_runtime": 14.0525,
"eval_samples_per_second": 192.136,
"eval_steps_per_second": 3.06,
"step": 285
},
{
"epoch": 3.0,
"step": 285,
"total_flos": 1.8124066505760768e+18,
"train_loss": 0.39992518445901704,
"train_runtime": 819.9071,
"train_samples_per_second": 88.913,
"train_steps_per_second": 0.348
}
],
"logging_steps": 10,
"max_steps": 285,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.8124066505760768e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}