nandyc committed
Commit b73d566
1 Parent(s): 4bb9f60

Model save

Files changed (5)
  1. README.md +68 -0
  2. config.json +109 -0
  3. preprocessor_config.json +23 -0
  4. pytorch_model.bin +3 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ license: apache-2.0
+ base_model: microsoft/swin-tiny-patch4-window7-224
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: swin-tiny-patch4-window7-224-finetuned_ASL_Isolated_Swin_dataset2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # swin-tiny-patch4-window7-224-finetuned_ASL_Isolated_Swin_dataset2
+
+ This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0702
+ - Accuracy: 0.9808
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a `TrainingArguments` sketch reproducing them follows the list):
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
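
These settings map directly onto `transformers.TrainingArguments`. A minimal sketch reproducing them; `output_dir` and the step-based evaluation cadence are assumptions for illustration, not taken from this card:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="swin-finetuned-asl",  # assumed name, not from the card
    learning_rate=2e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=10,
    evaluation_strategy="steps",  # assumed: the results table below logs every 100 steps
    eval_steps=100,
)
```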
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 1.39 | 1.09 | 100 | 1.1827 | 0.6346 |
+ | 0.8972 | 2.17 | 200 | 0.6287 | 0.7808 |
+ | 0.4884 | 3.26 | 300 | 0.2927 | 0.8962 |
+ | 0.4179 | 4.35 | 400 | 0.1795 | 0.9423 |
+ | 0.4169 | 5.43 | 500 | 0.1564 | 0.95 |
+ | 0.3427 | 6.52 | 600 | 0.1426 | 0.95 |
+ | 0.2939 | 7.61 | 700 | 0.1174 | 0.9731 |
+ | 0.1605 | 8.7 | 800 | 0.0640 | 0.9846 |
+ | 0.1865 | 9.78 | 900 | 0.0702 | 0.9808 |
+
+
+ ### Framework versions
+
+ - Transformers 4.34.0
+ - PyTorch 2.0.1+cu118
+ - Datasets 2.14.5
+ - Tokenizers 0.14.1
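
For quick use, a minimal inference sketch; the repo id is an assumption pieced together from the committer name and the model name above, and `hand_sign.jpg` is a placeholder:

```python
from transformers import pipeline

# Assumed hub repo id (committer + model name); adjust if the repo lives elsewhere.
classifier = pipeline(
    "image-classification",
    model="nandyc/swin-tiny-patch4-window7-224-finetuned_ASL_Isolated_Swin_dataset2",
)
print(classifier("hand_sign.jpg"))  # e.g. [{"label": "A", "score": 0.98}, ...]
```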
config.json ADDED
@@ -0,0 +1,109 @@
+ {
+   "_name_or_path": "microsoft/swin-tiny-patch4-window7-224",
+   "architectures": [
+     "SwinForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     6,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 96,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "A",
+     "1": "B",
+     "2": "C",
+     "3": "D",
+     "4": "E",
+     "5": "F",
+     "6": "G",
+     "7": "H",
+     "8": "I",
+     "9": "J",
+     "10": "K",
+     "11": "L",
+     "12": "M",
+     "13": "N",
+     "14": "O",
+     "15": "P",
+     "16": "Q",
+     "17": "R",
+     "18": "S",
+     "19": "T",
+     "20": "U",
+     "21": "V",
+     "22": "W",
+     "23": "X",
+     "24": "Y",
+     "25": "Z"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "A": 0,
+     "B": 1,
+     "C": 2,
+     "D": 3,
+     "E": 4,
+     "F": 5,
+     "G": 6,
+     "H": 7,
+     "I": 8,
+     "J": 9,
+     "K": 10,
+     "L": 11,
+     "M": 12,
+     "N": 13,
+     "O": 14,
+     "P": 15,
+     "Q": 16,
+     "R": 17,
+     "S": 18,
+     "T": 19,
+     "U": 20,
+     "V": 21,
+     "W": 22,
+     "X": 23,
+     "Y": 24,
+     "Z": 25
+   },
+   "layer_norm_eps": 1e-05,
+   "mlp_ratio": 4.0,
+   "model_type": "swin",
+   "num_channels": 3,
+   "num_heads": [
+     3,
+     6,
+     12,
+     24
+   ],
+   "num_layers": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "patch_size": 4,
+   "path_norm": true,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "use_absolute_embeddings": false,
+   "window_size": 7
+ }
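
The `id2label`/`label2id` tables define a 26-way classification head over the fingerspelling letters A–Z (note that `id2label` keys are strings, as in all Transformers configs). A small sketch, assuming a local copy of this `config.json`, that sanity-checks the two maps as mutual inverses:

```python
import json

with open("config.json") as f:  # assumes a local copy of this file
    config = json.load(f)

id2label = config["id2label"]   # string keys: "0" .. "25"
label2id = config["label2id"]

assert all(label2id[label] == int(idx) for idx, label in id2label.items())
print(sorted(id2label.values()))  # ['A', 'B', ..., 'Z']
```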
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_extractor_type": "ViTFeatureExtractor",
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
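
In plain terms: resize to 224x224 with bicubic resampling (`resample: 3` is PIL's bicubic code), scale pixels by `rescale_factor` (1/255), then normalize with the ImageNet mean/std. A sketch of the equivalent manual transform, with the image path as a placeholder:

```python
import numpy as np
from PIL import Image

img = Image.open("hand_sign.jpg").convert("RGB")      # placeholder path
img = img.resize((224, 224), resample=Image.BICUBIC)  # resample: 3 == bicubic

x = np.asarray(img, dtype=np.float32) * (1.0 / 255.0)  # rescale_factor
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
x = (x - mean) / std              # per-channel normalize
x = x.transpose(2, 0, 1)[None]    # HWC -> 1x3x224x224, as the model expects
```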
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95b9a5a546e3a36077fe778eb8de4a0906aa9c8610793051a0c6665bbd7c96b2
+ size 110468657
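
This is a Git LFS pointer file, not the weights themselves; `size 110468657` says the real `pytorch_model.bin` is about 110 MB. One way to fetch the actual file, with the repo id assumed as above:

```python
from huggingface_hub import hf_hub_download

# Assumed repo id; resolves the LFS pointer and downloads the real binary.
path = hf_hub_download(
    repo_id="nandyc/swin-tiny-patch4-window7-224-finetuned_ASL_Isolated_Swin_dataset2",
    filename="pytorch_model.bin",
)
print(path)
```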
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9208ca9886ddb8906775b14a01f07a62d8e65bd8610ca88450e54f1659cfa13
+ size 4155
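
`training_args.bin` is the serialized `TrainingArguments` object that `Trainer` writes next to its checkpoints, hence the small 4 kB size. A sketch for inspecting it, assuming a local copy and an environment where `transformers` is importable so the pickle can resolve:

```python
import torch

# Trainer saves TrainingArguments with torch.save; on PyTorch >= 2.6,
# pass weights_only=False explicitly to allow unpickling the object.
args = torch.load("training_args.bin")
print(args.learning_rate, args.num_train_epochs)  # expect 0.0002 and 10
```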