vishalkatheriya18 committed
Commit e7a1c4d · verified · 1 parent: 3eb8f6c

End of training

README.md ADDED
@@ -0,0 +1,91 @@
+ ---
+ license: apache-2.0
+ base_model: facebook/convnextv2-tiny-1k-224
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ metrics:
+ - accuracy
+ - precision
+ model-index:
+ - name: convnextv2-tiny-1k-224-finetuned-bottomwear-v2
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: imagefolder
+       type: imagefolder
+       config: default
+       split: train
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.8981481481481481
+     - name: Precision
+       type: precision
+       value: 0.9001054377012231
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # convnextv2-tiny-1k-224-finetuned-bottomwear-v2
+
+ This model is a fine-tuned version of [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) on the imagefolder dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.3267
+ - Accuracy: 0.8981
+ - Precision: 0.9001
+
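+ A minimal usage sketch (the Hub repo id below is an assumption based on this card's name; point `model=` at the local training output directory instead if the checkpoint is not published):
+
+ ```python
+ from transformers import pipeline
+
+ # Assumed repo id; not confirmed by this card.
+ classifier = pipeline(
+     "image-classification",
+     model="vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-bottomwear-v2",
+ )
+ print(classifier("bottomwear.jpg"))  # hypothetical image path
+ ```
+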
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (reconstructed as a `TrainingArguments` sketch after the list):
+ - learning_rate: 2e-05
+ - train_batch_size: 10
+ - eval_batch_size: 4
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 100
+
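+ The list above maps onto `transformers.TrainingArguments` roughly as in this sketch; it is a reconstruction, and `output_dir` plus the evaluation strategy are assumptions rather than values taken from the original training script:
+
+ ```python
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="convnextv2-tiny-1k-224-finetuned-bottomwear-v2",  # assumed
+     learning_rate=2e-5,
+     per_device_train_batch_size=10,
+     per_device_eval_batch_size=4,
+     seed=42,
+     adam_beta1=0.9,
+     adam_beta2=0.999,
+     adam_epsilon=1e-8,
+     lr_scheduler_type="linear",
+     num_train_epochs=100,
+     eval_strategy="epoch",  # assumed from the per-epoch results below
+ )
+ ```
+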
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|
+ | No log        | 1.0   | 87   | 1.3349          | 0.6852   | 0.7395    |
+ | No log        | 2.0   | 174  | 0.7869          | 0.8426   | 0.8543    |
+ | No log        | 3.0   | 261  | 0.6571          | 0.8472   | 0.8712    |
+ | No log        | 4.0   | 348  | 0.4293          | 0.9028   | 0.9122    |
+ | No log        | 5.0   | 435  | 0.4030          | 0.8935   | 0.8953    |
+ | 0.916         | 6.0   | 522  | 0.4251          | 0.8657   | 0.8787    |
+ | 0.916         | 7.0   | 609  | 0.3536          | 0.8889   | 0.8936    |
+ | 0.916         | 8.0   | 696  | 0.3611          | 0.8796   | 0.8833    |
+ | 0.916         | 9.0   | 783  | 0.3267          | 0.8981   | 0.9001    |
+ | 0.916         | 10.0  | 870  | 0.3526          | 0.8796   | 0.8972    |
+ | 0.916         | 11.0  | 957  | 0.3694          | 0.8981   | 0.9100    |
+ | 0.3192        | 12.0  | 1044 | 0.3694          | 0.8935   | 0.9007    |
+
+
+ ### Framework versions
+
+ - Transformers 4.44.0
+ - PyTorch 2.4.0
+ - Datasets 2.21.0
+ - Tokenizers 0.19.1
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 12.0,
+     "eval_accuracy": 0.8981481481481481,
+     "eval_loss": 0.32665541768074036,
+     "eval_precision": 0.9001054377012231,
+     "eval_runtime": 4.371,
+     "eval_samples_per_second": 49.417,
+     "eval_steps_per_second": 12.354
+ }
config.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "_name_or_path": "facebook/convnextv2-tiny-1k-224",
+   "architectures": [
+     "ConvNextV2ForImageClassification"
+   ],
+   "depths": [
+     3,
+     3,
+     9,
+     3
+   ],
+   "drop_path_rate": 0.0,
+   "hidden_act": "gelu",
+   "hidden_sizes": [
+     96,
+     192,
+     384,
+     768
+   ],
+   "id2label": {
+     "0": "Joggers",
+     "1": "capri",
+     "2": "jeans",
+     "3": "legging",
+     "4": "plazzo",
+     "5": "salwar",
+     "6": "shorts",
+     "7": "skirt",
+     "8": "trouser"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Joggers": 0,
+     "capri": 1,
+     "jeans": 2,
+     "legging": 3,
+     "plazzo": 4,
+     "salwar": 5,
+     "shorts": 6,
+     "skirt": 7,
+     "trouser": 8
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "convnextv2",
+   "num_channels": 3,
+   "num_stages": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "patch_size": 4,
+   "problem_type": "single_label_classification",
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.0"
+ }
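The `id2label`/`label2id` maps above enumerate the nine bottomwear classes this classifier predicts. A quick sketch of reading them back through `AutoConfig` (the repo id is an assumption; a local path to this `config.json` works the same way):

```python
from transformers import AutoConfig

# Assumed repo id; not confirmed by the commit itself.
config = AutoConfig.from_pretrained(
    "vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-bottomwear-v2"
)
print(config.num_labels)         # 9
print(config.id2label[6])        # "shorts" (keys become ints after loading)
print(config.label2id["jeans"])  # 2
```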
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 12.0,
+     "eval_accuracy": 0.8981481481481481,
+     "eval_loss": 0.32665541768074036,
+     "eval_precision": 0.9001054377012231,
+     "eval_runtime": 4.371,
+     "eval_samples_per_second": 49.417,
+     "eval_steps_per_second": 12.354
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8121506c3a1457cd93405ef8f82b739b12732099a9525b0645b7dd3219052a2a
+ size 111517364
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "crop_pct": 0.875,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ConvNextImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
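With `shortest_edge: 224` and `crop_pct: 0.875`, `ConvNextImageProcessor` resizes the shorter side to 224 / 0.875 = 256, center-crops to 224×224, rescales by 1/255, and normalizes with the ImageNet mean/std listed above. A sketch of applying it (repo id assumed, image path hypothetical):

```python
from PIL import Image
from transformers import AutoImageProcessor

# Assumed repo id; a local directory containing this file also works.
processor = AutoImageProcessor.from_pretrained(
    "vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-bottomwear-v2"
)
inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
print(inputs.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```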
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:139eb5ffb7e04f4d1eb688a1420f8202796bb4d137884794859258ade1da3950
+ size 5176