Binou committed on
Commit
3416ab1
1 Parent(s): dcf30c5

🍻 cheers

Files changed (4)
  1. README.md +3 -2
  2. all_results.json +5 -5
  3. train_results.json +5 -5
  4. trainer_state.json +251 -26
README.md CHANGED
@@ -2,6 +2,7 @@
 license: other
 base_model: apple/mobilevit-xx-small
 tags:
+- image-classification
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -14,7 +15,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: imagefolder
+      name: plankton_fairscope
       type: imagefolder
       config: default
       split: train
@@ -30,7 +31,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # vit-base-plankton
 
-This model is a fine-tuned version of [apple/mobilevit-xx-small](https://huggingface.co/apple/mobilevit-xx-small) on the imagefolder dataset.
+This model is a fine-tuned version of [apple/mobilevit-xx-small](https://huggingface.co/apple/mobilevit-xx-small) on the plankton_fairscope dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.7642
 - Accuracy: 0.8051
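For anyone picking the model up from this commit, a minimal inference sketch is below. It assumes the checkpoint is available locally under the trainer's output directory `./vit-base-plankton` (the directory recorded in `trainer_state.json`); the image path is a placeholder, and you would swap in the actual Hub repo id if loading remotely.

```python
# Minimal inference sketch; "./vit-base-plankton" and the image path are assumptions.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

checkpoint = "./vit-base-plankton"  # or the Hub repo id once published
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForImageClassification.from_pretrained(checkpoint)

image = Image.open("example_plankton.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```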
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 2.0,
-    "total_flos": 6.762570523759411e+16,
-    "train_loss": 2.928572859082903,
-    "train_runtime": 1229.5876,
-    "train_samples_per_second": 0.709,
-    "train_steps_per_second": 0.046
+    "total_flos": 6911768078843904.0,
+    "train_loss": 1.2305065219600995,
+    "train_runtime": 2179.2388,
+    "train_samples_per_second": 2.815,
+    "train_steps_per_second": 0.176
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 2.0,
-    "total_flos": 6.762570523759411e+16,
-    "train_loss": 2.928572859082903,
-    "train_runtime": 1229.5876,
-    "train_samples_per_second": 0.709,
-    "train_steps_per_second": 0.046
+    "total_flos": 6911768078843904.0,
+    "train_loss": 1.2305065219600995,
+    "train_runtime": 2179.2388,
+    "train_samples_per_second": 2.815,
+    "train_steps_per_second": 0.176
 }
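As a quick sanity check on the updated numbers (identical in `all_results.json` and `train_results.json`), the reported runtime, step rate, and sample rate are mutually consistent with the 384 optimizer steps recorded in `trainer_state.json` below:

```python
# Consistency check using the values committed above; no new data here.
train_runtime = 2179.2388            # seconds
train_steps_per_second = 0.176
train_samples_per_second = 2.815

total_steps = train_steps_per_second * train_runtime      # ~383.5 -> 384 steps (global_step)
total_samples = train_samples_per_second * train_runtime  # ~6135 images seen over 2 epochs

print(round(total_steps), round(total_samples))
```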
trainer_state.json CHANGED
@@ -1,58 +1,283 @@
 {
-  "best_metric": null,
-  "best_model_checkpoint": null,
+  "best_metric": 0.7642104625701904,
+  "best_model_checkpoint": "./vit-base-plankton/checkpoint-300",
   "epoch": 2.0,
   "eval_steps": 100,
-  "global_step": 56,
+  "global_step": 384,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.36,
-      "learning_rate": 0.00016428571428571428,
-      "loss": 3.6178,
+      "epoch": 0.05,
+      "learning_rate": 0.00019479166666666668,
+      "loss": 2.4716,
       "step": 10
     },
     {
-      "epoch": 0.71,
-      "learning_rate": 0.00012857142857142858,
-      "loss": 3.2104,
+      "epoch": 0.1,
+      "learning_rate": 0.00018958333333333332,
+      "loss": 2.296,
       "step": 20
     },
     {
-      "epoch": 1.07,
-      "learning_rate": 9.285714285714286e-05,
-      "loss": 2.8772,
+      "epoch": 0.16,
+      "learning_rate": 0.000184375,
+      "loss": 2.1599,
       "step": 30
     },
     {
-      "epoch": 1.43,
-      "learning_rate": 5.714285714285714e-05,
-      "loss": 2.6572,
+      "epoch": 0.21,
+      "learning_rate": 0.0001791666666666667,
+      "loss": 1.9804,
       "step": 40
     },
     {
-      "epoch": 1.79,
-      "learning_rate": 2.1428571428571428e-05,
-      "loss": 2.554,
+      "epoch": 0.26,
+      "learning_rate": 0.00017395833333333334,
+      "loss": 1.7886,
       "step": 50
     },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.00016875,
+      "loss": 1.7123,
+      "step": 60
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.00016354166666666668,
+      "loss": 1.6042,
+      "step": 70
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.00015833333333333332,
+      "loss": 1.6366,
+      "step": 80
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.000153125,
+      "loss": 1.5387,
+      "step": 90
+    },
+    {
+      "epoch": 0.52,
+      "learning_rate": 0.0001479166666666667,
+      "loss": 1.5476,
+      "step": 100
+    },
+    {
+      "epoch": 0.52,
+      "eval_accuracy": 0.741851368970013,
+      "eval_loss": 1.2744733095169067,
+      "eval_runtime": 213.9361,
+      "eval_samples_per_second": 14.341,
+      "eval_steps_per_second": 1.795,
+      "step": 100
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 0.00014270833333333334,
+      "loss": 1.3298,
+      "step": 110
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 0.0001375,
+      "loss": 1.19,
+      "step": 120
+    },
+    {
+      "epoch": 0.68,
+      "learning_rate": 0.00013229166666666668,
+      "loss": 1.0872,
+      "step": 130
+    },
+    {
+      "epoch": 0.73,
+      "learning_rate": 0.00012708333333333332,
+      "loss": 1.1765,
+      "step": 140
+    },
+    {
+      "epoch": 0.78,
+      "learning_rate": 0.00012187500000000001,
+      "loss": 1.1879,
+      "step": 150
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 0.00011666666666666668,
+      "loss": 1.0871,
+      "step": 160
+    },
+    {
+      "epoch": 0.89,
+      "learning_rate": 0.00011145833333333334,
+      "loss": 1.1028,
+      "step": 170
+    },
+    {
+      "epoch": 0.94,
+      "learning_rate": 0.00010625000000000001,
+      "loss": 1.1018,
+      "step": 180
+    },
+    {
+      "epoch": 0.99,
+      "learning_rate": 0.00010104166666666668,
+      "loss": 1.1769,
+      "step": 190
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 9.583333333333334e-05,
+      "loss": 1.0997,
+      "step": 200
+    },
+    {
+      "epoch": 1.04,
+      "eval_accuracy": 0.7842242503259452,
+      "eval_loss": 0.8653025031089783,
+      "eval_runtime": 205.776,
+      "eval_samples_per_second": 14.909,
+      "eval_steps_per_second": 1.866,
+      "step": 200
+    },
+    {
+      "epoch": 1.09,
+      "learning_rate": 9.062500000000001e-05,
+      "loss": 0.9956,
+      "step": 210
+    },
+    {
+      "epoch": 1.15,
+      "learning_rate": 8.541666666666666e-05,
+      "loss": 1.0083,
+      "step": 220
+    },
+    {
+      "epoch": 1.2,
+      "learning_rate": 8.020833333333334e-05,
+      "loss": 0.9768,
+      "step": 230
+    },
+    {
+      "epoch": 1.25,
+      "learning_rate": 7.500000000000001e-05,
+      "loss": 0.965,
+      "step": 240
+    },
+    {
+      "epoch": 1.3,
+      "learning_rate": 6.979166666666666e-05,
+      "loss": 0.9682,
+      "step": 250
+    },
+    {
+      "epoch": 1.35,
+      "learning_rate": 6.458333333333334e-05,
+      "loss": 0.9361,
+      "step": 260
+    },
+    {
+      "epoch": 1.41,
+      "learning_rate": 5.9375e-05,
+      "loss": 0.9819,
+      "step": 270
+    },
+    {
+      "epoch": 1.46,
+      "learning_rate": 5.4166666666666664e-05,
+      "loss": 0.8521,
+      "step": 280
+    },
+    {
+      "epoch": 1.51,
+      "learning_rate": 4.8958333333333335e-05,
+      "loss": 0.9011,
+      "step": 290
+    },
+    {
+      "epoch": 1.56,
+      "learning_rate": 4.375e-05,
+      "loss": 0.9498,
+      "step": 300
+    },
+    {
+      "epoch": 1.56,
+      "eval_accuracy": 0.8050847457627118,
+      "eval_loss": 0.7642104625701904,
+      "eval_runtime": 203.4405,
+      "eval_samples_per_second": 15.081,
+      "eval_steps_per_second": 1.888,
+      "step": 300
+    },
+    {
+      "epoch": 1.61,
+      "learning_rate": 3.854166666666667e-05,
+      "loss": 0.9927,
+      "step": 310
+    },
+    {
+      "epoch": 1.67,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 0.8678,
+      "step": 320
+    },
+    {
+      "epoch": 1.72,
+      "learning_rate": 2.8125000000000003e-05,
+      "loss": 0.8756,
+      "step": 330
+    },
+    {
+      "epoch": 1.77,
+      "learning_rate": 2.2916666666666667e-05,
+      "loss": 0.8625,
+      "step": 340
+    },
+    {
+      "epoch": 1.82,
+      "learning_rate": 1.7708333333333335e-05,
+      "loss": 0.9351,
+      "step": 350
+    },
+    {
+      "epoch": 1.88,
+      "learning_rate": 1.25e-05,
+      "loss": 0.9369,
+      "step": 360
+    },
+    {
+      "epoch": 1.93,
+      "learning_rate": 7.2916666666666674e-06,
+      "loss": 0.9611,
+      "step": 370
+    },
+    {
+      "epoch": 1.98,
+      "learning_rate": 2.0833333333333334e-06,
+      "loss": 0.7286,
+      "step": 380
+    },
     {
       "epoch": 2.0,
-      "step": 56,
-      "total_flos": 6.762570523759411e+16,
-      "train_loss": 2.928572859082903,
-      "train_runtime": 1229.5876,
-      "train_samples_per_second": 0.709,
-      "train_steps_per_second": 0.046
+      "step": 384,
+      "total_flos": 6911768078843904.0,
+      "train_loss": 1.2305065219600995,
+      "train_runtime": 2179.2388,
+      "train_samples_per_second": 2.815,
+      "train_steps_per_second": 0.176
     }
   ],
   "logging_steps": 10,
-  "max_steps": 56,
+  "max_steps": 384,
   "num_train_epochs": 2,
   "save_steps": 100,
-  "total_flos": 6.762570523759411e+16,
+  "total_flos": 6911768078843904.0,
   "trial_name": null,
   "trial_params": null
 }
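To pull the headline numbers back out of the committed `trainer_state.json`, e.g. when comparing this run against the previous one, a small inspection sketch (the relative file path assumes you are reading it from a local clone of this repo):

```python
# Inspect the committed trainer_state.json; the local path is an assumption.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print("best eval loss:", state["best_metric"])               # 0.7642...
print("best checkpoint:", state["best_model_checkpoint"])    # ./vit-base-plankton/checkpoint-300

# Split the log history into training-loss points and evaluation points.
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_accuracy"]) for e in state["log_history"] if "eval_accuracy" in e]

print("last logged train loss:", train_curve[-1])
print("eval accuracy by step:", eval_points)
```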