add configs
Caltech-101.py  CHANGED  (+28 -2)
@@ -156,6 +156,22 @@ class Caltech101(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.0.0")
 
+    _BUILDER_CONFIG_WITH_BACKGROUND = datasets.BuilderConfig(
+        name="with_background_category",
+        version=VERSION,
+        description="Dataset containing the 101 categories and the additional background one.",
+    )
+    _BUILDER_CONFIG_WITHOUT_BACKGROUND = datasets.BuilderConfig(
+        name="without_background_category",
+        version=VERSION,
+        description="Dataset containing only the 101 categories.",
+    )
+
+    BUILDER_CONFIGS = [
+        _BUILDER_CONFIG_WITH_BACKGROUND,
+        _BUILDER_CONFIG_WITHOUT_BACKGROUND,
+    ]
+
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -188,6 +204,7 @@ class Caltech101(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": data_dir,
                     "split": "train",
+                    "config_name": self.config.name,
                 },
             ),
             datasets.SplitGenerator(
@@ -195,13 +212,14 @@ class Caltech101(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": data_dir,
                     "split": "test",
+                    "config_name": self.config.name,
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, split, config_name):
         # Same strategy as the one proposed in TF datasets: 30 random examples from each class are added to the train
-        # split, and the remainder are added to the test split.
+        # split, and the remainder are added to the test split.
         # Source: https://github.com/tensorflow/datasets/blob/1106d587f97c4fca68c5b593dc7dc48c790ffa8c/tensorflow_datasets/image_classification/caltech.py#L88-L140
 
         is_train_split = split == "train"
@@ -212,6 +230,7 @@ class Caltech101(datasets.GeneratorBasedBuilder):
         np.random.seed(1234)
 
         for class_dir in data_dir.iterdir():
+            # print(class_dir)
             fnames = [
                 image_path
                 for image_path in class_dir.iterdir()
@@ -231,6 +250,13 @@ class Caltech101(datasets.GeneratorBasedBuilder):
             test_fnames = set(fnames).difference(train_fnames)
             fnames_to_emit = train_fnames if is_train_split else test_fnames
 
+            if (
+                class_dir.name == "BACKGROUND_Google"
+                and config_name == self._BUILDER_CONFIG_WITHOUT_BACKGROUND.name
+            ):
+                print("skip BACKGROUND_Google")
+                continue
+
             for image_file in fnames_to_emit:
                 record = {
                     "image": str(image_file),