|
import json |
|
import os |
|
|
|
import datasets |
|
from datasets.tasks import ImageClassification |
|
|
|
|
|
# Module-level logger following the datasets library convention.
logger = datasets.logging.get_logger(__name__)


# BibTeX citation for the dataset (none published yet).
_CITATION = """"""

_DESCRIPTION = """\

Ornithoscope dataset is the dataset used to train the model for the Ornithoscope project.

"""

# Project homepage URL (not yet set).
_HOMEPAGE = ""

# Sub-folders under data/ whose .tar archives contain the images;
# each entry maps to an archive at data/<folder>.tar
# (downloaded in Ornithoscope._get_archives).
_FOLDERS = [

    'iNatv1',

    'iNatv2',

    'PhotoFeederv1/task_05-01-2021',

]
|
|
|
|
|
class OrnithoscopeConfig(datasets.BuilderConfig):
    """BuilderConfig for Ornithoscope."""

    def __init__(
        self,
        classes: list[str],
        train_json: str,
        validation_json: str,
        test_json: str,
        **kwargs,
    ):
        """BuilderConfig for Ornithoscope.

        Args:
            classes: list of classes.
            train_json: path to the json file containing the train annotations.
            validation_json: path to the json file containing the validation annotations.
            test_json: path to the json file containing the test annotations.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.classes = classes
        # Bug fix: these three paths were previously accepted but silently
        # dropped, while Ornithoscope._split_generators reads them via
        # self.config.train_json / validation_json / test_json.
        self.train_json = train_json
        self.validation_json = validation_json
        self.test_json = test_json
|
|
|
|
|
class Ornithoscope(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Ornithoscope image dataset.

    Annotation files (one JSON per split) map an image id-path to its
    bounding boxes and size; the images themselves live in the .tar
    archives listed in _FOLDERS.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        OrnithoscopeConfig(
            name="DS3",
            description="The main dataset.",
            classes=[],
            train_json="sets/DS3_train.json",
            validation_json="sets/DS3_val.json",
            test_json="sets/DS3_test.json",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return the dataset metadata (features, citation, task templates)."""
        # NOTE(review): the declared features (image/label) do not match the
        # keys yielded by _generate_examples (id_path/path/boxes/size) —
        # confirm which schema is intended before relying on this builder.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(names=self.config.classes),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(
                image_column="image",
                label_column="label",
            )],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
        """Returns SplitGenerators for the train, validation and test splits."""
        archives = self._get_archives(dl_manager)
        # The three splits share identical loading logic; it previously was
        # triplicated inline, and the test copy had two bugs (.item() typo
        # and a missing append) — now factored into _load_split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "values": self._load_split(dl_manager, self.config.train_json, archives),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "values": self._load_split(dl_manager, self.config.validation_json, archives),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "values": self._load_split(dl_manager, self.config.test_json, archives),
                },
            ),
        ]

    def _load_split(
        self,
        dl_manager: datasets.DownloadManager,
        json_path: str,
        archives: dict,
    ) -> list[dict]:
        """Load one split's annotation JSON and resolve each image's local path.

        Args:
            dl_manager: download manager used to fetch the annotation file.
            json_path: repository-relative path of the split's JSON file.
            archives: mapping of folder name -> extracted archive root,
                as returned by _get_archives.

        Returns:
            One dict per image with keys id_path, path, boxes and size.
        """
        # Use a context manager so the file handle is closed promptly
        # (the original left it to the garbage collector).
        with open(dl_manager.download_and_extract(json_path), 'r') as f:
            annotations = json.load(f)
        values = []
        for id_path, value in annotations.items():
            root, file = os.path.split(id_path)
            values.append({
                "id_path": id_path,
                # Resolve against the extracted archive for this folder.
                "path": os.path.join(archives[root], file),
                "boxes": value['boxes'],
                "size": value['size'],
            })
        return values

    def _generate_examples(self, values: list) -> tuple:
        """Yields (index, example) pairs for one split."""
        for idx, val in enumerate(values):
            yield idx, {
                "id_path": val["id_path"],
                "path": val["path"],
                "boxes": val["boxes"],
                "size": val["size"],
            }

    def _get_archives(self, dl_manager: datasets.DownloadManager) -> dict:
        """Download and extract the image archives.

        Returns a mapping of folder name -> extracted archive root.
        Folders whose archive cannot be fetched are logged and skipped
        (left out of the mapping), matching the original best-effort intent.
        """
        archives = {}
        for folder in _FOLDERS:
            # Bug fix: the original looped `while True` around a successful
            # download (the loop counter was never used in the path), which
            # never terminated; a single attempt per folder is sufficient.
            try:
                archives[folder] = dl_manager.download_and_extract(
                    f'data/{folder}.tar'
                )
            except Exception:
                logger.warning("Could not download archive data/%s.tar", folder)
        return archives
|
|