|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
# BibTeX citation for the dataset (none published yet).
_CITATION = """"""

# Human-readable summary used in the generated DatasetInfo.
_DESCRIPTION = """\

Ornithoscope dataset is the dataset used to train the model for the Ornithoscope project.

"""

# Project homepage URL (not set).
_HOMEPAGE = ""
|
|
|
# Folders (relative to the repository's `data/` directory) that each hold
# one `<folder>.tar` archive of images; `Ornithoscope._get_archives`
# downloads and extracts every one of them.  Annotation id-paths are of
# the form "<folder>/<file>", so these names must match the directory
# part of the keys in the split JSON files.
_FOLDERS = [
    'iNatv1',
    'iNatv2/10069',
    'iNatv2/13851',
    'iNatv2/145303',
    'iNatv2/14850',
    'iNatv2/17871',
    'iNatv2/18911',
    'iNatv2/204496',
    'iNatv2/3017',
    'iNatv2/7278',
    'iNatv2/792985',
    'iNatv2/8088',
    'iNatv2/9398',
    'iNatv2/9801',
    'PhotoFeederv1/task_05-01-2021',
    'PhotoFeederv1/task_06-01-2021',
    'PhotoFeederv1/task_18-01-2021',
    'PhotoFeederv1/task_19-01-2021',
    'PhotoFeederv1/task_20210205',
    'PhotoFeederv1/task_20210217',
    'PhotoFeederv1/task_20210227',
    'PhotoFeederv1/task_20210228',
    'PhotoFeederv1/task_2021-03-01_07',
    'PhotoFeederv1/task_2021-03-01_08',
    'PhotoFeederv1/task_2021-03-01_09',
    'PhotoFeederv1/task_2021-03-01_10',
    'PhotoFeederv1/task_2021-03-01_11',
    'PhotoFeederv1/task_2021-03-01_12',
    'PhotoFeederv1/task_2021-03-01_13',
    'PhotoFeederv1/task_2021-03-01_14',
    'PhotoFeederv1/task_2021-03-01_15',
    'PhotoFeederv1/task_2021-03-01_16',
    'PhotoFeederv1/task_2021-03-01_17',
    'PhotoFeederv1/task_2021-03-01_18',
    'PhotoFeederv1/task_20210409',
    'PhotoFeederv1/task_20210411',
    'PhotoFeederv1/task_20210412',
    'PhotoFeederv1/task_20210413_UPS',
    'PhotoFeederv1/task_20210414',
    'PhotoFeederv1/task_20210415_UPS',
    'PhotoFeederv1/task_20210416_UPS',
    'PhotoFeederv1/task_20210417_UPS',
    'PhotoFeederv1/task_20210418_UPS',
    'PhotoFeederv1/task_20210419_UPS',
    'PhotoFeederv1/task_20210420',
    'PhotoFeederv1/task_20210523_UPS',
    'PhotoFeederv1/task_20210525_UPS',
    'PhotoFeederv1/task_20210526_UPS',
    'PhotoFeederv1/task_20210611_Lab',
    'PhotoFeederv1/task_20210612_1_Lab',
    'PhotoFeederv1/task_20210615_Lab',
    'PhotoFeederv1/task_20210616_Lab',
    'PhotoFeederv1/task_20210623_balacet',
    'PhotoFeederv1/task_20210624_balacet',
    'PhotoFeederv1/task_20210625_balacet',
    'PhotoFeederv1/task_20210705-07_balacet',
    'PhotoFeederv1/task_20211008_Moulis',
    'PhotoFeederv1/task_2021_11_03-04_cescau4',
    'PhotoFeederv1/task_20211109_cescau1',
    'PhotoFeederv1/task_20211204_Orlu',
    'PhotoFeederv1/task_21-01-2021',
    'PhotoFeederv1/task_berggris_dordogne',
    'PhotoFeederv1/task_berggris',
    'PhotoFeederv1/task_MOIDOM_ODJ',
    'PhotoFeederv2/Balacet_session1',
    'PhotoFeederv2/Balacet_session4',
    'PhotoFeederv2/C1_session1',
    'PhotoFeederv2/C1_session3',
    'PhotoFeederv2/C1_session4',
    'PhotoFeederv2/C4_session1',
    'PhotoFeederv2/C4_session4',
    'PhotoFeederv2/Francon_session1',
    'PhotoFeederv2/Francon_session4',
    'PhotoFeederv2/Montpellier_session1',
    'PhotoFeederv2/Montpellier_session4',
    'PhotoFeederv2/Moulis_session4',
    'PhotoFeederv2/Orlu_session4',
]
|
|
|
|
|
class OrnithoscopeConfig(datasets.BuilderConfig):
    """Configuration for one Ornithoscope dataset variant.

    A config names the three annotation files that define the contents
    of the train, validation and test splits for that variant.
    """

    def __init__(
        self,
        train_json: str,
        validation_json: str,
        test_json: str,
        **kwargs
    ):
        """Create a config for one dataset variant.

        Args:
            train_json: Path to the JSON file with the train annotations.
            validation_json: Path to the JSON file with the validation
                annotations.
            test_json: Path to the JSON file with the test annotations.
            **kwargs: Extra keyword arguments forwarded to
                ``datasets.BuilderConfig``.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        # Remember where each split's annotation file lives.
        self.test_json = test_json
        self.validation_json = validation_json
        self.train_json = train_json
|
|
|
|
|
class Ornithoscope(datasets.GeneratorBasedBuilder):
    """Builder for the Ornithoscope bird-detection dataset.

    Each configuration (DS1 ... DS9.5) is defined by three annotation
    JSON files under ``sets/`` that map an image id-path to its bounding
    boxes and image size.  The images themselves are shipped as one tar
    archive per folder listed in ``_FOLDERS``.
    """

    # Names of the available dataset variants; one BuilderConfig is
    # generated per name below.
    NAMES = [
        'DS1',
        'DS2',
        'DS3',
        'DS4',
        'DS5',
        'DS6',
        'DS7',
        'DS8',
        'DS9.0',
        'DS9.1',
        'DS9.2',
        'DS9.3',
        'DS9.4',
        'DS9.5',
    ]
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        OrnithoscopeConfig(
            name=name,
            description=f'{name} ornithoscope dataset.',
            train_json=f'sets/{name}_train.json',
            validation_json=f'sets/{name}_val.json',
            test_json=f'sets/{name}_test.json',
        )
        for name in NAMES
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return the dataset metadata."""
        # NOTE(review): no `features=` schema is declared here, although
        # _generate_examples yields id_path/path/boxes/size dicts —
        # confirm whether the features spec was intentionally omitted.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
        """Returns SplitGenerators for the train/validation/test splits."""
        archives = self._get_archives(dl_manager)
        split_jsons = [
            (datasets.Split.TRAIN, self.config.train_json),
            (datasets.Split.VALIDATION, self.config.validation_json),
            (datasets.Split.TEST, self.config.test_json),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "values": self._load_split(dl_manager, archives, json_path),
                },
            )
            for split, json_path in split_jsons
        ]

    def _load_split(self, dl_manager: datasets.DownloadManager, archives: dict, json_path: str) -> list:
        """Load one split's annotation JSON and resolve image locations.

        Args:
            dl_manager: Download manager used to fetch the annotation file.
            archives: Mapping from dataset folder to its extracted local
                path, as returned by `_get_archives`.
            json_path: Repository-relative path of the annotation file.

        Returns:
            A list of dicts with keys "id_path", "path", "boxes" and
            "size", one per annotated image.
        """
        # `with` closes the file handle (the previous code leaked it).
        with open(dl_manager.download_and_extract(json_path), 'r') as fp:
            annotations = json.load(fp)
        values = []
        for id_path, value in annotations.items():
            # The id-path is "<folder>/<file>"; the actual image lives
            # inside the extracted archive for that folder.
            root, file = os.path.split(id_path)
            values.append({
                "id_path": id_path,
                "path": os.path.join(archives[root], file),
                "boxes": value['boxes'],
                "size": value['size'],
            })
        return values

    def _generate_examples(self, values: list) -> tuple:
        """Yields (index, example) pairs for one split."""
        for idx, val in enumerate(values):
            yield idx, {
                "id_path": val["id_path"],
                "path": val["path"],
                "boxes": val["boxes"],
                "size": val["size"],
            }

    def _get_archives(self, dl_manager: datasets.DownloadManager) -> dict:
        """Download and extract every image archive.

        Returns:
            A dict mapping each folder in `_FOLDERS` to the local path
            of its extracted tar archive.
        """
        return {
            folder: dl_manager.download_and_extract(f'data/{folder}.tar')
            for folder in _FOLDERS
        }
|
|