# ornithoscope/ornithoscope.py
import json
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """"""
_DESCRIPTION = """\
Ornithoscope dataset is the dataset used to train the model for the Ornithoscope project.
"""
_HOMEPAGE = ""
_FOLDERS = [
'iNatv1',
'iNatv2',
'PhotoFeederv1/task_05-01-2021',
'PhotoFeederv1/task_06-01-2021',
'PhotoFeederv1/task_18-01-2021',
'PhotoFeederv1/task_19-01-2021',
'PhotoFeederv1/task_20210205',
'PhotoFeederv1/task_20210217',
'PhotoFeederv1/task_20210227',
'PhotoFeederv1/task_20210228',
'PhotoFeederv1/task_2021-03-01_07',
'PhotoFeederv1/task_2021-03-01_08',
'PhotoFeederv1/task_2021-03-01_09',
'PhotoFeederv1/task_2021-03-01_10',
'PhotoFeederv1/task_2021-03-01_11',
'PhotoFeederv1/task_2021-03-01_12',
'PhotoFeederv1/task_2021-03-01_13',
'PhotoFeederv1/task_2021-03-01_14',
'PhotoFeederv1/task_2021-03-01_15',
'PhotoFeederv1/task_2021-03-01_16',
'PhotoFeederv1/task_2021-03-01_17',
'PhotoFeederv1/task_2021-03-01_18',
'PhotoFeederv1/task_20210409',
'PhotoFeederv1/task_20210411',
'PhotoFeederv1/task_20210412',
'PhotoFeederv1/task_20210413_UPS',
'PhotoFeederv1/task_20210414',
'PhotoFeederv1/task_20210415_UPS',
'PhotoFeederv1/task_20210416_UPS',
'PhotoFeederv1/task_20210417_UPS',
'PhotoFeederv1/task_20210418_UPS',
'PhotoFeederv1/task_20210419_UPS',
'PhotoFeederv1/task_20210420',
'PhotoFeederv1/task_20210523_UPS',
'PhotoFeederv1/task_20210525_UPS',
'PhotoFeederv1/task_20210526_UPS',
'PhotoFeederv1/task_20210611_Lab',
'PhotoFeederv1/task_20210612_1_Lab',
'PhotoFeederv1/task_20210615_Lab',
'PhotoFeederv1/task_20210616_Lab',
'PhotoFeederv1/task_20210623_balacet',
'PhotoFeederv1/task_20210624_balacet',
'PhotoFeederv1/task_20210625_balacet',
'PhotoFeederv1/task_20210705-07_balacet',
'PhotoFeederv1/task_20211008_Moulis',
'PhotoFeederv1/task_2021_11_03-04_cescau4',
'PhotoFeederv1/task_20211109_cescau1',
'PhotoFeederv1/task_20211204_Orlu',
'PhotoFeederv1/task_21-01-2021',
'PhotoFeederv1/task_berggris_dordogne',
'PhotoFeederv1/task_berggris',
'PhotoFeederv1/task_MOIDOM_ODJ',
'PhotoFeederv2/Balacet_session1',
'PhotoFeederv2/Balacet_session4',
'PhotoFeederv2/C1_session1',
'PhotoFeederv2/C1_session3',
'PhotoFeederv2/C1_session4',
'PhotoFeederv2/C4_session1',
'PhotoFeederv2/C4_session4',
'PhotoFeederv2/Francon_session1',
'PhotoFeederv2/Francon_session4',
'PhotoFeederv2/Montpellier_session1',
'PhotoFeederv2/Montpellier_session4',
'PhotoFeederv2/Moulis_session4',
'PhotoFeederv2/Orlu_session4',
]


class OrnithoscopeConfig(datasets.BuilderConfig):
"""BuilderConfig for Ornithoscope."""
def __init__(
self,
train_json: str,
validation_json: str,
test_json: str,
**kwargs
):
"""BuilderConfig for Ornithoscope.
Args:
train_json: path to the json file containing the train annotations.
validation_json: path to the json file containing the validation annotations.
test_json: path to the json file containing the test annotations.
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
self.train_json = train_json
self.validation_json = validation_json
self.test_json = test_json


class Ornithoscope(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Ornithoscope sets."""

VERSION = datasets.Version("1.0.0")
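
    # Each config maps a dataset name to its train/validation/test annotation
    # files under sets/; select one via the `name` argument of load_dataset.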
BUILDER_CONFIGS = [
OrnithoscopeConfig(
name="DS3",
description="The main dataset.",
train_json="sets/DS3_train.json",
validation_json="sets/DS3_val.json",
test_json="sets/DS3_test.json",
),
OrnithoscopeConfig(
name="DS4",
description="The new dataset.",
train_json="sets/DS4_train.json",
validation_json="sets/DS4_val.json",
test_json="sets/DS4_test.json",
),
OrnithoscopeConfig(
name="DS5",
description="The new dataset.",
train_json="sets/DS5_train.json",
validation_json="sets/DS5_val.json",
test_json="sets/DS5_test.json",
),
OrnithoscopeConfig(
name="DS6",
description="The new dataset.",
train_json="sets/DS6_train.json",
validation_json="sets/DS6_val.json",
test_json="sets/DS6_test.json",
),
OrnithoscopeConfig(
name="DS7",
description="The new dataset.",
train_json="sets/DS7_train.json",
validation_json="sets/DS7_val.json",
test_json="sets/DS7_test.json",
),
]

    def _info(self) -> datasets.DatasetInfo:
return datasets.DatasetInfo(
description=_DESCRIPTION,
# features=datasets.Features(
# {
# "id_path": datasets.Value("string"),
# "path": datasets.Value("string"),
# "boxes": datasets.Sequence(
# {
# "label": datasets.Value("string"),
# "xmin": datasets.Value("float32"),
# "xmax": datasets.Value("float32"),
# "ymin": datasets.Value("float32"),
# "ymax": datasets.Value("float32"),
# }
# ),
# "size": {
# "width": datasets.Value("int32"),
# "height": datasets.Value("int32"),
# },
# },
# ),
homepage=_HOMEPAGE,
citation=_CITATION,
)
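
    # With `features` left unset, the example schema is inferred at generation
    # time; the commented block above records the intended layout.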

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
        """Returns SplitGenerators for the train, validation and test splits."""
        archives = self._get_archives(dl_manager)

        def load_split(json_path: str) -> list:
            """Load one annotation file and resolve each image path inside its extracted archive."""
            with open(dl_manager.download_and_extract(json_path), 'r') as f:
                annotations = json.load(f)
            values = []
            for id_path, value in annotations.items():
                root, file = os.path.split(id_path)
                values.append({
                    "id_path": id_path,
                    "path": os.path.join(archives[root], file),
                    "boxes": value['boxes'],
                    "size": value['size'],
                })
            return values

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"values": load_split(self.config.train_json)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"values": load_split(self.config.validation_json)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"values": load_split(self.config.test_json)},
            ),
        ]

    def _generate_examples(self, values: list):
        """Yields (key, example) pairs."""
        for idx, val in enumerate(values):
            yield idx, {
                "id_path": val["id_path"],
                "path": val["path"],
                "boxes": val["boxes"],
                "size": val["size"],
            }

    def _get_archives(self, dl_manager: datasets.DownloadManager) -> dict:
        """Download and extract the image archives, keyed by folder name."""
        archives = {}
        for folder in _FOLDERS:
            # Every folder listed in _FOLDERS is stored as a tar archive under data/.
            archives[folder] = dl_manager.download_and_extract(
                f'data/{folder}.tar'
            )
        return archives
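

if __name__ == "__main__":
    # A minimal smoke-test sketch, not part of the loading API. Assumptions:
    # this script is run from a checkout of the dataset repository (so the
    # sets/ and data/ paths above resolve) and a `datasets` release that
    # still supports script-based datasets with trust_remote_code.
    ds = datasets.load_dataset(__file__, name="DS3", trust_remote_code=True)
    print(ds)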