"""Hugging Face `datasets` loading script for the RDD2020 road damage detection dataset:
road images from the Czech Republic, India, and Japan, with Pascal VOC-style XML
annotations (damage type and bounding boxes) available for the train split."""

import os
import xml.etree.ElementTree as ET

import datasets
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {A great new dataset}, |
|
author={Shixuan An |
|
}, |
|
year={2024} |
|
} |
|
""" |

_DESCRIPTION = """\
RDD2020 is a road damage detection dataset of road images collected in the Czech Republic,
India, and Japan. The train split provides Pascal VOC-style XML annotations with the damage
type and bounding-box coordinates of each annotated crack; the two test splits contain
images only.
"""

_HOMEPAGE = "https://data.mendeley.com/datasets/5ty2wb6gvg/1"

_LICENSE = ""


class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
    """RDD2020 road damage images with per-image crack types and bounding-box annotations."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image_id": datasets.Value("string"),
                "country": datasets.Value("string"),
                "type": datasets.Value("string"),
                # Image size as recorded in the Pascal VOC annotation (None when no annotation exists).
                "image_resolution": {
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                    "depth": datasets.Value("int32"),
                },
                "image_path": datasets.Value("string"),
                # One entry per annotated damage object in the image.
                "crack_type": datasets.Sequence(datasets.Value("string")),
                "crack_coordinates": datasets.Sequence({
                    "x_min": datasets.Value("int32"),
                    "x_max": datasets.Value("int32"),
                    "y_min": datasets.Value("int32"),
                    "y_max": datasets.Value("int32"),
                }),
            }),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
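
    # For reference, `_generate_examples` yields records shaped like the following
    # (field values are illustrative, not taken from the actual data):
    # {
    #     "image_id": "Japan_000001",
    #     "country": "Japan",
    #     "type": "train",
    #     "image_resolution": {"width": 600, "height": 600, "depth": 3},
    #     "image_path": ".../train/Japan/images/Japan_000001.jpg",
    #     "crack_type": ["D00"],
    #     "crack_coordinates": [{"x_min": 10, "x_max": 120, "y_min": 35, "y_max": 260}],
    # }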

    def _split_generators(self, dl_manager):
        urls_to_download = {
            "train": "https://huggingface.co./datasets/ShixuanAn/RDD_2020/resolve/main/train.zip",
            "test1": "https://huggingface.co./datasets/ShixuanAn/RDD_2020/resolve/main/test1.zip",
            "test2": "https://huggingface.co./datasets/ShixuanAn/RDD_2020/resolve/main/test2.zip",
        }

        downloaded_files = {
            name: dl_manager.download_and_extract(url)
            for name, url in urls_to_download.items()
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(downloaded_files["train"], "train"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(downloaded_files["test1"], "test1"),
                    "split": "test1",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(downloaded_files["test2"], "test2"),
                    "split": "test2",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Each split archive contains one folder per country with an `images` sub-folder;
        # the train split additionally ships Pascal VOC XML files under `annotations/xmls`.
        for country_dir in ["Czech", "India", "Japan"]:
            images_dir = os.path.join(filepath, country_dir, "images")
            annotations_dir = (
                os.path.join(filepath, country_dir, "annotations", "xmls")
                if split == "train"
                else None
            )

            for image_file in sorted(os.listdir(images_dir)):
                if not image_file.endswith(".jpg"):
                    continue

                image_id = os.path.splitext(image_file)[0]
                image_path = os.path.join(images_dir, image_file)

                image_resolution = None
                crack_type = []
                crack_coordinates = []

                if annotations_dir:
                    annotation_path = os.path.join(annotations_dir, image_id + ".xml")
                    # Skip train images that have no matching annotation file.
                    if not os.path.exists(annotation_path):
                        continue
                    root = ET.parse(annotation_path).getroot()

                    # Image size as recorded in the annotation, when present.
                    size = root.find("size")
                    if size is not None:
                        image_resolution = {
                            "width": int(size.find("width").text),
                            "height": int(size.find("height").text),
                            "depth": int(size.find("depth").text),
                        }

                    # One damage label and bounding box per annotated object.
                    for obj in root.findall("object"):
                        crack_type.append(obj.find("name").text)
                        bndbox = obj.find("bndbox")
                        crack_coordinates.append({
                            "x_min": int(bndbox.find("xmin").text),
                            "x_max": int(bndbox.find("xmax").text),
                            "y_min": int(bndbox.find("ymin").text),
                            "y_max": int(bndbox.find("ymax").text),
                        })

                yield image_id, {
                    "image_id": image_id,
                    "country": country_dir,
                    "type": split,
                    "image_resolution": image_resolution,
                    "image_path": image_path,
                    "crack_type": crack_type,
                    "crack_coordinates": crack_coordinates,
                }
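

# Minimal local smoke test -- an illustrative sketch, not part of the loader itself.
# It assumes a `datasets` release that still executes dataset scripts (older versions,
# or newer ones invoked with `trust_remote_code=True`), and it downloads and extracts
# the full split archives on first run.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, split="train")
    print(dataset)      # split summary: number of rows and column names
    print(dataset[0])   # first example: image path plus crack annotations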