import os import shutil import json from pathlib import Path from tqdm import tqdm from glob import glob from typing import Dict, Any, List, Union, Iterator import yaml from yaml.loader import SafeLoader import datasets from datasets.download.download_manager import DownloadManager, ArchiveIterable from pylabel import importer _DESCRIPTION = """\ Training image sets and labels/bounding box coordinates for detecting brain tumors in MR images. - The datasets JPGs exported at their native size and are separated by plan (Axial, Coronal and Sagittal). - Tumors were hand labeled using https://makesense.ai - Bounding box coordinates and MGMT positive labels were marked on ~400 images for each plane in the T1wCE series from the RSNA-MICCAI competition data. """ _URLS = { "yolo": "https://huggingface.co./datasets/chanelcolgate/tumorsbrain/resolve/main/data/archive.zip" } _CLASSES = ["negative", "positive"] # move all into one folder def copy_yolo_files(from_folder, to_folder, images_labels, train_test): from_path = os.path.join(from_folder, images_labels, train_test) to_path = os.path.join(to_folder, images_labels, train_test) os.makedirs(to_path, exist_ok=True) # get files file_ext = "*.jpg" if images_labels == "images" else "*.txt" files = glob(os.path.join(from_path, file_ext)) # move files for file in tqdm(files): shutil.copy(file, to_path) def yolo_to_coco(input_folder, output_folder, train_test): labels_path = os.path.join(input_folder, "labels", train_test) images_path = os.path.join(input_folder, "images", train_test) coco_dir = os.path.join(output_folder, train_test) os.makedirs(coco_dir, exist_ok=True) txt_files = glob(os.path.join(labels_path, "*.txt")) img_files = glob(os.path.join(images_path, "*.jpg")) # copy annotations for f in tqdm(txt_files): shutil.copy(f, coco_dir) # copy images for f in tqdm(img_files): shutil.copy(f, coco_dir) # get the classes with open(os.path.join(input_folder, "classes.txt"), "r") as f: classes = f.read().split("\n") # load dataset dataset = importer.ImportYoloV5( path=coco_dir, cat_names=classes, name="brain tumors" ) # export coco_file = os.path.join(coco_dir, "_annotations.coco.json") # Detection requires starting index from 1 dataset.export.ExportToCoco(coco_file, cat_id_index=0) # now delete yolo annotations in coco set for f in txt_files: os.remove(f.replace(labels_path, coco_dir)) def round_box_values(box, decimals=2): return [round(val, decimals) for val in box] class COCOHelper: """Helper class to load COCO annotations""" def __init__(self, annotation_path: Path, images_dir: Path) -> None: with open(annotation_path, "r") as file: data = json.load(file) self.data = data dict_id2annot: Dict[int, Any] = {} for annot in self.annotations: dict_id2annot.setdefault(annot["image_id"], []).append(annot) # Sort by id dict_id2annot = { k: list(sorted(v, key=lambda a: a["id"])) for k, v in dict_id2annot.items() } self.dict_path2annot: Dict[str, Any] = {} self.dict_path2id: Dict[str, Any] = {} for img in self.images: path_img = os.path.join(images_dir, img["file_name"]) path_img_str = os.path.normpath(path_img) idx = int(img["id"]) annot = dict_id2annot.get(idx, []) self.dict_path2annot[path_img_str] = annot self.dict_path2id[path_img_str] = img["id"] def __len__(self) -> int: return len(self.data["images"]) @property def images(self) -> List[Dict[str, Union[str, int]]]: return self.data["images"] @property def annotations(self) -> List[Any]: return self.data["annotations"] @property def categories(self) -> List[Dict[str, Union[str, int]]]: return self.data["categories"] def get_annotations(self, image_path: str) -> List[Any]: return self.dict_path2annot.get(image_path, []) def get_image_id(self, image_path: str) -> int: return self.dict_path2id.get(image_path, -1) class COCOBrainTumor(datasets.GeneratorBasedBuilder): """COCO Brain Tumor dataset""" VERSION = datasets.Version("1.0.1") def _info(self) -> datasets.DatasetInfo: """ Return the dataset metadata and features. Returns: DatasetInfo: Metadata and features of the dataset. """ return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "image": datasets.Image(), "image_id": datasets.Value("int64"), "objects": datasets.Sequence( { "id": datasets.Value("int64"), "area": datasets.Value("float64"), "bbox": datasets.Sequence( datasets.Value("float32"), length=4 ), "label": datasets.ClassLabel(names=_CLASSES), "iscrowd": datasets.Value("bool"), } ), } ), ) def _split_generators( self, dl_manager: DownloadManager ) -> List[datasets.SplitGenerator]: """ Provides the split information and downloads the data. Args: dl_manager (DownloadManager): The DownloadManager to use for downloading and extracting data. Returns: List[SplitGenerator]: List of SplitGenrator objects representing the data splits. """ archive_yolo = dl_manager.download(_URLS["yolo"]) archive_yolo = dl_manager.extract(archive_yolo) data_folder = "braintumors" data_folder_yolo = data_folder + "_yolo" data_folder_coco = data_folder + "_coco" folders = os.listdir(str(archive_yolo)) # copy for from_folder in folders: from_folder = os.path.join(archive_yolo, from_folder) to_folder = os.path.join(archive_yolo, data_folder_yolo) for images_labels in ["images", "labels"]: for train_test in ["train", "test"]: copy_yolo_files( from_folder, to_folder, images_labels, train_test ) # Open the file and load the file with open( os.path.join(archive_yolo, folders[0], folders[0] + ".yaml") ) as f: classes = yaml.load(f, Loader=SafeLoader)["names"] # Write classes.txt with open( os.path.join(archive_yolo, data_folder_yolo, "classes.txt"), "w" ) as f: f.write("\n".join(classes)) data_folder_yolo = os.path.join(archive_yolo, data_folder_yolo) data_folder_coco = os.path.join(archive_yolo, data_folder_coco) yolo_to_coco(data_folder_yolo, data_folder_coco, "train") yolo_to_coco(data_folder_yolo, data_folder_coco, "test") name_ds = str(archive_yolo) + "/braintumors_coco" image_root_train = name_ds + "/train" image_root_test = name_ds + "/test" af = "_annotations.coco.json" json_file_train = name_ds + "/train/" + af json_file_test = name_ds + "/test/" + af return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "annotation_path": json_file_train, "images_dir": image_root_train, "images": dl_manager.iter_files(image_root_train), }, ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={ "annotation_path": json_file_test, "images_dir": image_root_test, "images": dl_manager.iter_files(image_root_test), }, ), ] def _generate_examples( self, annotation_path: Path, images_dir: Path, images: ArchiveIterable ) -> Iterator: """ Generates examples for the dataset. Args: annotation_path (Path): The path to the annotation file. images_dir (Path): The path to the directory containing the images. images: (ArchiveIterable): An iterable containing the images. Yields: Dict[str, Union[str, Image]]: A dictionary containing the generated examples. """ coco_annotation = COCOHelper(annotation_path, images_dir) for image_path in images: image_path = os.path.normpath(image_path) if "_annotations.coco.json" not in image_path: f = open(image_path, "rb") annotations = coco_annotation.get_annotations(image_path) ret = { "image": {"path": image_path, "bytes": f.read()}, "image_id": coco_annotation.get_image_id(image_path), "objects": [ { "id": annot["id"], "area": annot["area"], "bbox": round_box_values( annot["bbox"], 2 ), # [x, y, w, h] "label": annot["category_id"], "iscrowd": bool(annot["iscrowd"]), } for annot in annotations ], } yield image_path, ret