chanelcolgate committed on
Commit
d12d104
·
1 Parent(s): 2130214

new file: braintumor.py

Browse files
Files changed (3) hide show
  1. README.md +32 -3
  2. braintumor.py +276 -0
  3. requirements.txt +1 -0
README.md CHANGED
@@ -1,7 +1,36 @@
1
  ---
2
- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1
3
- # Doc / guide: https://huggingface.co/docs/hub/datasets-cards
4
- {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  ---
6
 
7
  # Dataset Card for Dataset Name
 
1
  ---
2
+ dataset_info:
3
+ features:
4
+ - name: image
5
+ dtype: image
6
+ - name: image_id
7
+ dtype: int64
8
+ - name: objects
9
+ sequence:
10
+ - name: id
11
+ dtype: int64
12
+ - name: area
13
+ dtype: float64
14
+ - name: bbox
15
+ sequence: float32
16
+ length: 4
17
+ - name: label
18
+ dtype:
19
+ class_label:
20
+ names:
21
+ '0': negative
22
+ '1': positive
23
+ - name: iscrowd
24
+ dtype: bool
25
+ splits:
26
+ - name: train
27
+ num_bytes: 222568
28
+ num_examples: 893
29
+ - name: test
30
+ num_bytes: 55697
31
+ num_examples: 223
32
+ download_size: 12896319
33
+ dataset_size: 278265
34
  ---
35
 
36
  # Dataset Card for Dataset Name
braintumor.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import json
4
+ from pathlib import Path
5
+ from tqdm import tqdm
6
+ from glob import glob
7
+ from typing import Dict, Any, List, Union, Iterator
8
+
9
+ import yaml
10
+ from yaml.loader import SafeLoader
11
+ import datasets
12
+ from datasets.download.download_manager import DownloadManager, ArchiveIterable
13
+ from pylabel import importer
14
+
15
+
16
+ _DESCRIPTION = """\
17
+ Training image sets and labels/bounding box coordinates for detecting brain
18
+ tumors in MR images.
19
+ - The datasets JPGs exported at their native size and are separated by plan
20
+ (Axial, Coronal and Sagittal).
21
+ - Tumors were hand labeled using https://makesense.ai
22
+ - Bounding box coordinates and MGMT positive labels were marked on ~400 images
23
+ for each plane in the T1wCE series from the RSNA-MICCAI competition data.
24
+ """
25
+
26
+ _URLS = {
27
+ "yolo": "https://huggingface.co/datasets/chanelcolgate/tumorsbrain/resolve/main/data/archive.zip"
28
+ }
29
+
30
+ _CLASSES = ["negative", "positive"]
31
+
32
+
33
# merge all plane folders into one folder tree
def copy_yolo_files(from_folder, to_folder, images_labels, train_test):
    """Copy one split's YOLO image or label files into a merged folder tree.

    Args:
        from_folder: Source dataset root (e.g. one plane's folder).
        to_folder: Destination root that merges all planes.
        images_labels: Either "images" or "labels".
        train_test: Either "train" or "test".
    """
    src_dir = os.path.join(from_folder, images_labels, train_test)
    dst_dir = os.path.join(to_folder, images_labels, train_test)
    os.makedirs(dst_dir, exist_ok=True)
    # Image folders hold .jpg files; label folders hold YOLO .txt files.
    pattern = "*.jpg" if images_labels == "images" else "*.txt"
    # Copy (not move) every matching file, with a progress bar.
    for src_file in tqdm(glob(os.path.join(src_dir, pattern))):
        shutil.copy(src_file, dst_dir)
44
+
45
+
46
def yolo_to_coco(input_folder, output_folder, train_test):
    """Convert one YOLO-format split to COCO format using pylabel.

    Stages the split's images and YOLO .txt annotations side by side in
    ``output_folder/train_test``, exports a ``_annotations.coco.json``
    file there, then deletes the staged .txt files again.

    Args:
        input_folder: Merged YOLO root containing ``images``/``labels``
            subfolders and a ``classes.txt`` file.
        output_folder: Root under which the COCO split folder is created.
        train_test: Either "train" or "test".
    """
    labels_dir = os.path.join(input_folder, "labels", train_test)
    images_dir = os.path.join(input_folder, "images", train_test)
    split_dir = os.path.join(output_folder, train_test)
    os.makedirs(split_dir, exist_ok=True)

    label_files = glob(os.path.join(labels_dir, "*.txt"))
    image_files = glob(os.path.join(images_dir, "*.jpg"))
    # pylabel's YOLO importer expects annotations next to the images.
    for path in tqdm(label_files):
        shutil.copy(path, split_dir)
    for path in tqdm(image_files):
        shutil.copy(path, split_dir)

    # Class names, one per line (written by the caller).
    with open(os.path.join(input_folder, "classes.txt"), "r") as fh:
        class_names = fh.read().split("\n")

    dataset = importer.ImportYoloV5(
        path=split_dir, cat_names=class_names, name="brain tumors"
    )
    # NOTE(review): cat_id_index=0 keeps category ids aligned with the
    # 0-based ClassLabel mapping used by the builder in this file.
    coco_file = os.path.join(split_dir, "_annotations.coco.json")
    dataset.export.ExportToCoco(coco_file, cat_id_index=0)
    # Drop the staged YOLO .txt files now that the COCO json exists.
    for path in label_files:
        os.remove(path.replace(labels_dir, split_dir))
76
+
77
+
78
def round_box_values(box, decimals=2):
    """Round every coordinate of a bounding box to *decimals* places."""
    rounded = []
    for coord in box:
        rounded.append(round(coord, decimals))
    return rounded
80
+
81
+
82
class COCOHelper:
    """Index COCO annotations by normalized image path for fast lookup."""

    def __init__(self, annotation_path: Path, images_dir: Path) -> None:
        """Load the COCO json and build path -> annotations/id indexes.

        Args:
            annotation_path: Path to the COCO ``_annotations.coco.json``.
            images_dir: Directory that contains the referenced images.
        """
        with open(annotation_path, "r") as file:
            self.data = json.load(file)

        # Group annotations by the image they belong to.
        grouped: Dict[int, Any] = {}
        for annotation in self.annotations:
            grouped.setdefault(annotation["image_id"], []).append(annotation)
        # Keep each image's annotations in deterministic (annotation id) order.
        for image_id in grouped:
            grouped[image_id] = sorted(grouped[image_id], key=lambda a: a["id"])

        self.dict_path2annot: Dict[str, Any] = {}
        self.dict_path2id: Dict[str, Any] = {}
        for image in self.images:
            # Normalize so lookups match os.path.normpath'd query paths.
            key = os.path.normpath(os.path.join(images_dir, image["file_name"]))
            self.dict_path2annot[key] = grouped.get(int(image["id"]), [])
            self.dict_path2id[key] = image["id"]

    def __len__(self) -> int:
        # Number of images described by the annotation file.
        return len(self.data["images"])

    @property
    def images(self) -> List[Dict[str, Union[str, int]]]:
        return self.data["images"]

    @property
    def annotations(self) -> List[Any]:
        return self.data["annotations"]

    @property
    def categories(self) -> List[Dict[str, Union[str, int]]]:
        return self.data["categories"]

    def get_annotations(self, image_path: str) -> List[Any]:
        """Return the (id-sorted) annotations for *image_path*, or []."""
        return self.dict_path2annot.get(image_path, [])

    def get_image_id(self, image_path: str) -> int:
        """Return the COCO image id for *image_path*, or -1 if unknown."""
        return self.dict_path2id.get(image_path, -1)
130
+
131
+
132
class COCOBrainTumor(datasets.GeneratorBasedBuilder):
    """COCO Brain Tumor dataset builder (object detection, 2 classes)."""

    VERSION = datasets.Version("1.0.1")

    def _info(self) -> datasets.DatasetInfo:
        """
        Return the dataset metadata and features.

        Returns:
            DatasetInfo: Metadata and features of the dataset.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "image_id": datasets.Value("int64"),
                    "objects": datasets.Sequence(
                        {
                            "id": datasets.Value("int64"),
                            "area": datasets.Value("float64"),
                            # COCO-style [x, y, w, h] box
                            "bbox": datasets.Sequence(
                                datasets.Value("float32"), length=4
                            ),
                            "label": datasets.ClassLabel(names=_CLASSES),
                            "iscrowd": datasets.Value("bool"),
                        }
                    ),
                }
            ),
        )

    def _split_generators(
        self, dl_manager: DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """
        Provide the split information and download the data.

        Downloads the YOLO-format archive, merges the per-plane folders
        into a single YOLO tree, converts it to COCO format, and points
        each split at its images and COCO annotation file.

        Args:
            dl_manager (DownloadManager): The DownloadManager to use for
            downloading and extracting data.

        Returns:
            List[SplitGenerator]: List of SplitGenerator objects representing
            the data splits.
        """
        archive_yolo = dl_manager.download_and_extract(_URLS["yolo"])
        data_folder = "braintumors"
        data_folder_yolo = data_folder + "_yolo"
        data_folder_coco = data_folder + "_coco"
        # Capture the extracted per-plane folder names before we create
        # the merged folders alongside them.
        folders = os.listdir(str(archive_yolo))

        # Merge every plane's images/labels into one YOLO folder tree.
        for from_folder in folders:
            from_folder = os.path.join(archive_yolo, from_folder)
            to_folder = os.path.join(archive_yolo, data_folder_yolo)
            for images_labels in ["images", "labels"]:
                for train_test in ["train", "test"]:
                    copy_yolo_files(
                        from_folder, to_folder, images_labels, train_test
                    )

        # Class names come from the first plane's YOLO yaml config
        # (all planes share the same classes).
        with open(
            os.path.join(archive_yolo, folders[0], folders[0] + ".yaml")
        ) as f:
            classes = yaml.load(f, Loader=SafeLoader)["names"]

        # Write classes.txt, which yolo_to_coco reads for category names.
        with open(
            os.path.join(archive_yolo, data_folder_yolo, "classes.txt"), "w"
        ) as f:
            f.write("\n".join(classes))

        data_folder_yolo = os.path.join(archive_yolo, data_folder_yolo)
        data_folder_coco = os.path.join(archive_yolo, data_folder_coco)
        yolo_to_coco(data_folder_yolo, data_folder_coco, "train")
        yolo_to_coco(data_folder_yolo, data_folder_coco, "test")

        name_ds = str(archive_yolo) + "/braintumors_coco"
        image_root_train = name_ds + "/train"
        image_root_test = name_ds + "/test"
        af = "_annotations.coco.json"
        json_file_train = name_ds + "/train/" + af
        json_file_test = name_ds + "/test/" + af

        return [
            datasets.SplitGenerator(
                name=datasets.Split("train"),
                gen_kwargs={
                    "annotation_path": json_file_train,
                    "images_dir": image_root_train,
                    "images": dl_manager.iter_files(image_root_train),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("test"),
                gen_kwargs={
                    "annotation_path": json_file_test,
                    "images_dir": image_root_test,
                    "images": dl_manager.iter_files(image_root_test),
                },
            ),
        ]

    def _generate_examples(
        self, annotation_path: Path, images_dir: Path, images: ArchiveIterable
    ) -> Iterator:
        """
        Generate examples for the dataset.

        Args:
            annotation_path (Path): The path to the annotation file.
            images_dir (Path): The path to the directory containing the images.
            images (ArchiveIterable): An iterable containing the images.

        Yields:
            Dict[str, Union[str, Image]]: A dictionary containing the
            generated examples.
        """
        coco_annotation = COCOHelper(annotation_path, images_dir)

        for image_path in images:
            image_path = os.path.normpath(image_path)
            # The annotation json lives next to the images; skip it.
            if "_annotations.coco.json" in image_path:
                continue
            # Use a context manager so the file handle is closed promptly
            # (the previous version leaked one open handle per image).
            with open(image_path, "rb") as f:
                image_bytes = f.read()
            annotations = coco_annotation.get_annotations(image_path)
            ret = {
                "image": {"path": image_path, "bytes": image_bytes},
                "image_id": coco_annotation.get_image_id(image_path),
                "objects": [
                    {
                        "id": annot["id"],
                        "area": annot["area"],
                        "bbox": round_box_values(
                            annot["bbox"], 2
                        ),  # [x, y, w, h]
                        "label": annot["category_id"],
                        "iscrowd": bool(annot["iscrowd"]),
                    }
                    for annot in annotations
                ],
            }
            yield image_path, ret
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ pylabel