ShixuanAn committed on
Commit
c7a7861
1 Parent(s): 9e445f0

Update RDD_2020.py

Browse files
Files changed (1) hide show
  1. RDD_2020.py +37 -45
RDD_2020.py CHANGED
@@ -36,9 +36,6 @@ _LICENSE = ""
36
  # TODO: Add link to the official dataset URLs here
37
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
38
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
39
- _URLS = {
40
- "dataset": "https://prod-dcd-datasets-cache-zipfiles.s3.eu-west-1.amazonaws.com/5ty2wb6gvg-1.zip"
41
- }
42
 
43
 
44
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -73,70 +70,69 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
73
  homepage='https://data.mendeley.com/datasets/5ty2wb6gvg/1',
74
  citation=_CITATION,
75
  )
76
-
77
  def _split_generators(self, dl_manager):
78
- # The direct links to the zipped files on Hugging Face
79
  urls_to_download = {
80
- "test2": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/raw/main/test2.zip",
81
- "test1": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/raw/main/test1.zip",
82
- "train": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/raw/main/train.zip"
83
  }
84
 
85
- # Download and extract the dataset using the dl_manager
86
  downloaded_files = {
87
- key: dl_manager.download_and_extract(url) for key, url in urls_to_download.items()
 
88
  }
89
 
90
  return [
91
  datasets.SplitGenerator(
92
  name=datasets.Split.TRAIN,
93
  gen_kwargs={
94
- "images_dir": os.path.join(downloaded_files["train"], "images"),
95
- "annotations_dir": os.path.join(downloaded_files["train"], "annotations", "xmls"),
96
  "split": "train",
97
- },
98
  ),
99
  datasets.SplitGenerator(
100
  name=datasets.Split.TEST,
101
  gen_kwargs={
102
- "images_dir": os.path.join(downloaded_files["test1"], "images"),
103
- "annotations_dir": None, # No annotations for test1
104
  "split": "test1",
105
- },
106
  ),
107
  datasets.SplitGenerator(
108
  name=datasets.Split.VALIDATION,
109
  gen_kwargs={
110
- "images_dir": os.path.join(downloaded_files["test2"], "images"),
111
- "annotations_dir": None, # No annotations for test2
112
  "split": "test2",
113
- },
114
  ),
115
  ]
116
 
117
- def _generate_examples(self, images_dir, annotations_dir, split):
118
- # Loop over each country directory in the images_dir
119
- for country_dir in os.listdir(images_dir):
120
- country_images_dir = os.path.join(images_dir, country_dir)
121
- country_annotations_dir = os.path.join(annotations_dir, country_dir, "xmls") if annotations_dir else None
122
 
123
- # Now loop over each image in the country's image directory
124
- for image_file in os.listdir(country_images_dir):
125
- if not image_file.endswith('.jpg'):
126
- continue
127
- image_id = image_file.split('.')[0]
128
- annotation_file = image_id + '.xml'
129
- annotation_path = os.path.join(country_annotations_dir, annotation_file) if country_annotations_dir else None
130
 
131
- if annotation_path and not os.path.exists(annotation_path):
 
 
132
  continue
133
 
134
- # Parse the XML file for annotations if it exists
135
- crack_type = []
136
- crack_coordinates = []
137
- if annotation_path:
 
 
 
 
138
  tree = ET.parse(annotation_path)
139
  root = tree.getroot()
 
 
140
  for obj in root.findall('object'):
141
  crack_type.append(obj.find('name').text)
142
  bndbox = obj.find('bndbox')
@@ -147,19 +143,15 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
147
  "y_max": int(bndbox.find('ymax').text),
148
  }
149
  crack_coordinates.append(coordinates)
 
 
 
150
 
151
- # Assuming images are of uniform size, you might want to adjust this or extract from image directly
152
- image_resolution = {"width": 600, "height": 600, "depth": 3} if country_dir != "India" else {"width": 720, "height": 720, "depth": 3}
153
-
154
- # Yield the example as a key, value pair
155
  yield image_id, {
156
  "image_id": image_id,
157
  "country": country_dir,
158
  "type": split,
159
- "image_resolution": image_resolution,
160
- "image_path": os.path.join(country_images_dir, image_file),
161
  "crack_type": crack_type,
162
  "crack_coordinates": crack_coordinates,
163
- }
164
-
165
-
 
36
  # TODO: Add link to the official dataset URLs here
37
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
38
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 
 
 
39
 
40
 
41
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 
70
  homepage='https://data.mendeley.com/datasets/5ty2wb6gvg/1',
71
  citation=_CITATION,
72
  )
73
+
74
def _split_generators(self, dl_manager):
    """Download and extract the split archives and declare the dataset splits.

    Args:
        dl_manager: `datasets.DownloadManager` used to fetch and extract
            the zipped split archives.

    Returns:
        A list of `datasets.SplitGenerator`, one per split: train (TRAIN),
        test1 (TEST) and test2 (VALIDATION). Each generator passes the
        extracted archive root as ``filepath`` plus the split tag.
    """
    # NOTE: Hugging Face Hub serves raw file content at "/resolve/main/".
    # The previous "/blob/main/" URLs point at the HTML viewer page, which
    # downloads an HTML document that cannot be extracted as a zip.
    urls_to_download = {
        "train": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/train.zip",
        "test1": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/test1.zip",
        "test2": "https://huggingface.co/datasets/ShixuanAn/RDD_2020/resolve/main/test2.zip",
    }

    downloaded_files = {
        name: dl_manager.download_and_extract(url)
        for name, url in urls_to_download.items()
    }

    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "filepath": downloaded_files["train"],
                "split": "train",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "filepath": downloaded_files["test1"],
                "split": "test1",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={
                "filepath": downloaded_files["test2"],
                "split": "test2",
            },
        ),
    ]
110
 
 
 
 
 
 
111
 
112
+ def _generate_examples(self, filepath, split):
113
+
114
+ # Iterate over each country directory
115
+ for country_dir in ['Czech', 'India', 'Japan']:
116
+ images_dir = f"{filepath}/{country_dir}/images"
117
+ annotations_dir = f"{filepath}/{country_dir}/annotations/xmls" if split == "train" else None
 
118
 
119
+ # Iterate over each image in the country's image directory
120
+ for image_file in os.listdir(images_dir):
121
+ if not image_file.endswith('.jpg'):
122
  continue
123
 
124
+ image_id = f"{image_file.split('.')[0]}"
125
+
126
+ image_path = os.path.join(images_dir, image_file)
127
+ if annotations_dir:
128
+ annotation_file = image_id + '.xml'
129
+ annotation_path = os.path.join(annotations_dir, annotation_file)
130
+ if not os.path.exists(annotation_path):
131
+ continue
132
  tree = ET.parse(annotation_path)
133
  root = tree.getroot()
134
+ crack_type = []
135
+ crack_coordinates = []
136
  for obj in root.findall('object'):
137
  crack_type.append(obj.find('name').text)
138
  bndbox = obj.find('bndbox')
 
143
  "y_max": int(bndbox.find('ymax').text),
144
  }
145
  crack_coordinates.append(coordinates)
146
+ else:
147
+ crack_type = []
148
+ crack_coordinates = []
149
 
 
 
 
 
150
  yield image_id, {
151
  "image_id": image_id,
152
  "country": country_dir,
153
  "type": split,
154
+ "image_path": image_path,
 
155
  "crack_type": crack_type,
156
  "crack_coordinates": crack_coordinates,
157
+ }