Dataset tags: Modalities: Image, Text · Size: < 1K · Libraries: Datasets
Commit 8e33d10 by davanstrien (parent: 627ad4b)

add coco config

Files changed (2):
  1. dataset_infos.json (+1 -1)
  2. yalt_ai_tabular_dataset.py (+155 -27)
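With this change the loading script exposes two named builder configs, "YOLO" and "COCO", so the desired annotation format can be selected at load time. A minimal usage sketch, assuming the script is hosted on the Hub under a repo id such as `davanstrien/yalt_ai_tabular_dataset` (the exact repo path is not shown in this commit):

```python
from datasets import load_dataset

# Hypothetical Hub repo id for this loading script; substitute the real one.
repo_id = "davanstrien/yalt_ai_tabular_dataset"

# The script now exposes two named configs: "YOLO" and "COCO".
yolo_train = load_dataset(repo_id, "YOLO", split="train")
coco_train = load_dataset(repo_id, "COCO", split="train")

print(yolo_train.features)  # image + objects with centre-based pixel bboxes
print(coco_train.features)  # image_id/width/height + COCO-style object annotations
```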
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"default": {"description": "TODO", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image": {"decode": true, "id": null, "_type": "Image"}, "objects": {"feature": {"label": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "bbox": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60704, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 7537, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 47159, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 115400, "size_in_bytes": 376305464}}
+ {"default": {"description": "TODO", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image": {"decode": true, "id": null, "_type": "Image"}, "objects": {"feature": {"label": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "bbox": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60704, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 7537, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 47159, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 115400, "size_in_bytes": 376305464}, "YOLO": {"description": "TODO", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image": {"decode": true, "id": null, "_type": "Image"}, "objects": {"feature": {"label": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "bbox": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "YOLO", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60704, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 7537, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 47159, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 115400, "size_in_bytes": 376305464}, "COCO": {"description": "TODO", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image_id": {"dtype": "int64", "id": null, "_type": "Value"}, "image": {"decode": true, "id": null, "_type": "Image"}, "width": {"dtype": "int32", "id": null, "_type": "Value"}, "height": {"dtype": "int32", "id": null, "_type": "Value"}, "objects": [{"category_id": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "image_id": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "int64", "id": null, "_type": "Value"}, "area": {"dtype": "int64", "id": null, "_type": "Value"}, "bbox": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}, "segmentation": [[{"dtype": "float32", "id": null, "_type": "Value"}]], "iscrowd": {"dtype": "bool", "id": null, "_type": "Value"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "COCO", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 87171, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 11225, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 71491, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 169887, "size_in_bytes": 376359951}}
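The regenerated dataset_infos.json now carries three top-level entries ("default", "YOLO", "COCO"), each with identical split counts. A small sketch for checking this locally (the file path is assumed to be the repo copy of dataset_infos.json):

```python
import json

# Inspect the updated metadata file: one entry per config, each listing its splits.
with open("dataset_infos.json") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    split_sizes = {name: split["num_examples"] for name, split in info["splits"].items()}
    print(config_name, split_sizes)

# Expected output, based on the JSON above:
#   default {'train': 196, 'validation': 22, 'test': 135}
#   YOLO {'train': 196, 'validation': 22, 'test': 135}
#   COCO {'train': 196, 'validation': 22, 'test': 135}
```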
yalt_ai_tabular_dataset.py CHANGED
@@ -16,6 +16,7 @@
 
 import os
 from glob import glob
+from re import L
 
 import datasets
 from PIL import Image
@@ -44,15 +45,49 @@ _URL = "https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1"
 _CATEGORIES = ["Header", "Col", "Marginal", "text"]
 
 
+class YaltAiTabularDatasetConfig(datasets.BuilderConfig):
+    """BuilderConfig for YaltAiTabularDataset."""
+
+    def __init__(self, name, **kwargs):
+        """BuilderConfig for YaltAiTabularDataset."""
+        super(YaltAiTabularDatasetConfig, self).__init__(
+            version=datasets.Version("1.0.0"), name=name, description=None, **kwargs
+        )
+
+
 class YaltAiTabularDataset(datasets.GeneratorBasedBuilder):
     """Object Detection for historic manuscripts"""
 
-    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [
+        YaltAiTabularDatasetConfig("YOLO"),
+        YaltAiTabularDatasetConfig("COCO"),
+    ]
 
     def _info(self):
-        return datasets.DatasetInfo(
-            features=datasets.Features(
+        if self.config.name == "COCO":
+            features = datasets.Features(
+                {
+                    "image_id": datasets.Value("int64"),
+                    "image": datasets.Image(),
+                    "width": datasets.Value("int32"),
+                    "height": datasets.Value("int32"),
+                    # "url": datasets.Value("string"),
+                }
+            )
+            object_dict = {
+                "category_id": datasets.ClassLabel(names=_CATEGORIES),
+                "image_id": datasets.Value("string"),
+                "id": datasets.Value("int64"),
+                "area": datasets.Value("int64"),
+                "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                "segmentation": [[datasets.Value("float32")]],
+                "iscrowd": datasets.Value("bool"),
+            }
+            features["objects"] = [object_dict]
+        if self.config.name == "YOLO":
+            features = datasets.Features(
                 {
+                    # "image_id": datasets.Value("int32"),
                     "image": datasets.Image(),
                     "objects": datasets.Sequence(
                         {
@@ -63,7 +98,9 @@ class YaltAiTabularDataset(datasets.GeneratorBasedBuilder):
                         }
                     ),
                 }
-            ),
+            )
+        return datasets.DatasetInfo(
+            features=features,
             supervised_keys=None,
             description=_DESCRIPTION,
             homepage=_HOMEPAGE,
@@ -93,31 +130,122 @@ class YaltAiTabularDataset(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, data_dir):
+        def create_annotation_from_yolo_format(
+            min_x,
+            min_y,
+            width,
+            height,
+            image_id,
+            category_id,
+            annotation_id,
+            segmentation=False,
+        ):
+            bbox = (float(min_x), float(min_y), float(width), float(height))
+            area = width * height
+            max_x = min_x + width
+            max_y = min_y + height
+            if segmentation:
+                seg = [[min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]]
+            else:
+                seg = []
+            return {
+                "id": annotation_id,
+                "image_id": image_id,
+                "bbox": bbox,
+                "area": area,
+                "iscrowd": 0,
+                "category_id": category_id,
+                "segmentation": seg,
+            }
+
         image_dir = os.path.join(data_dir, "images")
         label_dir = os.path.join(data_dir, "labels")
         image_paths = sorted(glob(f"{image_dir}/*.jpg"))
         label_paths = sorted(glob(f"{label_dir}/*.txt"))
+        if self.config.name == "COCO":
+            for idx, (image_path, label_path) in enumerate(
+                zip(image_paths, label_paths)
+            ):
+                image_id = idx
+                annotations = []
+                image = Image.open(image_path)  # .convert("RGB")
+                w, h = image.size
+                with open(label_path, "r") as f:
+                    lines = f.readlines()
+                for line in lines:
+                    line = line.strip().split()
+                    # logger.warn(line)
+                    category_id = line[
+                        0
+                    ]  # int(line[0]) + 1 # you start with annotation id with '1'
+                    x_center = float(line[1])
+                    y_center = float(line[2])
+                    width = float(line[3])
+                    height = float(line[4])
+
+                    float_x_center = w * x_center
+                    float_y_center = h * y_center
+                    float_width = w * width
+                    float_height = h * height
+
+                    min_x = int(float_x_center - float_width / 2)
+                    min_y = int(float_y_center - float_height / 2)
+                    width = int(float_width)
+                    height = int(float_height)
+
+                    annotation = create_annotation_from_yolo_format(
+                        min_x,
+                        min_y,
+                        width,
+                        height,
+                        image_id,
+                        category_id,
+                        image_id,
+                        # segmentation=opt.box2seg,
+                    )
+                    annotations.append(annotation)
+                    # annotation_id += 1
+
+                # image_id += 1 # if you finished annotation work, updates the image id.
+                example = {
+                    "image_id": image_id,
+                    "image": image,
+                    "width": w,
+                    "height": h,
+                    "objects": annotations,
+                }
+                yield idx, example
+        if self.config.name == "YOLO":
+            for idx, (image_path, label_path) in enumerate(
+                zip(image_paths, label_paths)
+            ):
+                im = Image.open(image_path)
+                width, height = im.size
+                image_id = idx
+                annotations = []
+                with open(label_path, "r") as f:
+                    lines = f.readlines()
+                objects = []
+                for line in lines:
+                    line = line.strip().split()
+                    bbox_class = int(line[0])
+                    bbox_xcenter = int(float(line[1]) * width)
+                    bbox_ycenter = int(float(line[2]) * height)
+                    bbox_width = int(float(line[3]) * width)
+                    bbox_height = int(float(line[4]) * height)
+                    objects.append(
+                        {
+                            "label": bbox_class,
+                            "bbox": [
                                bbox_xcenter,
                                bbox_ycenter,
                                bbox_width,
                                bbox_height,
+                            ],
+                        }
+                    )
 
-        for idx, (image_path, label_path) in enumerate(zip(image_paths, label_paths)):
-            im = Image.open(image_path)
-            width, height = im.size
-
-            with open(label_path, "r") as f:
-                lines = f.readlines()
-
-            objects = []
-            for line in lines:
-                line = line.strip().split()
-                bbox_class = int(line[0])
-                bbox_xcenter = int(float(line[1]) * width)
-                bbox_ycenter = int(float(line[2]) * height)
-                bbox_width = int(float(line[3]) * width)
-                bbox_height = int(float(line[4]) * height)
-                objects.append(
-                    {
-                        "label": bbox_class,
-                        "bbox": [bbox_xcenter, bbox_ycenter, bbox_width, bbox_height],
-                    }
-                )
-
-            yield idx, {"image": image_path, "objects": objects}
+                yield idx, {
+                    "image": image_path,
+                    "objects": objects,
+                }
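The new COCO branch of `_generate_examples` converts each YOLO label line (normalised `x_center y_center width height`) into an absolute-pixel `[x_min, y_min, width, height]` box before wrapping it in a COCO-style annotation dict. A standalone sketch of that conversion, using illustrative names that are not part of the loading script:

```python
def yolo_to_coco_bbox(x_center, y_center, box_w, box_h, img_w, img_h):
    """Convert one YOLO-normalised box to a COCO-style [x_min, y_min, width, height] box."""
    # Scale normalised centre/extent values to pixel units.
    px_center_x = x_center * img_w
    px_center_y = y_center * img_h
    px_w = box_w * img_w
    px_h = box_h * img_h
    # Shift from a centre-based to a top-left-based origin.
    x_min = int(px_center_x - px_w / 2)
    y_min = int(px_center_y - px_h / 2)
    return [x_min, y_min, int(px_w), int(px_h)]


# Example: the label line "0 0.5 0.45 0.25 0.1" (class 0 = "Header") on a
# 2000x3000 px page image becomes a 500x300 px box anchored at (750, 1200).
print(yolo_to_coco_bbox(0.5, 0.45, 0.25, 0.1, 2000, 3000))  # [750, 1200, 500, 300]
```

The YOLO config, by contrast, keeps centre-based boxes: the script scales the normalised values to pixels but yields `[x_center, y_center, width, height]` unchanged in order.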