from xml.etree import ElementTree as ET
import datasets
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {ocr-receipts-text-detection},
author = {TrainingDataPro},
year = {2023}
}
"""
_DESCRIPTION = """\
The Grocery Store Receipts Dataset is a collection of photos captured from various
**grocery store receipts**. This dataset is specifically designed for tasks related to
**Optical Character Recognition (OCR)** and is useful for retail applications.
Each image in the dataset is accompanied by bounding box annotations indicating the
precise locations of specific text segments on the receipt. The text segments are
categorized into four classes: **item, store, date_time and total**.
"""
_NAME = "ocr-receipts-text-detection"
_HOMEPAGE = f"https://huggingface.co./datasets/TrainingDataPro/{_NAME}"
_LICENSE = ""
_DATA = f"https://huggingface.co./datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
_LABELS = ["receipt", "shop", "item", "date_time", "total"]
class OcrReceiptsTextDetection(datasets.GeneratorBasedBuilder):
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"name": datasets.Value("string"),
"image": datasets.Image(),
"mask": datasets.Image(),
"width": datasets.Value("uint16"),
"height": datasets.Value("uint16"),
"shapes": datasets.Sequence(
{
"label": datasets.ClassLabel(
num_classes=len(_LABELS),
names=_LABELS,
),
"type": datasets.Value("string"),
"points": datasets.Sequence(
datasets.Sequence(
datasets.Value("float"),
),
),
"rotation": datasets.Value("float"),
"occluded": datasets.Value("uint8"),
"attributes": datasets.Sequence(
{
"name": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
images = dl_manager.download(f"{_DATA}images.tar.gz")
masks = dl_manager.download(f"{_DATA}boxes.tar.gz")
annotations = dl_manager.download(f"{_DATA}annotations.xml")
images = dl_manager.iter_archive(images)
masks = dl_manager.iter_archive(masks)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": images,
"masks": masks,
"annotations": annotations,
},
),
]
@staticmethod
def parse_shape(shape: ET.Element) -> dict:
label = shape.get("label")
shape_type = shape.tag
        # CVAT stores these as XML attribute strings; cast so they match the
        # declared float / uint8 feature types.
        rotation = float(shape.get("rotation", 0.0))
        occluded = int(shape.get("occluded", 0))
points = None
if shape_type == "points":
points = tuple(map(float, shape.get("points").split(",")))
elif shape_type == "box":
points = [
(float(shape.get("xtl")), float(shape.get("ytl"))),
(float(shape.get("xbr")), float(shape.get("ybr"))),
]
elif shape_type == "polygon":
points = [
tuple(map(float, point.split(",")))
for point in shape.get("points").split(";")
]
attributes = []
for attr in shape:
attr_name = attr.get("name")
attr_text = attr.text
attributes.append({"name": attr_name, "text": attr_text})
shape_data = {
"label": label,
"type": shape_type,
"points": points,
"rotation": rotation,
"occluded": occluded,
"attributes": attributes,
}
return shape_data
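
    # Illustrative sketch of a CVAT-style element that `parse_shape` handles
    # (attribute names are taken from the code above; the values are hypothetical):
    #
    #     <box label="item" occluded="0" xtl="10.0" ytl="20.0" xbr="210.0" ybr="45.0">
    #         <attribute name="text">MILK 2% 1L</attribute>
    #     </box>
    #
    # would be returned roughly as:
    #
    #     {"label": "item", "type": "box", "points": [(10.0, 20.0), (210.0, 45.0)],
    #      "rotation": 0.0, "occluded": 0,
    #      "attributes": [{"name": "text", "text": "MILK 2% 1L"}]}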
def _generate_examples(self, images, masks, annotations):
tree = ET.parse(annotations)
root = tree.getroot()
for idx, (
(image_path, image),
(mask_path, mask),
) in enumerate(zip(images, masks)):
image_name = image_path.split("/")[-1]
img = root.find(f"./image[@name='images/{image_name}']")
image_id = img.get("id")
name = img.get("name")
width = img.get("width")
height = img.get("height")
shapes = [self.parse_shape(shape) for shape in img]
yield idx, {
"id": image_id,
"name": name,
"image": {"path": image_path, "bytes": image.read()},
"mask": {"path": mask_path, "bytes": mask.read()},
"width": width,
"height": height,
"shapes": shapes,
}
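
# Note (a sketch, assuming the `ds` object from the usage example near the top of
# this file): a `datasets.Sequence` of a feature dict is exposed as a dict of lists,
# so per-shape fields are read column-wise rather than as a list of dicts:
#
#     example = ds[0]
#     for label, points in zip(example["shapes"]["label"], example["shapes"]["points"]):
#         print(label, points)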