# NOTE(review): the lines below were extraction artifacts (file-size banner,
# commit hashes, and a rendered line-number gutter), not source code; they
# have been reduced to this comment so the module parses.
import os
import json
import collections
import datasets
from datasets import NamedSplit
from datasets.download.download_manager import DownloadManager
# Human-readable summary surfaced via DatasetInfo(description=...).
_DESCRIPTION = """\
This dataset contains all THIENVIET products images and annotations split in training
and validation.
"""
# Single zip archive on the Hugging Face Hub holding the COCO-format
# annotation JSONs (annotations/{train,test,val}.json) and the image files.
_URL = "https://huggingface.co./datasets/chanelcolgate/yenthienviet/resolve/main/data/yenthienviet_coco_hf.zip"
# Detection category names; their order here fixes the ClassLabel integer ids.
_CATEGORIES = [
    "hop_dln",
    "hop_jn",
    "hop_vtg",
    "hop_ytv",
    "lo_kids",
    "lo_ytv",
    "loc_dln",
    "loc_jn",
    "loc_kids",
    "loc_ytv",
]
class Yenthienviet(datasets.GeneratorBasedBuilder):
    """Object-detection dataset of THIENVIET product images.

    Streams a single zip archive containing COCO-format annotation files
    (``annotations/{train,test,val}.json``) followed by the image files,
    and yields one example per image with its bounding-box objects.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset schema: one image plus its object annotations."""
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        # COCO bbox convention: [x, y, width, height].
                        "bbox": datasets.Sequence(
                            datasets.Value("float32"), length=4
                        ),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download the archive once and declare the train/test/val splits.

        The archive is not extracted; each split re-streams it via
        ``iter_archive`` with its own annotation-file path.
        """
        archive = dl_manager.download(_URL)
        split_annotations = [
            (datasets.Split.TRAIN, "annotations/train.json"),
            (datasets.Split.TEST, "annotations/test.json"),
            (NamedSplit("val"), "annotations/val.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "annotation_file_path": annotation_file_path,
                    # A fresh iterator per split: each generator must
                    # stream the archive from the beginning.
                    "files": dl_manager.iter_archive(archive),
                },
            )
            for split_name, annotation_file_path in split_annotations
        ]

    def _generate_examples(self, annotation_file_path, files):
        """Yield ``(index, example)`` pairs for one split.

        Args:
            annotation_file_path: Archive-internal path of this split's
                COCO annotation JSON.
            files: Iterator of ``(path, file_object)`` pairs produced by
                ``dl_manager.iter_archive``.

        Relies on archive member ordering: the annotation file must
        appear before the images it describes.
        """

        def process_annot(annot, category_id_to_category):
            # Map a raw COCO annotation onto the features declared in _info.
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                "category": category_id_to_category[annot["category_id"]],
            }

        # Fix: was initialized as a list but later rebound to a dict;
        # start with the mapping type actually used so membership tests
        # below are consistent (and O(1)) from the start.
        image_id_to_image = {}
        idx = 0
        # This loop relies on the ordering of the files in the archive:
        # annotation files come first, then the images.
        for path, f in files:
            file_name = os.path.basename(path)
            if path == annotation_file_path:
                annotations = json.load(f)
                category_id_to_category = {
                    category["id"]: category["name"]
                    for category in annotations["categories"]
                }
                # Group annotations by the image they belong to.
                image_id_to_annotations = collections.defaultdict(list)
                for annot in annotations["annotations"]:
                    image_id_to_annotations[annot["image_id"]].append(annot)
                # Keyed by file name so image members can be matched as
                # they are streamed from the archive. (Loop variable was
                # misleadingly named `annot`; these are image records.)
                image_id_to_image = {
                    image["file_name"]: image
                    for image in annotations["images"]
                }
            elif file_name in image_id_to_image:
                image = image_id_to_image[file_name]
                objects = [
                    process_annot(annot, category_id_to_category)
                    for annot in image_id_to_annotations[image["id"]]
                ]
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": path, "bytes": f.read()},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                idx += 1
|