chanelcolgate
committed on
Commit
•
c1ca73d
1
Parent(s):
c16befb
modified: yenthienviet.py
Browse files- README.md +21 -20
- yenthienviet.py +95 -181
README.md
CHANGED
@@ -2,20 +2,24 @@
|
|
2 |
pretty_name: YENTHIENVIET
|
3 |
dataset_info:
|
4 |
features:
|
5 |
-
- name: image
|
6 |
-
dtype: image
|
7 |
- name: image_id
|
8 |
dtype: int64
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
- name: objects
|
10 |
sequence:
|
11 |
- name: id
|
12 |
dtype: int64
|
13 |
- name: area
|
14 |
-
dtype:
|
15 |
- name: bbox
|
16 |
sequence: float32
|
17 |
length: 4
|
18 |
-
- name:
|
19 |
dtype:
|
20 |
class_label:
|
21 |
names:
|
@@ -25,25 +29,22 @@ dataset_info:
|
|
25 |
'3': hop_ytv
|
26 |
'4': lo_kids
|
27 |
'5': lo_ytv
|
28 |
-
'6':
|
29 |
-
'7':
|
30 |
-
'8':
|
31 |
-
'9':
|
32 |
-
'10': loc_jn
|
33 |
-
- name: iscrowd
|
34 |
-
dtype: bool
|
35 |
splits:
|
36 |
- name: train
|
37 |
-
num_bytes:
|
38 |
-
num_examples:
|
39 |
-
- name: val
|
40 |
-
num_bytes: 16692841
|
41 |
-
num_examples: 79
|
42 |
- name: test
|
43 |
-
num_bytes:
|
44 |
-
num_examples:
|
45 |
-
|
46 |
-
|
|
|
|
|
|
|
47 |
---
|
48 |
|
49 |
# Dataset Card for Dataset Name
|
|
|
2 |
pretty_name: YENTHIENVIET
|
3 |
dataset_info:
|
4 |
features:
|
|
|
|
|
5 |
- name: image_id
|
6 |
dtype: int64
|
7 |
+
- name: image
|
8 |
+
dtype: image
|
9 |
+
- name: width
|
10 |
+
dtype: int32
|
11 |
+
- name: height
|
12 |
+
dtype: int32
|
13 |
- name: objects
|
14 |
sequence:
|
15 |
- name: id
|
16 |
dtype: int64
|
17 |
- name: area
|
18 |
+
dtype: int64
|
19 |
- name: bbox
|
20 |
sequence: float32
|
21 |
length: 4
|
22 |
+
- name: category
|
23 |
dtype:
|
24 |
class_label:
|
25 |
names:
|
|
|
29 |
'3': hop_ytv
|
30 |
'4': lo_kids
|
31 |
'5': lo_ytv
|
32 |
+
'6': loc_dln
|
33 |
+
'7': loc_jn
|
34 |
+
'8': loc_kids
|
35 |
+
'9': loc_ytv
|
|
|
|
|
|
|
36 |
splits:
|
37 |
- name: train
|
38 |
+
num_bytes: 649109264
|
39 |
+
num_examples: 1755
|
|
|
|
|
|
|
40 |
- name: test
|
41 |
+
num_bytes: 51031547
|
42 |
+
num_examples: 152
|
43 |
+
- name: val
|
44 |
+
num_bytes: 53894489
|
45 |
+
num_examples: 159
|
46 |
+
download_size: 727024589
|
47 |
+
dataset_size: 754035300
|
48 |
---
|
49 |
|
50 |
# Dataset Card for Dataset Name
|
yenthienviet.py
CHANGED
@@ -1,42 +1,20 @@
|
|
1 |
-
import json
|
2 |
import os
|
3 |
-
|
4 |
-
|
5 |
|
6 |
import datasets
|
7 |
-
from datasets
|
|
|
8 |
|
9 |
-
# Typing
|
10 |
-
_TYPING_BOX = Tuple[float, float, float, float]
|
11 |
|
12 |
_DESCRIPTION = """\
|
13 |
This dataset contains all THIENVIET products images and annotations split in training
|
14 |
and validation.
|
15 |
"""
|
16 |
|
17 |
-
|
18 |
-
"train": "https://huggingface.co/datasets/chanelcolgate/yenthienviet/resolve/main/data/coco2/train.zip",
|
19 |
-
"val": "https://huggingface.co/datasets/chanelcolgate/yenthienviet/resolve/main/data/coco2/val.zip",
|
20 |
-
"test": "https://huggingface.co/datasets/chanelcolgate/yenthienviet/resolve/main/data/coco2/test.zip",
|
21 |
-
"annotations": "https://huggingface.co/datasets/chanelcolgate/yenthienviet/resolve/main/data/coco2/annotations.zip",
|
22 |
-
}
|
23 |
-
|
24 |
-
_SPLITS = ["train", "val", "test"]
|
25 |
|
26 |
-
|
27 |
-
"annotations": {
|
28 |
-
"train": Path("_annotations.coco.train.json"),
|
29 |
-
"val": Path("_annotations.coco.val.json"),
|
30 |
-
"test": Path("_annotations.coco.test.json"),
|
31 |
-
},
|
32 |
-
"images": {
|
33 |
-
"train": Path("train"),
|
34 |
-
"val": Path("val"),
|
35 |
-
"test": Path("test"),
|
36 |
-
},
|
37 |
-
}
|
38 |
-
|
39 |
-
_CLASSES = [
|
40 |
"hop_dln",
|
41 |
"hop_jn",
|
42 |
"hop_vtg",
|
@@ -50,163 +28,99 @@ _CLASSES = [
|
|
50 |
]
|
51 |
|
52 |
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
class COCOHelper:
|
58 |
-
"""Helper class to load COCO annotations"""
|
59 |
-
|
60 |
-
def __init__(self, annotation_path: Path, images_dir: Path) -> None:
|
61 |
-
with open(annotation_path, "r") as file:
|
62 |
-
data = json.load(file)
|
63 |
-
self.data = data
|
64 |
-
|
65 |
-
dict_id2annot: Dict[int, Any] = {}
|
66 |
-
for annot in self.annotations:
|
67 |
-
dict_id2annot.setdefault(annot["image_id"], []).append(annot)
|
68 |
-
|
69 |
-
# Sort by id
|
70 |
-
dict_id2annot = {
|
71 |
-
k: list(sorted(v, key=lambda a: a["id"]))
|
72 |
-
for k, v in dict_id2annot.items()
|
73 |
-
}
|
74 |
-
|
75 |
-
self.dict_path2annot: Dict[str, Any] = {}
|
76 |
-
self.dict_path2id: Dict[str, Any] = {}
|
77 |
-
for img in self.images:
|
78 |
-
path_img = images_dir / str(img["file_name"])
|
79 |
-
path_img_str = str(path_img)
|
80 |
-
idx = int(img["id"])
|
81 |
-
annot = dict_id2annot.get(idx, [])
|
82 |
-
self.dict_path2annot[path_img_str] = annot
|
83 |
-
self.dict_path2id[path_img_str] = img["id"]
|
84 |
-
|
85 |
-
def __len__(self) -> int:
|
86 |
-
return len(self.data["images"])
|
87 |
|
88 |
-
|
89 |
-
def images(self) -> List[Dict[str, Union[str, int]]]:
|
90 |
-
return self.data["images"]
|
91 |
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
def _info(self) -> datasets.DatasetInfo:
|
113 |
-
"""
|
114 |
-
Return the dataset metadata and features.
|
115 |
-
|
116 |
-
Returns:
|
117 |
-
DatasetInfo: Metadata and features of the dataset.
|
118 |
-
"""
|
119 |
return datasets.DatasetInfo(
|
120 |
description=_DESCRIPTION,
|
121 |
-
features=
|
122 |
-
{
|
123 |
-
"image": datasets.Image(),
|
124 |
-
"image_id": datasets.Value("int64"),
|
125 |
-
"objects": datasets.Sequence(
|
126 |
-
{
|
127 |
-
"id": datasets.Value("int64"),
|
128 |
-
"area": datasets.Value("float64"),
|
129 |
-
"bbox": datasets.Sequence(
|
130 |
-
datasets.Value("float32"), length=4
|
131 |
-
),
|
132 |
-
"label": datasets.ClassLabel(names=_CLASSES),
|
133 |
-
"iscrowd": datasets.Value("bool"),
|
134 |
-
}
|
135 |
-
),
|
136 |
-
}
|
137 |
-
),
|
138 |
)
|
139 |
|
140 |
-
def _split_generators(
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
def _generate_examples(
|
175 |
-
self, annotation_path: Path, images_dir: Path, images: ArchiveIterable
|
176 |
-
) -> Iterator:
|
177 |
-
"""
|
178 |
-
Generates examples for the dataset.
|
179 |
-
|
180 |
-
Args:
|
181 |
-
annotation_path (Path): The path to the annotation file.
|
182 |
-
images_dir (Path): The path to the directory containing the images.
|
183 |
-
images: (ArchiveIterable): An iterable containing the images.
|
184 |
-
|
185 |
-
Yields:
|
186 |
-
Dict[str, Union[str, Image]]: A dictionary containing the generated examples.
|
187 |
-
"""
|
188 |
-
coco_annotation = COCOHelper(annotation_path, images_dir)
|
189 |
-
|
190 |
-
for image_path, f in images:
|
191 |
-
annotations = coco_annotation.get_annotations(
|
192 |
-
os.path.normpath(image_path)
|
193 |
-
)
|
194 |
-
ret = {
|
195 |
-
"image": {"path": image_path, "bytes": f.read()},
|
196 |
-
"image_id": coco_annotation.get_image_id(
|
197 |
-
os.path.normpath(image_path)
|
198 |
-
),
|
199 |
-
"objects": [
|
200 |
-
{
|
201 |
-
"id": annot["id"],
|
202 |
-
"area": annot["area"],
|
203 |
-
"bbox": round_box_values(
|
204 |
-
annot["bbox"], 2
|
205 |
-
), # [x, y, w, h]
|
206 |
-
"label": annot["category_id"],
|
207 |
-
"iscrowd": bool(annot["iscrowd"]),
|
208 |
-
}
|
209 |
-
for annot in annotations
|
210 |
-
],
|
211 |
}
|
212 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import os
|
2 |
+
import json
|
3 |
+
import collections
|
4 |
|
5 |
import datasets
|
6 |
+
from datasets import NamedSplit
|
7 |
+
from datasets.download.download_manager import DownloadManager
|
8 |
|
|
|
|
|
9 |
|
10 |
# Human-readable summary surfaced in DatasetInfo / the dataset card.
_DESCRIPTION = """\
This dataset contains all THIENVIET products images and annotations split in training
and validation.
"""

# Single zip archive containing the annotation JSONs and every split's images.
_URL = "https://huggingface.co/datasets/chanelcolgate/yenthienviet/resolve/main/data/yenthienviet_coco_hf.zip"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
|
17 |
+
_CATEGORIES = [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
"hop_dln",
|
19 |
"hop_jn",
|
20 |
"hop_vtg",
|
|
|
28 |
]
|
29 |
|
30 |
|
31 |
+
class Yenthienviet(datasets.GeneratorBasedBuilder):
    """Yenthienviet dataset: COCO-style object-detection annotations for
    THIENVIET product images, distributed as a single zip archive."""

    VERSION = datasets.Version("1.0.0")

    def _info(self) -> datasets.DatasetInfo:
        """Return the dataset metadata and feature schema.

        Each example is one image plus a sequence of COCO-style object
        annotations (id, area, bbox as ``[x, y, w, h]``, class label).
        """
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        "bbox": datasets.Sequence(
                            datasets.Value("float32"), length=4
                        ),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download the archive once and declare the train/test/val splits.

        Each split re-iterates the same archive lazily and reads its own
        annotation JSON from inside it.
        """
        archive = dl_manager.download(_URL)
        # One (split name, annotation path) pair per split; "val" is not a
        # predefined datasets.Split member, hence NamedSplit.
        split_specs = [
            (datasets.Split.TRAIN, "annotations/train.json"),
            (datasets.Split.TEST, "annotations/test.json"),
            (NamedSplit("val"), "annotations/val.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "annotation_file_path": annotation_path,
                    "files": dl_manager.iter_archive(archive),
                },
            )
            for split_name, annotation_path in split_specs
        ]

    def _generate_examples(self, annotation_file_path, files):
        """Yield ``(index, example)`` pairs for one split.

        Args:
            annotation_file_path: Path, inside the archive, of this split's
                COCO annotation JSON.
            files: Iterable of ``(path, file_object)`` pairs as produced by
                ``dl_manager.iter_archive``.

        NOTE: this relies on the member ordering inside the archive — the
        annotation file must appear before the images it describes; images
        encountered earlier are silently skipped.
        """

        def process_annot(annot, category_id_to_category):
            # Project a raw COCO annotation dict onto the "objects" schema.
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],  # [x, y, w, h]
                "category": category_id_to_category[annot["category_id"]],
            }

        # Initialized as an (empty) dict up front — the original seeded this
        # with a list, which only worked because the membership test below
        # happens to accept both; keep the type consistent.
        filename_to_image = {}
        image_id_to_annotations = collections.defaultdict(list)
        category_id_to_category = {}
        idx = 0
        for path, f in files:
            file_name = os.path.basename(path)
            if path == annotation_file_path:
                coco = json.load(f)
                category_id_to_category = {
                    category["id"]: category["name"]
                    for category in coco["categories"]
                }
                for annot in coco["annotations"]:
                    image_id_to_annotations[annot["image_id"]].append(annot)
                # Keyed by file NAME (not image id): archive members are
                # matched against their basename.
                filename_to_image = {
                    image["file_name"]: image for image in coco["images"]
                }
            elif file_name in filename_to_image:
                image = filename_to_image[file_name]
                objects = [
                    process_annot(annot, category_id_to_category)
                    for annot in image_id_to_annotations[image["id"]]
                ]
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": path, "bytes": f.read()},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                idx += 1
|