import datasets
from datasets.tasks import ImageClassification

_DESCRIPTION = """
A small rocket images dataset.
"""
_HOMEPAGE = "https://huggingface.co./datasets/MilkCool/rockets"
_CITATION = ""
_LICENSE = "MIT"

_IMAGES_DIR = "train"

_NAMES = ["Rockets"]  # TODO: list every class name in the dataset

class Rockets(datasets.GeneratorBasedBuilder):

	def _info(self):
		return datasets.DatasetInfo(
			description=_DESCRIPTION,
			features=datasets.Features(
				{
					"image": datasets.Image(),
					"label": datasets.ClassLabel(names=_NAMES),
				}
			),
			supervised_keys=("image", "label"),
			homepage=_HOMEPAGE,
			citation=_CITATION,
			license=_LICENSE,
			task_templates=[ImageClassification(image_column="image", label_column="label")],
		)

	def _split_generators(self, dl_manager):
		# TODO: point these at the actual data files in the repository.
		# The file names below are placeholders; _generate_examples expects an
		# iterable of (path, file object) pairs plus a metadata file listing
		# which images to keep.
		archive_path = dl_manager.download("images.tar.gz")  # placeholder name
		metadata_path = dl_manager.download("metadata.txt")  # placeholder name
		return [
			datasets.SplitGenerator(
				name=datasets.Split.TRAIN,
				# These kwargs will be passed to _generate_examples
				gen_kwargs={
					"images": dl_manager.iter_archive(archive_path),
					"metadata_path": metadata_path,
				},
			)
		]

	def _generate_examples(self, images, metadata_path):
		# The metadata file lists, one per line, the image identifiers to keep.
		with open(metadata_path, encoding="utf-8") as f:
			files_to_keep = set(f.read().split("\n"))
		for file_path, file_obj in images:
			if file_path.startswith(_IMAGES_DIR):
				# Strip the directory prefix and the ".jpg" suffix so the path
				# can be compared against the identifiers in the metadata file.
				if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
					# Assumes archive paths of the form "train/<label>/<name>.jpg".
					label = file_path.split("/")[1]
					yield file_path, {
						"image": {"path": file_path, "bytes": file_obj.read()},
						"label": label,
					}
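
# A minimal usage sketch (assumes the placeholder data files referenced above
# have been replaced with the real archive and metadata for MilkCool/rockets):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("MilkCool/rockets", split="train")
#     example = ds[0]  # {"image": <PIL.Image.Image>, "label": <int class id>}
#     print(ds.features["label"].int2str(example["label"]))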