arxyzan committed on
Commit
06c63b4
1 Parent(s): aaa5fdf

Create flickr30k-fa.py

Browse files
Files changed (1) hide show
  1. flickr30k-fa.py +81 -0
flickr30k-fa.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+
4
+ import datasets
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+
8
+ _CITATION = """"""
9
+
10
+ _DESCRIPTION = """Flickr30k filtered and translated to Persian made by Sajjad Ayoubi https://www.kaggle.com/datasets/sajjadayobi360/flickrfa"""
11
+
12
+ _DOWNLOAD_URLS = {
13
+ "train": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_train.csv",
14
+ "test": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/annotations_test.csv",
15
+ "train_dataset": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/flickr30k-fa_train.zip",
16
+ "test_dataset": "https://huggingface.co/datasets/hezarai/flickr30k-fa/resolve/main/flickr30k-fa_test.zip",
17
+ }
18
+
19
+
20
+ class Flickr30kFaConfig(datasets.BuilderConfig):
21
+ def __init__(self, **kwargs):
22
+ super(Flickr30kFaConfig, self).__init__(**kwargs)
23
+
24
+
25
+ class Flickr30kFa(datasets.GeneratorBasedBuilder):
26
+ BUILDER_CONFIGS = [
27
+ Flickr30kFaConfig(
28
+ name="Persian flickr30k",
29
+ version=datasets.Version("1.0.0"),
30
+ description=_DESCRIPTION,
31
+ ),
32
+ ]
33
+
34
+ def _info(self):
35
+ return datasets.DatasetInfo(
36
+ description=_DESCRIPTION,
37
+ features=datasets.Features(
38
+ {
39
+ "image_path": datasets.Value("string"),
40
+ "label": datasets.Value("string"),
41
+ }
42
+ ),
43
+ citation=_CITATION,
44
+ )
45
+
46
+ def _split_generators(self, dl_manager):
47
+ """
48
+ Return SplitGenerators.
49
+ """
50
+
51
+ train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
52
+ test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
53
+
54
+ archive_path = dl_manager.download(_DOWNLOAD_URLS["train_dataset"])
55
+ train_extracted_path = dl_manager.extract(archive_path) if not dl_manager.is_streaming else ""
56
+
57
+ archive_path = dl_manager.download(_DOWNLOAD_URLS["test_dataset"])
58
+ test_extracted_path = dl_manager.extract(archive_path) if not dl_manager.is_streaming else ""
59
+
60
+ return [
61
+ datasets.SplitGenerator(
62
+ name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path, "dataset_dir": train_extracted_path}
63
+ ),
64
+ datasets.SplitGenerator(
65
+ name=datasets.Split.TEST, gen_kwargs={"filepath": test_path, "dataset_dir": test_extracted_path}
66
+ ),
67
+ ]
68
+
69
+ def _generate_examples(self, filepath, dataset_dir):
70
+ logger.info("⏳ Generating examples from = %s", filepath)
71
+
72
+ with open(filepath, encoding="utf-8") as csv_file:
73
+ csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
74
+
75
+ # Skip header
76
+ next(csv_reader, None)
77
+
78
+ for id_, row in enumerate(csv_reader):
79
+ label, filename = row
80
+ image_path = os.path.join(dataset_dir, filename)
81
+ yield id_, {"image_path": image_path, "label": label}