quaeast committed
Commit 210d65d · 1 Parent(s): 87c8d15
data/images.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6c8b18946bad1a8f6e5de007ebab0d1a0b12a944a7c9dce62e68d3cbf7d36bf
+ size 2767959040
data/text/test2.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/text/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/text/valid2.txt ADDED
The diff for this file is too large to render. See raw diff
 
images.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6c8b18946bad1a8f6e5de007ebab0d1a0b12a944a7c9dce62e68d3cbf7d36bf
+ size 2767959040
multimodal_sarcasm_detection.py ADDED
@@ -0,0 +1,117 @@
+ import ast
+ import copy
+ from dataclasses import dataclass, field
+ from typing import ClassVar, Dict
+
+ import datasets
+ from datasets.features import ClassLabel, Features, Image
+ # TaskTemplate lives in datasets.tasks in the datasets releases that still ship task templates.
+ from datasets.tasks import TaskTemplate
+
+ _DESCRIPTION = "none"
+ _NAMES = ["non-sarcastic", "sarcastic"]
+ _HOMEPAGE = "https://github.com/headacheboy/data-of-multimodal-sarcasm-detection"
+ _CITATION = "none"
+ _LICENSE = "none"
+ _BASE_URL = "data/images.tar"
+ _METADATA_URLS = {
+     "train": "data/text/train.txt",
+     "test": "data/text/test2.txt",
+     "valid": "data/text/valid2.txt",
+ }
+ _IMAGES_DIR = "images/"
+
+
+ @dataclass(frozen=True)
+ class ImageTextClassification(TaskTemplate):
+     # Task template pairing an image and a text field with a class label.
+     task: str = field(default="image-text-classification", metadata={"include_in_asdict_even_if_is_default": True})
+     input_schema: ClassVar[Features] = Features({"image": Image()})
+     text_schema: ClassVar[Features] = Features({"text": datasets.Value("string")})
+     label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+     image_column: str = "image"
+     text_column: str = "text"
+     label_column: str = "labels"
+
+     def align_with_features(self, features):
+         if self.label_column not in features:
+             raise ValueError(f"Column {self.label_column} is not present in features.")
+         if not isinstance(features[self.label_column], ClassLabel):
+             raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+         # Replace the placeholder ClassLabel with the dataset's own label feature.
+         task_template = copy.deepcopy(self)
+         label_schema = self.label_schema.copy()
+         label_schema["labels"] = features[self.label_column]
+         task_template.__dict__["label_schema"] = label_schema
+         return task_template
+
+     @property
+     def column_mapping(self) -> Dict[str, str]:
+         return {
+             self.image_column: "image",
+             self.text_column: "text",
+             self.label_column: "labels",
+         }
+
+
+ class MultimodalSarcasmDetection(datasets.GeneratorBasedBuilder):
+     """Multimodal (image + text) sarcasm detection dataset."""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "text": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=_NAMES),
+                 }
+             ),
+             supervised_keys=(("image", "text"), "label"),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+             task_templates=[ImageTextClassification(image_column="image", text_column="text", label_column="label")],
+         )
+
+     def _split_generators(self, dl_manager):
+         # Every split reads from the same image archive; only the metadata file differs.
+         archive_path = dl_manager.download(_BASE_URL)
+         split_metadata_paths = dl_manager.download(_METADATA_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "images": dl_manager.iter_archive(archive_path),
+                     "metadata_path": split_metadata_paths["train"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "images": dl_manager.iter_archive(archive_path),
+                     "metadata_path": split_metadata_paths["test"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "images": dl_manager.iter_archive(archive_path),
+                     "metadata_path": split_metadata_paths["valid"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, images, metadata_path):
+         """Yield (key, example) pairs of image, text, and label for one split."""
+         lines = {}
+         files_to_keep = set()
+         with open(metadata_path, encoding="utf-8") as f:
+             for line in f:
+                 line = line.strip()
+                 if line:
+                     # Each metadata line is a Python-style list literal: [image_id, text, ..., label].
+                     line = ast.literal_eval(line)
+                     lines[line[0]] = line[1:]
+                     files_to_keep.add(line[0])
+         for file_path, file_obj in images:
+             if file_path.startswith(_IMAGES_DIR):
+                 image_id = file_path[len(_IMAGES_DIR): -len(".jpg")]
+                 if image_id in files_to_keep:
+                     line = lines[image_id]
+                     yield file_path, {
+                         "image": {"path": file_path, "bytes": file_obj.read()},
+                         "text": line[0],
+                         "label": line[-1],
+                     }
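For reference, a minimal usage sketch of the loading script added above. The repo id quaeast/multimodal_sarcasm_detection is an assumption (substitute the real repo id, or a local path to multimodal_sarcasm_detection.py), and the script requires a datasets release that still supports script-based builders and task templates:

from datasets import load_dataset

# Hypothetical repo id; a local path to the script also works.
# trust_remote_code is needed on recent datasets releases; drop it on older ones.
ds = load_dataset("quaeast/multimodal_sarcasm_detection", trust_remote_code=True)

sample = ds["train"][0]
print(sample["text"], sample["label"])  # the "image" field decodes to a PIL image on access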
text/test2.txt ADDED
The diff for this file is too large to render. See raw diff
 
text/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
text/valid2.txt ADDED
The diff for this file is too large to render. See raw diff