# multimodal_sarcasm_detection.py
import ast
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

import datasets
from datasets.features import ClassLabel, Features, Image
# Task templates ship only with older `datasets` releases; this import assumes such a version.
from datasets.tasks import TaskTemplate
_DESCRIPTION = "none"
_NAMES = ["non-sarcastic", "sarcastic"]
_HOMEPAGE = "https://github.com/headacheboy/data-of-multimodal-sarcasm-detection"
_CITATION = "none"
_LICENSE = "none"
_BASE_URL = "data/images.tar"
_METADATA_URLS = {"train": "data/text/train.txt",
"test": "data/text/test2.txt",
"valid": "data/text/valid2.txt"}
_IMAGES_DIR = "images/"
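# Custom task template describing an (image, text) -> label classification task.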
@dataclass(frozen=True)
class ImageTextClassification(TaskTemplate):
task: str = field(default="image-text-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"image": Image()})
text_schema: ClassVar[Features] = Features({"text": datasets.Value("string")})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
image_column: str = "image"
text_column: str = "text"
label_column: str = "labels"
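    # Replace the ClassLabel placeholder in label_schema with the dataset's concrete label feature.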
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.image_column: "image",
self.text_column: "text",
self.label_column: "labels",
}
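# Builder that streams images out of the tar archive and joins them with the per-split metadata files.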
class MultimodalSarcasmDetection(datasets.GeneratorBasedBuilder):
"""MultimodalSarcasmDetection Images dataset"""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"text": datasets.Value("string"),
"label": datasets.ClassLabel(names=_NAMES),
}
),
supervised_keys=(("image", "text"), "label"),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
task_templates=[ImageTextClassification(image_column="image", text_column="text", label_column="label")],
)
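    # The image archive is downloaded once; each split iterates over it and keeps only
    # the files listed in that split's metadata file.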
def _split_generators(self, dl_manager):
archive_path = dl_manager.download(_BASE_URL)
split_metadata_paths = dl_manager.download(_METADATA_URLS)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"metadata_path": split_metadata_paths["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"metadata_path": split_metadata_paths["test"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"metadata_path": split_metadata_paths["valid"],
},
),
]
def _generate_examples(self, images, metadata_path):
"""Generate images and labels for splits."""
lines = {}
files_to_keep = set()
with open(metadata_path, encoding="utf-8") as f:
for line in f:
line = line.strip()
if line:
                    # Each metadata line is a Python list literal: [image_id, text, ..., label].
                    # ast.literal_eval parses it without executing arbitrary code (unlike eval).
                    line = ast.literal_eval(line)
lines[line[0]] = line[1:]
files_to_keep.add(line[0])
for file_path, file_obj in images:
if file_path.startswith(_IMAGES_DIR):
image_id = file_path[len(_IMAGES_DIR): -len(".jpg")]
if image_id in files_to_keep:
line = lines[image_id]
yield file_path, {
"image": {"path": file_path, "bytes": file_obj.read()},
"text": line[0],
"label": line[-1]
}
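# Usage sketch (an assumption, not part of the loading script itself): point `load_dataset`
# at this file with the `data/` directory available alongside it.
#
#     from datasets import load_dataset
#     ds = load_dataset("multimodal_sarcasm_detection.py")
#     print(ds["train"][0]["text"], ds["train"][0]["label"])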