multimodal_sarcasm_detection / multimodal_sarcasm_detection.py
import datasets
_DESCRIPTION = "none"
_NAMES = ["non-sarcastic", "sarcastic"]
_HOMEPAGE = "https://github.com/headacheboy/data-of-multimodal-sarcasm-detection"
_CITATION = "none"
_LICENSE = "none"
_BASE_URL = "data/images.tar"
_METADATA_URLS = {
    "train": "data/text/train.txt",
    "test": "data/text/test2.txt",
    "valid": "data/text/valid2.txt",
}
_IMAGES_DIR = "images/"
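
# Expected data layout, inferred from the loading code below:
#   data/images.tar   -- tar archive whose members are "images/<image_id>.jpg"
#   data/text/*.txt   -- one Python-literal list per line, of the form
#                        ["<image_id>", "<tweet text>", ..., <label>]
#                        where the last element is the class label
#                        (0 = non-sarcastic, 1 = sarcastic).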

class MultimodalSarcasmDetection(datasets.GeneratorBasedBuilder):
    """MultimodalSarcasmDetection images dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "text": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=(("image", "text"), "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_BASE_URL)
        split_metadata_paths = dl_manager.download(_METADATA_URLS)
        # The same image archive is streamed for every split; the split's
        # metadata file selects which images (and which text/label) it uses.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": split_metadata_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": split_metadata_paths["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": split_metadata_paths["valid"],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path):
        """Generate (image, text, label) examples for a split."""
        lines = {}
        files_to_keep = set()
        with open(metadata_path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    # Each metadata line is a Python-literal list:
                    # [image_id, text, ..., label].
                    line = eval(line)
                    lines[line[0]] = line[1:]
                    files_to_keep.add(line[0])
        for file_path, file_obj in images:
            if file_path.startswith(_IMAGES_DIR):
                image_id = file_path[len(_IMAGES_DIR): -len(".jpg")]
                if image_id in files_to_keep:
                    line = lines[image_id]
                    yield file_path, {
                        "image": {"path": file_path, "bytes": file_obj.read()},
                        "text": line[0],
                        "label": line[-1],
                    }