|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Waxal Wolof Dataset.""" |
|
|
|
|
|
import csv |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
|
|
|
|
# BibTeX citation surfaced via DatasetInfo.
# NOTE(review): this is the unmodified placeholder from the HF dataset-script
# template ("A great new dataset") — TODO replace with the real Waxal citation.
_CITATION = """\

@InProceedings{huggingface:dataset,

title = {A great new dataset},

author={huggingface, Inc.

},

year={2020}

}

"""
|
|
|
|
|
# Project homepage surfaced via DatasetInfo.
# NOTE(review): empty — TODO add the dataset's project page URL if one exists.
_HOMEPAGE = ""

# License string surfaced via DatasetInfo.
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
|
|
|
# Every supported combination of modalities. Each combination yields one
# BuilderConfig whose name is the modalities joined with "-" (see
# WaxalWolof.BUILDER_CONFIGS below).
_MODALITIES_COMBINATION = [
    ["audio", "image", "text"],
    ["audio", "text"],
    ["audio", "image"],
    ["image", "text"],
    ["audio"],
    ["image"],
    ["text"],
]
|
|
|
# Files fetched through the DownloadManager: per-split metadata CSVs plus the
# media archives. The "transcribed-audio" / "captioned-images" archives are the
# text-paired subsets; "audio-files" / "image-files" are the full media dumps.
# NOTE(review): paths are relative — presumably resolved against the dataset
# repository root by the DownloadManager; confirm against the hub layout.
_URLs = {
    "train-transcriptions": "train_transcriptions.csv",
    "test-transcriptions": "test_transcriptions.csv",
    "image-files": "images.tar.gz",
    "captioned-images": "captioned_images.tar.gz",
    "audio-files": "audios.tar.gz",
    "transcribed-audio": "transcribed_audio.tar.gz"
}
|
|
|
|
|
class WaxalConfig(datasets.BuilderConfig):
    """BuilderConfig for the Waxal dataset.

    Stores the modality combination and the language on top of the standard
    BuilderConfig fields, and derives a human-readable description from them.
    """

    def __init__(self, name, version, modalities, **kwargs):
        # Keep the modality list and language on the config so the builder's
        # with_audio / with_image / with_text checks can consult them.
        self.modalities = modalities
        self.language = kwargs.pop("language", None)
        super().__init__(
            name=name,
            version=version,
            description=f"Waxal {' to '.join(self.modalities)} in {self.language}",
            **kwargs,
        )
|
|
|
|
|
class WaxalWolof(datasets.GeneratorBasedBuilder):
    """Dataset builder for Waxal Wolof, one config per modality combination."""

    BUILDER_CONFIGS = [
        WaxalConfig(
            name="-".join(combo),
            version=datasets.Version("1.1.0"),
            modalities=combo,
            language="wolof",
        )
        for combo in _MODALITIES_COMBINATION
    ]

    DEFAULT_CONFIG_NAME = "audio-text"

    @property
    def with_audio(self):
        """True when this config includes the audio modality."""
        return "audio" in self.config.modalities

    @property
    def with_image(self):
        """True when this config includes the image modality."""
        return "image" in self.config.modalities

    @property
    def with_text(self):
        """True when this config includes the text modality."""
        return "text" in self.config.modalities

    def _info(self):
        """Build the DatasetInfo; the feature set depends on the modalities."""
        features = {}
        if self.with_audio:
            features.update(
                audio=datasets.features.Audio(),
                audio_duration=datasets.Value("float"),
                participant=datasets.Value("int32"),
            )
        if self.with_image:
            features["image"] = datasets.features.Image()
        if self.with_text:
            features["text_annotation"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download metadata and archives, then declare the train/test splits."""
        # Text-paired configs use the transcribed/captioned subsets; otherwise
        # fall back to the full media archives.
        if self.with_text:
            audio_key, image_key = "transcribed-audio", "captioned-images"
        else:
            audio_key, image_key = "audio-files", "image-files"

        audio_root = None
        if self.with_audio:
            audio_root = dl_manager.download_and_extract(_URLs[audio_key])

        image_root = None
        if self.with_image:
            image_root = dl_manager.download_and_extract(_URLs[image_key])

        split_specs = (
            (datasets.Split.TRAIN, "train-transcriptions"),
            (datasets.Split.TEST, "test-transcriptions"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "metadata_path": dl_manager.download(_URLs[csv_key]),
                    "audio_files": audio_root,
                    "image_files": image_root,
                },
            )
            for split_name, csv_key in split_specs
        ]

    def _generate_examples(
        self,
        metadata_path,
        audio_files=None,
        path_to_audio="transcribed_audio",
        image_files=None,
        path_to_images="captioned_images",
    ):
        """Yield (idx, example) pairs for one split.

        First pass: read the metadata CSV, drop rows without a transcription
        (text configs only), and attach archive-relative media paths. Second
        pass: load the media bytes and emit examples keyed by the CSV "idx".
        """
        samples = {}
        with open(metadata_path) as csv_file:
            for record in csv.DictReader(csv_file):
                # The prompt column is never exposed as a feature.
                del record["prompt"]
                if self.with_text and not record["transcription"]:
                    continue
                if self.with_image:
                    # NOTE(review): image paths include the language subfolder
                    # while audio paths do not — presumably mirrors the archive
                    # layout; confirm against the tarballs.
                    record["image_file_path"] = os.path.join(
                        path_to_images, self.config.language, record["image_file_name"]
                    )
                if self.with_audio:
                    record["audio_file_path"] = os.path.join(
                        path_to_audio, record["audio_file_name"]
                    )
                samples[record["idx"]] = record

        for key, record in samples.items():
            example = {}
            if self.with_audio:
                example["participant"] = record["participant"]
                example["audio_duration"] = record["duration"]
                audio_path = os.path.join(audio_files, record["audio_file_path"])
                with open(audio_path, "rb") as audio_buf:
                    example["audio"] = {"path": audio_path, "bytes": audio_buf.read()}
            if self.with_image:
                image_path = os.path.join(image_files, record["image_file_path"])
                with open(image_path, "rb") as image_buf:
                    example["image"] = {"path": image_path, "bytes": image_buf.read()}
            if self.with_text:
                example["text_annotation"] = record["transcription"]
            yield key, example
|
|