from collections import defaultdict
import os
import json
import csv

import datasets


_DESCRIPTION = """
A large-scale multilingual speech corpus for representation learning, semi-supervised learning and interpretation.
"""

_CITATION = """
@inproceedings{wang-etal-2021-voxpopuli,
    title = "{V}ox{P}opuli: A Large-Scale Multilingual Speech Corpus for Representation Learning,
    Semi-Supervised Learning and Interpretation",
    author = "Wang, Changhan and
      Riviere, Morgane and
      Lee, Ann and
      Wu, Anne and
      Talnikar, Chaitanya and
      Haziza, Daniel and
      Williamson, Mary and
      Pino, Juan and
      Dupoux, Emmanuel",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics
    and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = aug,
    year = "2021",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-long.80",
    doi = "10.18653/v1/2021.acl-long.80",
    pages = "993--1003",
}
"""

_HOMEPAGE = "https://github.com/facebookresearch/voxpopuli"

_LICENSE = "CC0, also see https://www.europarl.europa.eu/legal-notice/en/"

_ASR_LANGUAGES = [
    "hy",
]
_ASR_ACCENTED_LANGUAGES = [
    "en_accented",
]

_LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES

_BASE_DATA_DIR = "data/"

_N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"

_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{split}/{split}_dataset.tar.gz"

_METADATA_PATH = _BASE_DATA_DIR + "{split}.tsv"
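
# Note: _N_SHARDS_FILE and _AUDIO_ARCHIVE_PATH appear to be carried over from the
# upstream VoxPopuli loading script; the builder below reads the metadata TSVs and
# audio files directly from data/{split}/ and does not reference them.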


class HySpeech(datasets.GeneratorBasedBuilder):
    """The HySpeech dataset, a VoxPopuli-derived speech corpus."""

    VERSION = datasets.Version("1.1.0")

    # Audio examples are large, so a smaller writer batch size keeps the
    # Arrow writer's memory footprint bounded while generating the dataset.
    DEFAULT_WRITER_BATCH_SIZE = 256

    def _info(self):
        features = datasets.Features(
            {
                "audio_id": datasets.Value("string"),
                "language": datasets.ClassLabel(names=_LANGUAGES),
                "audio": datasets.Audio(sampling_rate=16_000),
                "raw_text": datasets.Value("string"),
                "normalized_text": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "is_gold_transcript": datasets.Value("bool"),
                "accent": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Metadata TSVs and audio files are read directly from the local data/
        # directory declared above; the download manager is not used here.
        data_dir = _BASE_DATA_DIR
        train_data_dir = os.path.join(data_dir, "train")
        dev_data_dir = os.path.join(data_dir, "dev")
        test_data_dir = os.path.join(data_dir, "test")

        train_metadata_path = os.path.join(data_dir, "train.tsv")
        dev_metadata_path = os.path.join(data_dir, "dev.tsv")
        test_metadata_path = os.path.join(data_dir, "test.tsv")

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dir": train_data_dir, "metadata_path": train_metadata_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data_dir": dev_data_dir, "metadata_path": dev_metadata_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_dir": test_data_dir, "metadata_path": test_metadata_path}),
        ]

    def _generate_examples(self, data_dir, metadata_path):
        with open(metadata_path, "r", encoding="utf-8") as f:
            metadata = csv.DictReader(f, delimiter="\t")

            # Keep iteration inside the `with` block: DictReader reads lazily
            # from the open file handle.
            for row in metadata:
                audio_id = row["audio_id"]
                audio_path = os.path.join(data_dir, row["audio_path"])

                with open(audio_path, "rb") as audio_file:
                    yield audio_id, {
                        "audio_id": audio_id,
                        "language": row["language"],
                        "audio": {"path": audio_path, "bytes": audio_file.read()},
                        # The remaining columns are assumed to be present in the metadata
                        # TSV; .get() falls back to defaults so that every example still
                        # matches the schema declared in _info(). is_gold_transcript is
                        # assumed to be stored as the text "True"/"False".
                        "raw_text": row.get("raw_text", ""),
                        "normalized_text": row.get("normalized_text", ""),
                        "gender": row.get("gender", ""),
                        "speaker_id": row.get("speaker_id", ""),
                        "is_gold_transcript": row.get("is_gold_transcript", "") == "True",
                        "accent": row.get("accent", ""),
                    }
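

# Usage sketch. This assumes a `datasets` release that still supports script-based
# loaders (and accepts `trust_remote_code`), and that the data/ layout described
# above exists next to this file; field accesses mirror the schema in _info().
if __name__ == "__main__":
    # Point load_dataset at this script directly to build the train split locally.
    ds = datasets.load_dataset(__file__, split="train", trust_remote_code=True)
    sample = ds[0]  # decoding the "audio" column needs an audio backend (e.g. soundfile)
    print(sample["audio_id"], sample["normalized_text"])
    print(sample["audio"]["sampling_rate"])  # 16000, as declared in _info()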