import os
import json
import csv
import datasets
_DESCRIPTION = """
A large-scale speech corpus for representation learning, semi-supervised learning and interpretation.
"""
_CITATION = """
@inproceedings{}
"""
_HOMEPAGE = ""
_LICENSE = ""
_ASR_LANGUAGES = ["hy"]  # Armenian
_ASR_ACCENTED_LANGUAGES = []  # no accented variants yet
_LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES
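# _LANGUAGES is currently unused below; it is kept for parity with the
# VoxPopuli-style loader this script is modeled on.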
_BASE_DATA_DIR = "data/"
_N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{split}/{split}_dataset.tar.gz"
_METADATA_PATH = _BASE_DATA_DIR + "{split}.tsv"
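# Assumed on-disk layout (inferred from the path templates above; the exact
# n_files.json contents and TSV columns are assumptions, not confirmed):
#
#   data/
#     n_files.json                     e.g. {"train": 1, "dev": 1, "test": 1}
#     {split}/{split}_dataset.tar.gz   one tar.gz of .wav files per split
#     {split}.tsv                      tab-separated; must contain an "audio_path"
#                                      column plus the fields read in _generate_examples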
class HySpeech(datasets.GeneratorBasedBuilder):
"""The VoxPopuli dataset."""
VERSION = datasets.Version("1.1.0") # TODO: version
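    # Each example embeds raw audio bytes, so keep write batches small to
    # bound memory use while the Arrow file is written.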
DEFAULT_WRITER_BATCH_SIZE = 256
def _info(self):
features = datasets.Features(
{
"audio_id": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16_000),
"raw_text": datasets.Value("string"),
"normalized_text": datasets.Value("string"),
"gender": datasets.Value("string"), # TODO: ClassVar?
"speaker_id": datasets.Value("string"),
"is_gold_transcript": datasets.Value("bool"),
"accent": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
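        # Download the shard-count file first. It is read for reference only:
        # the code below assumes a single archive per split rather than using
        # n_shards to enumerate shards.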
n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
with open(n_shards_path) as f:
n_shards = json.load(f)
splits = ["train", "dev", "test"]
        # One audio archive and one metadata TSV per split.
        audio_urls = {split: [_AUDIO_ARCHIVE_PATH.format(split=split)] for split in splits}
        meta_urls = {split: _METADATA_PATH.format(split=split) for split in splits}
# dl_manager.download_config.num_proc = len(urls)
        meta_paths = dl_manager.download_and_extract(meta_urls)
        audio_paths = dl_manager.download(audio_urls)
        # Extract locally only in non-streaming mode; when streaming, examples
        # are read straight from the tar archives via iter_archive and the
        # local path stays None (see _generate_examples).
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths)
            if not dl_manager.is_streaming
            else {split: [None] * len(audio_paths[split]) for split in splits}
        )
        split_to_name = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_to_name[split],
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths[split]],
                    "local_extracted_archives_paths": local_extracted_audio_paths[split],
                    "metadata_paths": meta_paths[split],
                },
            )
            for split in splits
        ]
    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
        features = ["raw_text", "normalized_text", "speaker_id", "gender"]
        # metadata_paths is the single TSV for this split; index rows by the
        # audio file stem so archive members can be matched to their metadata.
        with open(metadata_paths, encoding="utf-8") as f:
            metadata = {
                row["audio_path"].split("/")[-1].split(".wav")[0]: row
                for row in csv.DictReader(f, delimiter="\t")
            }
        for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
            for audio_filename, audio_file in audio_archive:
                # Tar member names use "/" separators on every platform.
                audio_id = audio_filename.split("/")[-1].split(".wav")[0]
                path = (
                    os.path.join(local_extracted_archive_path, audio_filename)
                    if local_extracted_archive_path
                    else audio_filename
                )
                row = metadata[audio_id]
                yield audio_id, {
                    "audio_id": audio_id,
                    **{feature: row[feature] for feature in features},
                    # The schema in _info also declares these two fields; the
                    # TSV is assumed to provide them, with booleans as text.
                    "is_gold_transcript": row["is_gold_transcript"] == "True",
                    "accent": row["accent"],
                    "audio": {"path": path, "bytes": audio_file.read()},
                }
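# Minimal usage sketch (an illustration, not part of the loader). Assumes this
# script sits next to the data/ directory; on recent versions of `datasets`,
# loading scripts also require trust_remote_code=True:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("path/to/this_script.py", split="train", trust_remote_code=True)
#   sample = ds[0]
#   print(sample["audio_id"], sample["normalized_text"])
#   print(sample["audio"]["sampling_rate"])  # 16_000, per the Audio feature above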