ylacombe (HF staff) committed
Commit
314c28d
1 parent: 8a439ef

Delete multilingual_librispeech.py

Files changed (1)
  1. multilingual_librispeech.py +0 -226
multilingual_librispeech.py DELETED
@@ -1,226 +0,0 @@
- # coding=utf-8
- # Copyright 2022 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """Multilingual LibriSpeech automatic speech recognition dataset."""
-
- import os
-
- import datasets
-
-
- _CITATION = """\
- @article{Pratap2020MLSAL,
-     title={MLS: A Large-Scale Multilingual Dataset for Speech Research},
-     author={Vineel Pratap and Qiantong Xu and Anuroop Sriram and Gabriel Synnaeve and Ronan Collobert},
-     journal={ArXiv},
-     year={2020},
-     volume={abs/2012.03411}
- }
- """
-
- _DESCRIPTION = """\
- This is a streamable version of the Multilingual LibriSpeech (MLS) dataset.
- The data archives were restructured from the original ones from [OpenSLR](http://www.openslr.org/94)
- to make them easier to stream.
-
- The MLS dataset is a large multilingual corpus suitable for speech research.
- It is derived from read audiobooks from LibriVox and consists of 8 languages:
- English, German, Dutch, Spanish, French, Italian, Portuguese, Polish.
- """
-
- _URL = "http://www.openslr.org/94"
-
- _DL_URL_FORMAT = "data/mls_{name}"
-
-
- class MultilingualLibrispeechConfig(datasets.BuilderConfig):
-     """BuilderConfig for MultilingualLibrispeech."""
-
-     def __init__(self, name, **kwargs):
-         """
-         Args:
-             name: `string`, name of the dataset config (= language)
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(MultilingualLibrispeechConfig, self).__init__(
-             version=datasets.Version("2.1.0", ""), name=name, **kwargs
-         )
-         # relative path to the full data inside the repo (for example `data/mls_german`)
-         self.data_root_url = _DL_URL_FORMAT.format(name=name)
-
-
- class MultilingualLibrispeech(datasets.GeneratorBasedBuilder):
-     """Multilingual LibriSpeech dataset."""
-
-     BUILDER_CONFIGS = [
-         MultilingualLibrispeechConfig(name="german", description="German LibriSpeech dataset"),
-         MultilingualLibrispeechConfig(name="dutch", description="Dutch LibriSpeech dataset"),
-         MultilingualLibrispeechConfig(name="french", description="French LibriSpeech dataset"),
-         MultilingualLibrispeechConfig(name="spanish", description="Spanish LibriSpeech dataset"),
-         MultilingualLibrispeechConfig(name="italian", description="Italian LibriSpeech dataset"),
-         MultilingualLibrispeechConfig(name="portuguese", description="Portuguese LibriSpeech dataset"),
-         MultilingualLibrispeechConfig(name="polish", description="Polish LibriSpeech dataset"),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "file": datasets.Value("string"),
-                     "audio": datasets.features.Audio(sampling_rate=16_000),
-                     "text": datasets.Value("string"),
-                     "speaker_id": datasets.Value("int64"),
-                     "chapter_id": datasets.Value("int64"),
-                     "id": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=("file", "text"),
-             homepage=_URL,
-             citation=_CITATION,
-             task_templates=None,
-         )
-
-     def _split_generators(self, dl_manager):
-         transcripts = dl_manager.download({
-             "train": self.config.data_root_url + "/train/transcripts.txt",
-             "dev": self.config.data_root_url + "/dev/transcripts.txt",
-             "test": self.config.data_root_url + "/test/transcripts.txt",
-         })
-
-         # Download the handles.txt files containing the ids of the limited supervision train sets
-         limited_supervision_9h = dl_manager.download(
-             [self.config.data_root_url + "/train/limited_supervision/9hr/handles.txt"],
-         )
-         # for the 1-hour limited supervision set ("train.1h") there are always 6 subfolders like:
-         # "limited_supervision/1hr/0/handles.txt", "limited_supervision/1hr/1/handles.txt", ...
-         limited_supervision_1h = dl_manager.download([
-             self.config.data_root_url + f"/train/limited_supervision/1hr/{i}/handles.txt" for i in range(6)
-         ])
-
-         # each split contains many .tar.gz archives with its audio files;
-         # audio_filenames.txt contains the names of these archives
-         audio_filenames_paths = dl_manager.download({
-             "train": self.config.data_root_url + "/train/audio_filenames.txt",
-             "dev": self.config.data_root_url + "/dev/audio_filenames.txt",
-             "test": self.config.data_root_url + "/test/audio_filenames.txt",
-         })
-
-         audio_archives = {}
-         for split in audio_filenames_paths:
-             with open(audio_filenames_paths[split], encoding="utf-8") as f:
-                 audio_filenames = [line.strip() for line in f.readlines()]
-             audio_archives[split] = dl_manager.download([
-                 self.config.data_root_url + "/" + split + "/audio/" + filename
-                 for filename in audio_filenames
-             ])
-
-         # (Optional) In non-streaming mode, we can extract the archives locally to get actual local audio files:
-         local_extracted_archives = dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {}
-
-         train_splits = [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "transcript_path": transcripts["train"],
-                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
-                     "local_extracted_archive": local_extracted_archives.get("train"),
-                 }
-             ),
-             datasets.SplitGenerator(
-                 name="train.9h",
-                 gen_kwargs={
-                     "transcript_path": transcripts["train"],
-                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
-                     "local_extracted_archive": local_extracted_archives.get("train"),
-                     "limited_ids_paths": tuple(limited_supervision_9h),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="train.1h",
-                 gen_kwargs={
-                     "transcript_path": transcripts["train"],
-                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
-                     "local_extracted_archive": local_extracted_archives.get("train"),
-                     "limited_ids_paths": tuple(limited_supervision_1h),
-                 },
-             ),
-         ]
-
-         return train_splits + [
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION, gen_kwargs={
-                     "transcript_path": transcripts["dev"],
-                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["dev"]],
-                     "local_extracted_archive": local_extracted_archives.get("dev"),
-                 }
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={
-                     "transcript_path": transcripts["test"],
-                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["test"]],
-                     "local_extracted_archive": local_extracted_archives.get("test"),
-                 }
-             ),
-         ]
-
-     def _generate_examples(self, transcript_path, audio_archives, local_extracted_archive, limited_ids_paths=None):
-         """Generate examples from a Multilingual LibriSpeech data dir."""
-         transcripts = dict()
-         with open(transcript_path, "r", encoding="utf-8") as file:
-             for line in file:
-                 audio_id, transcript = line.strip().split("\t")
-                 transcripts[audio_id] = transcript
-
-         limited_ids, limited_ids_archives_names = [], []
-         if limited_ids_paths:
-             for path in limited_ids_paths:
-                 with open(path, "r", encoding="utf-8") as file:
-                     limited_ids.extend([line.strip() for line in file.readlines()])
-
-             limited_ids = set(limited_ids)
-
-         for archive_idx, audio_archive in enumerate(audio_archives):
-             # TODO: skip the whole archive if it contains none of the needed ids
-             # if limited_ids and audio_archive not in limited_ids_archives_names:
-             #     continue
-
-             for audio_filename, file in audio_archive:
-                 speaker_id, chapter_id = audio_filename.split("_")[:2]
-                 speaker_id, chapter_id = int(speaker_id), int(chapter_id)
-                 audio_id = audio_filename.split(".flac")[0]
-                 audio_transcript = transcripts[audio_id]
-
-                 if limited_ids and audio_id not in limited_ids:
-                     # this can only happen in the limited supervision sets ("train.9h" and "train.1h")
-                     continue
-
-                 local_audio_file_path = os.path.join(
-                     local_extracted_archive[archive_idx], audio_filename
-                 ) if local_extracted_archive else None
-
-                 yield audio_filename, {
-                     "file": local_audio_file_path,
-                     "audio": {
-                         "path": local_audio_file_path if local_audio_file_path else audio_filename,
-                         "bytes": file.read()
-                     },
-                     "text": audio_transcript,
-                     "speaker_id": speaker_id,
-                     "chapter_id": chapter_id,
-                     "id": audio_id
-                 }
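
For reference, the deleted script defined the dataset's public interface: seven language configs ("german", "dutch", "french", "spanish", "italian", "portuguese", "polish") and the splits "train", "train.9h", "train.1h", "validation", and "test". Below is a minimal sketch of how a script-based dataset like this is typically loaded. The repo id is an assumption (substitute the repo this commit belongs to), and recent versions of datasets additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# Hypothetical repo id -- replace with the actual dataset repo.
REPO_ID = "facebook/multilingual_librispeech"

# Stream the full German training set: the .tar.gz audio archives are
# read on the fly via iter_archive instead of being extracted locally.
mls_german = load_dataset(REPO_ID, "german", split="train", streaming=True)

# One of the limited-supervision splits defined by the script above.
mls_german_9h = load_dataset(REPO_ID, "german", split="train.9h", streaming=True)

sample = next(iter(mls_german))
print(sample["id"], sample["speaker_id"], sample["text"])

In streaming mode the builder never calls dl_manager.extract, so the "file" field is None and the audio bytes come straight out of the archive members; this is the access pattern the restructured archives mentioned in _DESCRIPTION were designed for.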