polinaeterna committed
Commit 28cc287
1 Parent(s): 9f6d2ae

add segmentation (without saving file on disk)
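The point of the change, per the message above, is that segmentation now happens in memory: each downloaded .ogg is loaded once and sliced along the (start, end) offsets read from the unlabelled_v2.tsv.gz annotations, so no per-segment files are written to disk. Below is a minimal sketch of that slicing, assuming torchaudio is installed and that `timestamps` is a list of (start, end) offsets in seconds; the function name is illustrative and not part of the script.

import torchaudio

def iter_segments(ogg_path, timestamps):
    # Load the whole recording once; waveform has shape (channels, num_samples).
    waveform, sampling_rate = torchaudio.load(ogg_path)
    num_samples = waveform.size(1)
    for segment_id, (start, end) in enumerate(timestamps):
        # Slice the tensor directly in memory; nothing is written back to disk.
        segment = waveform[:, int(start * sampling_rate): min(int(end * sampling_rate), num_samples)]
        yield segment_id, segment[0]  # first channel as a 1-D array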
Files changed (1)
  1. voxpopuli.py +75 -19
voxpopuli.py CHANGED
@@ -1,5 +1,9 @@
+from collections import defaultdict
 import os
 import glob
+import csv
+from pathlib import Path
+from tqdm.auto import tqdm

 import datasets

@@ -14,6 +18,7 @@ _LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES]

 _YEARS = list(range(2009, 2020 + 1))

+# unnecessary
 _CONFIG_TO_LANGS = {
     "400k": _LANGUAGES,
     "100k": _LANGUAGES,
@@ -33,7 +38,20 @@ _BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"

 _DATA_URL = _BASE_URL + "audios/{lang}_{year}.tar"

-_META_URL = _BASE_URL + "https://dl.fbaipublicfiles.com/voxpopuli/annotations/unlabelled_v2.tsv.gz"
+_META_URL = _BASE_URL + "annotations/unlabelled_v2.tsv.gz"
+
+
+class VoxpopuliConfig(datasets.BuilderConfig):
+    """BuilderConfig for VoxPopuli."""
+
+    def __init__(self, name, **kwargs):
+        """
+        Args:
+          name: `string`, name of dataset config
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(name=name, **kwargs)
+        self.languages = [name] if name in _LANGUAGES else _LANGUAGES


 class Voxpopuli(datasets.GeneratorBasedBuilder):
@@ -41,7 +59,7 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):

     VERSION = datasets.Version("1.0.0")  # TODO ??
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
+        VoxpopuliConfig(
             name=name,
             # version=VERSION,
             description="",  # TODO
@@ -58,6 +76,7 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
                 "language": datasets.ClassLabel(names=_LANGUAGES),
                 "year": datasets.Value("int16"),
                 "audio": datasets.Audio(sampling_rate=16_000),
+                "segment_id": datasets.Value("int16"),
             }
         )
         return datasets.DatasetInfo(
@@ -68,38 +87,75 @@ class Voxpopuli(datasets.GeneratorBasedBuilder):
             # citation=_CITATION,
         )

-    def _split_generators(self, dl_manager):
-        # dl_manager.download_config.num_proc = len(_VOXPOPULI_AUDIO_URLS)  # TODO
+    def _read_metadata(self, metadata_path):
+        # TODO: check for predicate??
+        # @ https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L34
+        metadata = defaultdict(list)

-        # metadata_path = dl_manager.download_and_extract(_META_URL)
+        with open(metadata_path, encoding="utf-8") as csv_file:
+            csv_reader = csv.reader(csv_file, delimiter="\t")
+            for i, row in tqdm(enumerate(csv_reader)):
+                if i == 0:
+                    continue
+                audio_id, segment_id, start, end = row
+                event_id, lang = audio_id.rsplit("_", 1)[-2:]
+                if lang in self.config.languages:
+                # if lang in ["hr", "et"]:
+                    metadata[audio_id].append((float(start), float(end)))

-        languages = [self.config.name] if self.config.name in _LANGUAGES else _LANGUAGES
-        years = _CONFIG_TO_YEARS[self.config.name]
-        # urls = [_DATA_URL.format(lang=language, year=year) for language in ["hr", "et"] for year in [2020]]
-        urls = [_DATA_URL.format(lang=language, year=year) for language in languages for year in years]
+        return metadata
+
+    def _split_generators(self, dl_manager):
+        metadata_path = dl_manager.download_and_extract(_META_URL)

-        langs_data_dirs = dl_manager.download_and_extract(urls)
-        print(langs_data_dirs)
-        print(glob.glob(f"{langs_data_dirs[0]}/**/*.ogg", recursive=True))
+        years = _CONFIG_TO_YEARS[self.config.name]
+        # urls = [_DATA_URL.format(lang=language, year=year) for language in ["hr", "et"] for year in [2020]]
+        urls = [_DATA_URL.format(lang=language, year=year) for language in self.config.languages for year in years]
+        dl_manager.download_config.num_proc = len(urls)
+        data_dirs = dl_manager.download_and_extract(urls)

         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "data_dirs": langs_data_dirs,
+                    "data_dirs": data_dirs,
+                    "metadata_path": metadata_path,
                 }
             ),
         ]

-    def _generate_examples(self, data_dirs):
+    def _generate_examples(self, data_dirs, metadata_path):
+        try:
+            import torch
+            import torchaudio
+        except ImportError as e:
+            raise ValueError(
+                "Loading voxpopuli requires `torchaudio` to be installed. "
+                "You can install torchaudio with `pip install torchaudio`."
+            ) from e
+
+        metadata = self._read_metadata(metadata_path)
+
         for data_dir in data_dirs:
-            for file in glob.glob(f"{data_dir}/**/*.ogg", recursive=True):
+            for file in glob.glob(f"{data_dir}/**/*.ogg", recursive=True):
                 path_components = file.split(os.sep)
-                language, year = path_components[-3:-1]
-                with open(file) as f:
-                    yield file, {
+                language, year, audio_filename = path_components[-3:]
+                audio_id, _ = os.path.splitext(audio_filename)
+                timestamps = metadata[audio_id]
+
+                waveform, sr = torchaudio.load(file)
+                duration = waveform.size(1)
+
+                for segment_id, (start, stop) in enumerate(timestamps):
+                    segment = waveform[:, int(start * sr): min(int(stop * sr), duration)]
+
+                    yield f"{audio_filename}_{segment_id}", {
                         "path": file,
                         "language": language,
                         "year": year,
-                        "audio": {"path": file}
+                        "audio": {
+                            "array": segment[0],  # segment is a 2-dim array
+                            "sampling_rate": 16_000,
+                        },
+                        "segment_id": segment_id,
                     }
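For reference, a hypothetical way to try the updated script locally; this assumes the file is saved as voxpopuli.py in the working directory and that a single-language config such as "et" is small enough to download in full, neither of which is part of this commit.

from datasets import load_dataset

# Hypothetical usage of the script from this diff; "et" is one of the
# per-language configs handled by VoxpopuliConfig above.
ds = load_dataset("voxpopuli.py", "et", split="train")

example = ds[0]
print(example["segment_id"], example["year"], example["language"])
print(example["audio"]["sampling_rate"], len(example["audio"]["array"]))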