ingerid committed on
Commit
8e3b1c0
1 Parent(s): ae7d5d9

fix loading script, rename dev data paths to validation

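Since the commit renames the `dev` split files to `validation`, the split should now load under the standard name. A minimal usage sketch, not part of the commit itself (the repo id `Sprakbanken/nb_samtale` and the default `annotations` config both appear in the script diff below):

```python
from datasets import load_dataset

# Load the renamed validation split; "annotations" is the script's default config.
data = load_dataset("Sprakbanken/nb_samtale", "annotations", split="validation")
print(data[0]["source_file_id"])  # field defined in the script's _info()
```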
data/{dev_bm_1.tar.gz → validation_bm_1.tar.gz} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:798e05953acb17a50050447b106cc703902a8ef7d7696c5f408bae4089a1258a
-size 170237998
+oid sha256:bd00721de3280d7badd3d21748449c2e6e85656b0f1efb43e06b3b13ef26b7f9
+size 204513280
data/{dev_metadata.jsonl → validation_metadata.jsonl} RENAMED
The diff for this file is too large to render. See raw diff
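Although the metadata diff is not rendered, the loading script below reads this file as JSON Lines and looks entries up by the audio file's path inside the tar archive. A sketch of that consumption; the name of the key field ("path" here) is an assumption, since the relevant lines are truncated in the rendered diff:

```python
import json

# Hypothetical reading of {split}_metadata.jsonl: one JSON object per line,
# indexed by the member path of the corresponding audio file in the archive.
# The key field name "path" is an assumption; the diff truncates that code.
meta = {}
with open("data/validation_metadata.jsonl", encoding="utf-8") as mf:
    for line in mf.read().splitlines():
        row = json.loads(line)
        meta[row["path"]] = row
```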
 
data/{dev_nn_1.tar.gz → validation_nn_1.tar.gz} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e0152d1c323bf089fd9599f9de52742e107da208e4f921ab002bdcdcba8d796
-size 52968093
+oid sha256:7d88d67097644ac8ab152d9698081a7f9186aa55ea1d7a38637b0b1031b4e5c2
+size 63129600
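Both archive diffs above are Git LFS pointer files rather than the archives themselves: each records the sha256 and byte size of the real object, and the changed oid/size shows the archive contents were replaced, not merely renamed. A sketch, not part of the repo, of verifying a downloaded archive against the three-line pointer format shown above:

```python
import hashlib
from pathlib import Path

# Verify a downloaded blob against a Git LFS pointer file of the form:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hex digest>
#   size <bytes>
def verify_lfs_object(pointer_file: str, blob_file: str) -> bool:
    fields = dict(line.split(" ", 1) for line in Path(pointer_file).read_text().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    digest = hashlib.sha256()
    size = 0
    with open(blob_file, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size
```

Against this commit, the pointer at `data/validation_bm_1.tar.gz` should report digest `bd00721d…` and 204513280 bytes.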
nb_samtale.py CHANGED
@@ -75,6 +75,7 @@ def normalize_transcription(transcription: str, config="annotations"):
         return transcription
     return transcription
 
+
 class NBSamtaleConfig(datasets.BuilderConfig):
     """BuilderConfig for NBSamtale"""
 
@@ -82,23 +83,12 @@ class NBSamtaleConfig(datasets.BuilderConfig):
         # Version history:
         # 1.0.0: Initial version.
         super(NBSamtaleConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-        #self.language = language
 
 
 class NBSamtale(datasets.GeneratorBasedBuilder):
     """Norwegian conversational speech audio dataset with a total of 24 hours transcribed speech from 69 speakers. """
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
     BUILDER_CONFIG_CLASS = NBSamtaleConfig
 
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('Sprakbanken/nb_samtale', 'normalized')
-    # data = datasets.load_dataset('Sprakbanken/nb_samtale', 'verbatim')
     BUILDER_CONFIGS = [
         NBSamtaleConfig(name="annotations", description="Transcriptions contain original annotations, including hesitations, laughter, interruptions etc. See https://www.nb.no/sbfil/taledata/NB_Samtale_About_the_corpus.pdf section 'Transcriptions' for more information."),
         NBSamtaleConfig(name="orthographic", description="Transcriptions have been normalized and word forms that comply with the orthographic standard are chosen, even for dialect specific words, e.g. 'korsen'/'kossen' is replaced with 'hvordan' in bokmål, or 'korleis' in nynorsk."),
@@ -107,7 +97,7 @@ class NBSamtale(datasets.GeneratorBasedBuilder):
         #NBSamtaleConfig(name="nn", language="nynorsk", description="Normalized nynorsk transcriptions. Word forms that comply with the orthographic standard are chosen, e.g. 'kossen' is replaced with 'korleis'."),
     ]
 
-    DEFAULT_CONFIG_NAME = "annotations" # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "annotations"
 
     def _info(self):
         """This method specifies the datasets.DatasetInfo object
@@ -115,9 +105,7 @@ class NBSamtale(datasets.GeneratorBasedBuilder):
         """
 
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
             features=datasets.Features(
                 {
                     'source_file_id': datasets.Value(dtype='string'),
@@ -136,14 +124,9 @@ class NBSamtale(datasets.GeneratorBasedBuilder):
                     'audio': datasets.Audio(sampling_rate=16000, mono=True, decode=True),
                 }
             ),
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            supervised_keys=None,
-            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
-            # License for the dataset if available
            license=_LICENSE,
-            # Citation for the dataset
            citation=_CITATION,
        )
 
@@ -155,18 +138,17 @@ class NBSamtale(datasets.GeneratorBasedBuilder):
         split_type = {
             "train": datasets.Split.TRAIN,
             "test": datasets.Split.TEST,
-            "dev": datasets.Split.VALIDATION,
+            "validation": datasets.Split.VALIDATION,
         }
         for split in split_type:
-            #audio_path[split] = dl_manager.download([f"data/{split}_{lang}_1.tar.gz" for lang in ["bm", "nn"]])
-            audio_path[split] = dl_manager.download(f"data/{split}_bm_1.tar.gz")
+            audio_path[split] = dl_manager.download([f"data/{split}_{lang}_1.tar.gz" for lang in ["bm", "nn"]])
 
         return [
             datasets.SplitGenerator(
                 name=split_type[split],
                 gen_kwargs={
                     "local_extracted_archive": dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None,
-                    "audio_files": dl_manager.iter_archive(audio_path[split]),#[dl_manager.iter_archive(archive) for archive in audio_path[split]],
+                    "audio_files":[dl_manager.iter_archive(archive) for archive in audio_path[split]], #dl_manager.iter_archive(audio_path[split]),
                     "metadata": dl_manager.download_and_extract(f"data/{split}_metadata.jsonl"),
                 }
             ) for split in split_type
@@ -175,9 +157,6 @@ class NBSamtale(datasets.GeneratorBasedBuilder):
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, local_extracted_archive, audio_files, metadata):
         """Loads the data files and extract the features."""
-        #data_fields = list(self._info().features.keys())
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         meta = {}
         with open(metadata, encoding="utf-8") as mf:
             datalines = mf.read().splitlines()
@@ -189,15 +168,14 @@ class NBSamtale(datasets.GeneratorBasedBuilder):
 
         id_ = 0
 
-        #for archive in audio_files:
-        for path, audio_file in audio_files:
-            if not path in meta:
-                print(f"{path} not in metadata")
-            else:
-                result = dict(meta[path])
-                # set the audio feature and the path to the extracted file
-                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
-                result["audio"] = {"path": path, "bytes": audio_file.read()}
-                result["path"] = path
-                yield id_, result
-                id_ += 1
+        for archive in audio_files:
+            for path, audio_file in archive:
+                if not path in meta:
+                    print(f"{path} not in metadata")
+                else:
+                    result = dict(meta[path])
+                    # set the audio feature and the path to the extracted file
+                    path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
+                    result["audio"] = {"path": path, "bytes": audio_file.read()}
+                    yield id_, result
+                    id_ += 1
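The substantive fix above is that each split now maps to a list of archives, one per language ("bm" and "nn"), instead of a single bokmål archive, so `_generate_examples` must loop over archives and then over their members. A self-contained sketch of that pattern, with a `tarfile`-based stand-in for `dl_manager.iter_archive` (which likewise yields `(path, file object)` pairs):

```python
import tarfile

# Stand-in for dl_manager.iter_archive: yield (member_path, file object) pairs
# from one tar archive, mirroring what the loading script consumes.
def iter_archive(archive_path):
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

# One archive per language means one iterator per archive, hence the nested
# loop the commit introduces in _generate_examples.
archives = [iter_archive(f"data/validation_{lang}_1.tar.gz") for lang in ["bm", "nn"]]
for archive in archives:
    for path, audio_file in archive:
        audio_bytes = audio_file.read()  # matched against metadata by `path`
```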