carlosdanielhernandezmena committed
Commit 56cc264
1 Parent(s): 330d598

Convert dataset to Parquet (#1)


- Convert dataset to Parquet (908b83ab28028dfe76c76b9cb7ecdce6a65f5c1b)
- Delete data file (4af3049261e4b9df1a8e97ec0cd31d3beec13ed4)
- Delete data file (0780f12e2d7b18d047d85ce0a68d1c200a3fade8)
- Delete loading script (4fd54633022727978d9b9415ad1a05736271f11d)
- Delete data file (4b4e6a994008cb818df5105bdbbc71d4cc478c91)

README.md CHANGED
@@ -1,3 +1,32 @@
  ---
  license: cc-by-nc-nd-4.0
+ dataset_info:
+   config_name: tedx_spanish
+   features:
+   - name: audio_id
+     dtype: string
+   - name: audio
+     dtype:
+       audio:
+         sampling_rate: 16000
+   - name: speaker_id
+     dtype: string
+   - name: gender
+     dtype: string
+   - name: duration
+     dtype: float32
+   - name: normalized_text
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 1597201769.901
+     num_examples: 11243
+   download_size: 1610347743
+   dataset_size: 1597201769.901
+ configs:
+ - config_name: tedx_spanish
+   data_files:
+   - split: train
+     path: tedx_spanish/train-*
+   default: true
  ---
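
With this conversion, the `tedx_spanish` config above becomes the default and its shards live under `tedx_spanish/train-*`, so the corpus loads without the custom script that this commit removes. A minimal sketch (the repo id `ciempiess/tedx_spanish` is taken from the homepage URL in the deleted script below; any recent `datasets` release should behave this way):

```python
from datasets import load_dataset

# "tedx_spanish" is the only (and default) config, so naming it explicitly is optional.
ds = load_dataset("ciempiess/tedx_spanish", "tedx_spanish", split="train")

# Rows expose the features declared in the YAML above, including the decoded audio.
print(ds[0]["normalized_text"], ds[0]["audio"]["sampling_rate"])
```
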
corpus/files/metadata_train.tsv DELETED
The diff for this file is too large to render. See raw diff
 
corpus/files/tars_train.paths DELETED
@@ -1 +0,0 @@
- corpus/speech/train.tar.gz
 
 
tedx_spanish.py DELETED
@@ -1,119 +0,0 @@
- from collections import defaultdict
- import os
- import json
- import csv
- import datasets
-
- _NAME="tedx_spanish"
- _VERSION="1.0.0"
-
- _DESCRIPTION = """
- The TEDX SPANISH CORPUS is a dataset created from TEDx talks in Spanish and it
- aims to be used in the Automatic Speech Recognition (ASR) Task.
- """
-
- _CITATION = """
- @misc{carlosmenatedxspanish2019,
-       title={TEDX SPANISH CORPUS: Audio and Transcripts in Spanish in a CIEMPIESS Corpus style, taken from the TEDx Talks.},
-       author={Hernandez Mena, Carlos Daniel},
-       year={2019},
-       url={https://huggingface.co/ciempiess/tedx_spanish},
- }
- """
-
- _HOMEPAGE = "https://huggingface.co/ciempiess/tedx_spanish"
-
- _LICENSE = "CC-BY-NC-ND-4.0, See https://creativecommons.org/licenses/by-nc-nd/4.0/"
-
- _BASE_DATA_DIR = "corpus/"
- _METADATA_TRAIN = os.path.join(_BASE_DATA_DIR,"files", "metadata_train.tsv")
-
- _TARS_TRAIN = os.path.join(_BASE_DATA_DIR,"files", "tars_train.paths")
-
- class TedxSpanishConfig(datasets.BuilderConfig):
-     """BuilderConfig for TEDX SPANISH CORPUS"""
-
-     def __init__(self, name, **kwargs):
-         name=_NAME
-         super().__init__(name=name, **kwargs)
-
- class TedxSpanish(datasets.GeneratorBasedBuilder):
-     """TEDX SPANISH CORPUS"""
-
-     VERSION = datasets.Version(_VERSION)
-     BUILDER_CONFIGS = [
-         TedxSpanishConfig(
-             name=_NAME,
-             version=datasets.Version(_VERSION),
-         )
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "audio_id": datasets.Value("string"),
-                 "audio": datasets.Audio(sampling_rate=16000),
-                 "speaker_id": datasets.Value("string"),
-                 "gender": datasets.Value("string"),
-                 "duration": datasets.Value("float32"),
-                 "normalized_text": datasets.Value("string"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-
-         metadata_train=dl_manager.download_and_extract(_METADATA_TRAIN)
-
-         tars_train=dl_manager.download_and_extract(_TARS_TRAIN)
-
-         hash_tar_files=defaultdict(dict)
-
-         with open(tars_train,'r') as f:
-             hash_tar_files['train']=[path.replace('\n','') for path in f]
-
-         hash_meta_paths={"train":metadata_train}
-         audio_paths = dl_manager.download(hash_tar_files)
-
-         splits=["train"]
-         local_extracted_audio_paths = (
-             dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
-             {
-                 split:[None] * len(audio_paths[split]) for split in splits
-             }
-         )
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
-                     "local_extracted_archives_paths": local_extracted_audio_paths["train"],
-                     "metadata_paths": hash_meta_paths["train"],
-                 }
-             ),
-         ]
-
-     def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
-
-         features = ["speaker_id","gender","duration","normalized_text"]
-
-         with open(metadata_paths) as f:
-             metadata = {x["audio_id"]: x for x in csv.DictReader(f, delimiter="\t")}
-
-         for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
-             for audio_filename, audio_file in audio_archive:
-                 audio_id =os.path.splitext(os.path.basename(audio_filename))[0]
-                 path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
-
-                 yield audio_id, {
-                     "audio_id": audio_id,
-                     **{feature: metadata[audio_id][feature] for feature in features},
-                     "audio": {"path": path, "bytes": audio_file.read()},
-                 }
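
The deleted builder above streamed audio out of `corpus/speech/train.tar.gz` via `dl_manager.iter_archive` and joined it against `metadata_train.tsv`. After the Parquet conversion those same columns are stored per row, so streaming goes through the Hub's generic Parquet loader instead. A hedged sketch, again assuming the `ciempiess/tedx_spanish` repo id and a `datasets` version that provides `IterableDataset.take`:

```python
from datasets import load_dataset

# Stream rows without downloading the ~1.6 GB of Parquet shards up front.
ds = load_dataset("ciempiess/tedx_spanish", split="train", streaming=True)

for example in ds.take(2):
    # Audio bytes stored in the Parquet rows are decoded on the fly.
    print(example["audio_id"], example["duration"], example["audio"]["sampling_rate"])
```
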
corpus/speech/train.tar.gz → tedx_spanish/train-00000-of-00004.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:01aeabd5ec31ac92665a1a895606bc0ef44a9869491711c90aa502ad6a751619
- size 1610753276
+ oid sha256:c84f0306e618f753eadb1107ba0d1895380287d5f4fc98f812d24ed8924cb908
+ size 404310866
tedx_spanish/train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bfeafd8c718fb0ddde5b58bebecde388163808e59306d7189a86795d46f53d4
+ size 399544824
tedx_spanish/train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b133c028619658783cb1b3505bca47f79f68a4b8cdc02596d3f127ff3a6f5137
+ size 396369080
tedx_spanish/train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8fa4e887b3c791a44ec6612716217a665e2cfcf1d04a69bfde2470adcc9f39a
+ size 410122973