# coding=utf-8
"""Self-use dataset loading script for the Hugging Face `datasets` library."""

import csv
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{nothing,
  title={Self-use DataSets},
  author={Stan},
  journal={},
  year={2023}
}
"""

_DESCRIPTION = """\
Self-use DataSets
"""

_HOMEPAGE_URL = "https://arxiv.org/abs/2104.08524"


_DATA_URL = "https://asr-1258129568.cos.ap-shanghai.myqcloud.com/DataSets-0.zip"


class Minds14Config(datasets.BuilderConfig):
    """BuilderConfig for the self-use dataset."""

    def __init__(self, name, description, homepage, data_url):
        super(Minds14Config, self).__init__(
            name=name,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.homepage = homepage
        self.data_url = data_url


def _build_config(name):
    return Minds14Config(
        name=name,
        description=_DESCRIPTION,
        homepage=_HOMEPAGE_URL,
        data_url=_DATA_URL,
    )


class Minds14(datasets.GeneratorBasedBuilder):
    """Self-use speech dataset with audio paths and text references."""

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [_build_config("demo")]

    def _info(self):
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "reference": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "reference"),
            homepage=self.config.homepage,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        # Download the top-level archive, then extract the nested audio and text zips.
        archive_path = dl_manager.download_and_extract(_DATA_URL)
        audio_path = dl_manager.extract(
            os.path.join(archive_path, "DataSets-0", "audio.zip")
        )
        text_path = dl_manager.extract(
            os.path.join(archive_path, "DataSets-0", "text.zip")
        )

        text_paths = {"demo": os.path.join(text_path, "demo.csv")}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_path": audio_path,
                    "text_paths": text_paths,
                },
            )
        ]

    def _generate_examples(self, audio_path, text_paths):
        key = 0
        for lang, text_path in text_paths.items():
            with open(text_path, encoding="utf-8") as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
                next(csv_reader)  # skip the header row
                for row in csv_reader:
                    filepath, reference = row
                    filepath = os.path.join(audio_path, *filepath.split("/"))
                    yield key, {
                        "path": filepath,
                        "audio": filepath,
                        "reference": reference,
                    }
                    key += 1
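

if __name__ == "__main__":
    # A minimal smoke-test sketch, not part of the loading script itself.
    # Assumptions: the archive at _DATA_URL is reachable, the installed
    # `datasets` version still supports loading local dataset scripts, and an
    # audio backend (e.g. soundfile) is available to decode the 16 kHz audio.
    ds = datasets.load_dataset(__file__, "demo", split="train")
    print(ds[0]["path"], ds[0]["reference"])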