Oleg Somov committed on
Commit
e560e57
1 Parent(s): 4392fcb

delete load script

Files changed (1): pauq.py +0 -285
pauq.py DELETED
@@ -1,285 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """PAUQ: Text-to-SQL in Russian"""
-
- import json
- import os
-
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
- _CITATION = """\
- @inproceedings{bakshandaeva-etal-2022-pauq,
-     title = "{PAUQ}: Text-to-{SQL} in {R}ussian",
-     author = "Bakshandaeva, Daria  and
-       Somov, Oleg  and
-       Dmitrieva, Ekaterina  and
-       Davydova, Vera  and
-       Tutubalina, Elena",
-     booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
-     month = dec,
-     year = "2022",
-     address = "Abu Dhabi, United Arab Emirates",
-     publisher = "Association for Computational Linguistics",
-     url = "https://aclanthology.org/2022.findings-emnlp.175",
- }
- """
-
- _DESCRIPTION = """\
- PAUQ is the first Russian text-to-SQL dataset, translated from the original Spider dataset
- with corrections and refinements of questions, queries and databases.
- """
-
- _LICENSE = "CC BY-SA 4.0"
-
- _HOMEPAGE = "https://github.com/ai-spiderweb/pauq"
-
- _URL = "https://huggingface.co/datasets/composite/pauq/resolve/main/formatted_pauq.zip"
-
- RUSSIAN_PAUQ_TRL_DESCRIPTION = "Russian PAUQ train/test split based on the target length of the SQL query: long query templates in train, short query templates in test."
- ENGLISH_PAUQ_TRL_DESCRIPTION = "English PAUQ train/test split based on the target length of the SQL query: long query templates in train, short query templates in test."
-
- RUSSIAN_PAUQ_TSL_DESCRIPTION = "Russian PAUQ train/test split based on the target length of the SQL query: short query templates in train, long query templates in test."
- ENGLISH_PAUQ_TSL_DESCRIPTION = "English PAUQ train/test split based on the target length of the SQL query: short query templates in train, long query templates in test."
-
- RUSSIAN_PAUQ_OS_DESCRIPTION = "Independent and identically distributed Russian PAUQ train/test split. Corresponds to the original Spider split."
- ENGLISH_PAUQ_OS_DESCRIPTION = "Independent and identically distributed English PAUQ train/test split. Corresponds to the original Spider split."
-
-
- class Pauq(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="ru_trl",
-             version=VERSION,
-             description=RUSSIAN_PAUQ_TRL_DESCRIPTION,
-         ),
-         datasets.BuilderConfig(
-             name="en_trl",
-             version=VERSION,
-             description=ENGLISH_PAUQ_TRL_DESCRIPTION,
-         ),
-         datasets.BuilderConfig(
-             name="ru_tsl",
-             version=VERSION,
-             description=RUSSIAN_PAUQ_TSL_DESCRIPTION,
-         ),
-         datasets.BuilderConfig(
-             name="en_tsl",
-             version=VERSION,
-             description=ENGLISH_PAUQ_TSL_DESCRIPTION,
-         ),
-         datasets.BuilderConfig(
-             name="ru_os",
-             version=VERSION,
-             description=RUSSIAN_PAUQ_OS_DESCRIPTION,
-         ),
-         datasets.BuilderConfig(
-             name="en_os",
-             version=VERSION,
-             description=ENGLISH_PAUQ_OS_DESCRIPTION,
-         ),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "db_id": datasets.Value("string"),
-                 "source": datasets.Value("string"),
-                 "type": datasets.Value("string"),
-                 "question": datasets.Value("string"),
-                 "query": datasets.Value("string"),
-                 "sql": datasets.features.Sequence(datasets.Value("string")),
-                 "question_toks": datasets.features.Sequence(datasets.Value("string")),
-                 "query_toks": datasets.features.Sequence(datasets.Value("string")),
-                 "query_toks_no_values": datasets.features.Sequence(datasets.Value("string")),
-                 "template": datasets.Value("string"),
-             }
-         )
-         dataset_info = None
-         if self.config.name == "ru_trl":
-             dataset_info = datasets.DatasetInfo(
-                 description=RUSSIAN_PAUQ_TRL_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-                 config_name="ru_trl")
-         elif self.config.name == "en_trl":
-             dataset_info = datasets.DatasetInfo(
-                 description=ENGLISH_PAUQ_TRL_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-                 config_name="en_trl")
-         elif self.config.name == "ru_os":
-             dataset_info = datasets.DatasetInfo(
-                 description=RUSSIAN_PAUQ_OS_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-                 config_name="ru_os")
-         elif self.config.name == "en_os":
-             dataset_info = datasets.DatasetInfo(
-                 description=ENGLISH_PAUQ_OS_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-                 config_name="en_os")
-         elif self.config.name == "ru_tsl":
-             dataset_info = datasets.DatasetInfo(
-                 description=RUSSIAN_PAUQ_TSL_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-                 config_name="ru_tsl")
-         elif self.config.name == "en_tsl":
-             dataset_info = datasets.DatasetInfo(
-                 description=ENGLISH_PAUQ_TSL_DESCRIPTION,
-                 features=features,
-                 supervised_keys=None,
-                 homepage=_HOMEPAGE,
-                 license=_LICENSE,
-                 citation=_CITATION,
-                 config_name="en_tsl")
-
-         return dataset_info
-
-     def _split_generators(self, dl_manager):
-         downloaded_filepath = dl_manager.download_and_extract(_URL)
-
-         dataset_name = self.config.name
-
-         splits = []
-         if dataset_name == "ru_trl":
-             splits = [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_trl_train.json"),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_trl_test.json"),
-                     },
-                 ),
-             ]
-         elif dataset_name == "en_trl":
-             splits = [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_trl_train.json"),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_trl_test.json"),
-                     },
-                 ),
-             ]
-         elif dataset_name == "ru_os":
-             splits = [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_os_train.json"),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_os_test.json"),
-                     },
-                 ),
-             ]
-         elif dataset_name == "en_os":
-             splits = [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_os_train.json"),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_os_test.json"),
-                     },
-                 ),
-             ]
-         elif dataset_name == "ru_tsl":
-             splits = [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_tsl_train.json"),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_tsl_test.json"),
-                     },
-                 ),
-             ]
-         elif dataset_name == "en_tsl":
-             splits = [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_tsl_train.json"),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_tsl_test.json"),
-                     },
-                 ),
-             ]
-         return splits
-
-     def _generate_examples(self, data_filepath):
-         """This function returns the examples in the raw (text) form."""
-         logger.info("generating examples from = %s", data_filepath)
-         with open(data_filepath, encoding="utf-8") as f:
-             pauq = json.load(f)
-             for idx, sample in enumerate(pauq):
-                 yield idx, {
-                     "id": sample["id"],
-                     "db_id": sample["db_id"],
-                     "source": sample["source"],
-                     "type": sample["type"],
-                     "query": sample["query"],
-                     "sql": sample["sql"],
-                     "question": sample["question"],
-                     "question_toks": sample["question_toks"],
-                     "query_toks": sample["query_toks"],
-                     "query_toks_no_values": sample["query_toks_no_values"],
-                     "template": sample["template"],
-                 }
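
For reference, while this loader script was in place, the six configs it defines could be loaded through the standard datasets API. Below is a minimal sketch, not taken from the repo: it assumes a datasets release that still executes repository loading scripts (recent versions require trust_remote_code=True for that) and uses the "ru_os" config as an example.

from datasets import load_dataset

# Illustrative usage of the script this commit deletes.
# "composite/pauq" is the hosting repo (see _URL above); "ru_os" is the
# Russian config matching the original Spider split in BUILDER_CONFIGS.
pauq = load_dataset("composite/pauq", "ru_os", trust_remote_code=True)

print(pauq["train"][0]["question"])  # natural-language question (Russian)
print(pauq["train"][0]["query"])     # gold SQL query

The "question" and "query" fields come straight from the features schema in the deleted script; once this commit lands, the script-defined configs are presumably no longer loadable this way.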