Oleg Somov committed on
Commit
551770f
1 Parent(s): 17d6e2b

update splits

Browse files
Files changed (2) hide show
  1. formatted_pauq.zip +2 -2
  2. pauq.py +98 -38
formatted_pauq.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4d357ced32a9f8d737f4e1d256881da8832b30d7a9d2c3e782fc6f1135189508
3
- size 314755467
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e3f42f518bdc7f34dca9722b52c9f3fe9173ba776136c257917cb4e7a4dab0a
3
+ size 318919596
pauq.py CHANGED
@@ -48,11 +48,14 @@ _HOMEPAGE = "https://github.com/ai-spiderweb/pauq"
48
 
49
  _URL = "https://huggingface.co/datasets/composite/pauq/resolve/main/formatted_pauq.zip"
50
 
51
- RUSSIAN_PAUQ_TL_DESCRIPTION = "Russian PAUQ train/test split based on target length of SQL query. Long queries in train, short queries in test."
52
- ENGLISH_PAUQ_TL_DESCRIPTION = "English PAUQ train/test split based on target length of SQL query. Long queries in train, short queries in test."
53
 
54
- RUSSIAN_PAUQ_IID_DESCRIPTION = "Independent and identical Russian PAUQ train/test split. Сorresponds to original Spider splitting."
55
- ENGLISH_PAUQ_IID_DESCRIPTION = "Independent and identical English PAUQ train/test split. Сorresponds to original Spider splitting."
 
 
 
56
 
57
 
58
  class Pauq(datasets.GeneratorBasedBuilder):
@@ -60,24 +63,34 @@ class Pauq(datasets.GeneratorBasedBuilder):
60
 
61
  BUILDER_CONFIGS = [
62
  datasets.BuilderConfig(
63
- name="ru_pauq_tl",
 
 
 
 
 
64
  version=VERSION,
65
- description=RUSSIAN_PAUQ_TL_DESCRIPTION,
66
  ),
67
  datasets.BuilderConfig(
68
- name="en_pauq_tl",
69
  version=VERSION,
70
- description=ENGLISH_PAUQ_TL_DESCRIPTION,
71
  ),
72
  datasets.BuilderConfig(
73
- name="ru_pauq_iid",
74
  version=VERSION,
75
- description=RUSSIAN_PAUQ_IID_DESCRIPTION,
76
  ),
77
  datasets.BuilderConfig(
78
- name="en_pauq_iid",
79
  version=VERSION,
80
- description=ENGLISH_PAUQ_IID_DESCRIPTION,
 
 
 
 
 
81
  ),
82
  ]
83
 
@@ -94,46 +107,64 @@ class Pauq(datasets.GeneratorBasedBuilder):
94
  "question_toks": datasets.features.Sequence(datasets.Value("string")),
95
  "query_toks": datasets.features.Sequence(datasets.Value("string")),
96
  "query_toks_no_values": datasets.features.Sequence(datasets.Value("string")),
97
- "masked_query": datasets.Value("string")
98
  }
99
  )
100
  dataset_info = None
101
- if self.config.name == 'ru_pauq_tl':
102
  dataset_info = datasets.DatasetInfo(
103
- description=RUSSIAN_PAUQ_TL_DESCRIPTION,
104
  features=features,
105
  supervised_keys=None,
106
  homepage=_HOMEPAGE,
107
  license=_LICENSE,
108
  citation=_CITATION,
109
- config_name="ru_pauq_tl")
110
- elif self.config.name == "en_pauq_tl":
111
  dataset_info = datasets.DatasetInfo(
112
- description=ENGLISH_PAUQ_TL_DESCRIPTION,
113
  features=features,
114
  supervised_keys=None,
115
  homepage=_HOMEPAGE,
116
  license=_LICENSE,
117
  citation=_CITATION,
118
- config_name="en_pauq_tl")
119
- elif self.config.name == 'ru_pauq_iid':
120
  dataset_info = datasets.DatasetInfo(
121
- description=RUSSIAN_PAUQ_IID_DESCRIPTION,
122
  features=features,
123
  supervised_keys=None,
124
  homepage=_HOMEPAGE,
125
  license=_LICENSE,
126
  citation=_CITATION,
127
- config_name="ru_pauq_iid")
128
- elif self.config.name == 'en_pauq_iid':
129
  dataset_info = datasets.DatasetInfo(
130
- description=ENGLISH_PAUQ_IID_DESCRIPTION,
131
  features=features,
132
  supervised_keys=None,
133
  homepage=_HOMEPAGE,
134
  license=_LICENSE,
135
  citation=_CITATION,
136
- config_name="en_pauq_iid")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
 
138
  return dataset_info
139
 
@@ -143,65 +174,94 @@ class Pauq(datasets.GeneratorBasedBuilder):
143
  dataset_name = self.config.name
144
 
145
  splits = []
146
- if dataset_name == 'ru_pauq_tl':
147
  splits = [
148
  datasets.SplitGenerator(
149
  name=datasets.Split.TRAIN,
150
  gen_kwargs={
151
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_tl_train.json"),
152
  },
153
  ),
154
  datasets.SplitGenerator(
155
  name=datasets.Split.TEST,
156
  gen_kwargs={
157
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_tl_test.json"),
158
  },
159
  )
160
  ]
161
- elif dataset_name == 'en_pauq_tl':
162
  splits = [
163
  datasets.SplitGenerator(
164
  name=datasets.Split.TRAIN,
165
  gen_kwargs={
166
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_tl_train.json"),
167
  },
168
  ),
169
  datasets.SplitGenerator(
170
  name=datasets.Split.TEST,
171
  gen_kwargs={
172
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_tl_test.json"),
173
  },
174
  )]
175
- elif dataset_name == 'ru_pauq_iid':
176
  splits = [
177
  datasets.SplitGenerator(
178
  name=datasets.Split.TRAIN,
179
  gen_kwargs={
180
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_iid_train.json"),
181
  },
182
  ),
183
  datasets.SplitGenerator(
184
  name=datasets.Split.TEST,
185
  gen_kwargs={
186
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_iid_test.json"),
187
  },
188
  )
189
  ]
190
- elif dataset_name == 'en_pauq_iid':
191
  splits = [
192
  datasets.SplitGenerator(
193
  name=datasets.Split.TRAIN,
194
  gen_kwargs={
195
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_iid_train.json"),
196
  },
197
  ),
198
  datasets.SplitGenerator(
199
  name=datasets.Split.TEST,
200
  gen_kwargs={
201
- "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_iid_test.json"),
202
  },
203
  )
204
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
  return splits
206
 
207
  def _generate_examples(self, data_filepath):
@@ -221,5 +281,5 @@ class Pauq(datasets.GeneratorBasedBuilder):
221
  "question_toks": sample["question_toks"],
222
  "query_toks": sample["query_toks"],
223
  "query_toks_no_values": sample["query_toks_no_values"],
224
- "masked_query": sample["masked_query"]
225
  }
 
48
 
49
  _URL = "https://huggingface.co/datasets/composite/pauq/resolve/main/formatted_pauq.zip"
50
 
51
+ RUSSIAN_PAUQ_TRL_DESCRIPTION = "Russian PAUQ train/test split based on target length of SQL query. Long query template in train, short query template in test."
52
+ ENGLISH_PAUQ_TRL_DESCRIPTION = "English PAUQ train/test split based on target length of SQL query. Long query template in train, short query template in test."
53
 
54
+ RUSSIAN_PAUQ_TSL_DESCRIPTION = "Russian PAUQ train/test split based on target length of SQL query. Short query template in train, long query template in test."
55
+ ENGLISH_PAUQ_TSL_DESCRIPTION = "English PAUQ train/test split based on target length of SQL query. Short query template in train, long query template in test."
56
+
57
+ RUSSIAN_PAUQ_OS_DESCRIPTION = "Independent and identical Russian PAUQ train/test split. Corresponds to original Spider splitting."
58
+ ENGLISH_PAUQ_OS_DESCRIPTION = "Independent and identical English PAUQ train/test split. Corresponds to original Spider splitting."
59
 
60
 
61
  class Pauq(datasets.GeneratorBasedBuilder):
 
63
 
64
  BUILDER_CONFIGS = [
65
  datasets.BuilderConfig(
66
+ name="ru_trl",
67
+ version=VERSION,
68
+ description=RUSSIAN_PAUQ_TRL_DESCRIPTION,
69
+ ),
70
+ datasets.BuilderConfig(
71
+ name="en_trl",
72
  version=VERSION,
73
+ description=ENGLISH_PAUQ_TRL_DESCRIPTION,
74
  ),
75
  datasets.BuilderConfig(
76
+ name="ru_tsl",
77
  version=VERSION,
78
+ description=RUSSIAN_PAUQ_TSL_DESCRIPTION,
79
  ),
80
  datasets.BuilderConfig(
81
+ name="en_tsl",
82
  version=VERSION,
83
+ description=ENGLISH_PAUQ_TSL_DESCRIPTION,
84
  ),
85
  datasets.BuilderConfig(
86
+ name="ru_os",
87
  version=VERSION,
88
+ description=RUSSIAN_PAUQ_OS_DESCRIPTION,
89
+ ),
90
+ datasets.BuilderConfig(
91
+ name="en_os",
92
+ version=VERSION,
93
+ description=ENGLISH_PAUQ_OS_DESCRIPTION,
94
  ),
95
  ]
96
 
 
107
  "question_toks": datasets.features.Sequence(datasets.Value("string")),
108
  "query_toks": datasets.features.Sequence(datasets.Value("string")),
109
  "query_toks_no_values": datasets.features.Sequence(datasets.Value("string")),
110
+ "template": datasets.Value("string")
111
  }
112
  )
113
  dataset_info = None
114
+ if self.config.name == 'ru_trl':
115
  dataset_info = datasets.DatasetInfo(
116
+ description=RUSSIAN_PAUQ_TRL_DESCRIPTION,
117
  features=features,
118
  supervised_keys=None,
119
  homepage=_HOMEPAGE,
120
  license=_LICENSE,
121
  citation=_CITATION,
122
+ config_name="ru_trl")
123
+ elif self.config.name == "en_trl":
124
  dataset_info = datasets.DatasetInfo(
125
+ description=ENGLISH_PAUQ_TRL_DESCRIPTION,
126
  features=features,
127
  supervised_keys=None,
128
  homepage=_HOMEPAGE,
129
  license=_LICENSE,
130
  citation=_CITATION,
131
+ config_name="en_trl")
132
+ elif self.config.name == 'ru_os':
133
  dataset_info = datasets.DatasetInfo(
134
+ description=RUSSIAN_PAUQ_OS_DESCRIPTION,
135
  features=features,
136
  supervised_keys=None,
137
  homepage=_HOMEPAGE,
138
  license=_LICENSE,
139
  citation=_CITATION,
140
+ config_name="ru_os")
141
+ elif self.config.name == 'en_os':
142
  dataset_info = datasets.DatasetInfo(
143
+ description=ENGLISH_PAUQ_OS_DESCRIPTION,
144
  features=features,
145
  supervised_keys=None,
146
  homepage=_HOMEPAGE,
147
  license=_LICENSE,
148
  citation=_CITATION,
149
+ config_name="en_os")
150
+ elif self.config.name == 'ru_tsl':
151
+ dataset_info = datasets.DatasetInfo(
152
+ description=RUSSIAN_PAUQ_TSL_DESCRIPTION,
153
+ features=features,
154
+ supervised_keys=None,
155
+ homepage=_HOMEPAGE,
156
+ license=_LICENSE,
157
+ citation=_CITATION,
158
+ config_name="ru_tsl")
159
+ elif self.config.name == "en_tsl":
160
+ dataset_info = datasets.DatasetInfo(
161
+ description=ENGLISH_PAUQ_TSL_DESCRIPTION,
162
+ features=features,
163
+ supervised_keys=None,
164
+ homepage=_HOMEPAGE,
165
+ license=_LICENSE,
166
+ citation=_CITATION,
167
+ config_name="en_tsl")
168
 
169
  return dataset_info
170
 
 
174
  dataset_name = self.config.name
175
 
176
  splits = []
177
+ if dataset_name == 'ru_trl':
178
  splits = [
179
  datasets.SplitGenerator(
180
  name=datasets.Split.TRAIN,
181
  gen_kwargs={
182
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_trl_train.json"),
183
  },
184
  ),
185
  datasets.SplitGenerator(
186
  name=datasets.Split.TEST,
187
  gen_kwargs={
188
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_trl_test.json"),
189
  },
190
  )
191
  ]
192
+ elif dataset_name == 'en_trl':
193
  splits = [
194
  datasets.SplitGenerator(
195
  name=datasets.Split.TRAIN,
196
  gen_kwargs={
197
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_trl_train.json"),
198
  },
199
  ),
200
  datasets.SplitGenerator(
201
  name=datasets.Split.TEST,
202
  gen_kwargs={
203
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_trl_test.json"),
204
  },
205
  )]
206
+ elif dataset_name == 'ru_os':
207
  splits = [
208
  datasets.SplitGenerator(
209
  name=datasets.Split.TRAIN,
210
  gen_kwargs={
211
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_os_train.json"),
212
  },
213
  ),
214
  datasets.SplitGenerator(
215
  name=datasets.Split.TEST,
216
  gen_kwargs={
217
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_os_test.json"),
218
  },
219
  )
220
  ]
221
+ elif dataset_name == 'en_os':
222
  splits = [
223
  datasets.SplitGenerator(
224
  name=datasets.Split.TRAIN,
225
  gen_kwargs={
226
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_os_train.json"),
227
  },
228
  ),
229
  datasets.SplitGenerator(
230
  name=datasets.Split.TEST,
231
  gen_kwargs={
232
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_os_test.json"),
233
  },
234
  )
235
  ]
236
+ elif dataset_name == 'ru_tsl':
237
+ splits = [
238
+ datasets.SplitGenerator(
239
+ name=datasets.Split.TRAIN,
240
+ gen_kwargs={
241
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_tsl_train.json"),
242
+ },
243
+ ),
244
+ datasets.SplitGenerator(
245
+ name=datasets.Split.TEST,
246
+ gen_kwargs={
247
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/ru_tsl_test.json"),
248
+ },
249
+ )
250
+ ]
251
+ elif dataset_name == 'en_tsl':
252
+ splits = [
253
+ datasets.SplitGenerator(
254
+ name=datasets.Split.TRAIN,
255
+ gen_kwargs={
256
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_tsl_train.json"),
257
+ },
258
+ ),
259
+ datasets.SplitGenerator(
260
+ name=datasets.Split.TEST,
261
+ gen_kwargs={
262
+ "data_filepath": os.path.join(downloaded_filepath, "formatted_pauq/splits/en_tsl_test.json"),
263
+ },
264
+ )]
265
  return splits
266
 
267
  def _generate_examples(self, data_filepath):
 
281
  "question_toks": sample["question_toks"],
282
  "query_toks": sample["query_toks"],
283
  "query_toks_no_values": sample["query_toks_no_values"],
284
+ "template": sample["masked_query"]
285
  }