albertvillanova (HF staff) committed on
Commit 1c99765
1 Parent(s): 33ba9ca

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (c874faf2cd0a45d517bf50dbad220938e1e018ae)
- Delete loading script (f404e49644b81ce74f7266ddec1f7c856943cec0)
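With this commit the splits are served as Parquet shards, so the dataset loads without executing a Python script. A minimal sketch of loading it after the conversion, assuming the Hub repository id `disfl_qa` and a `datasets` release recent enough to resolve the `configs:` mapping added to README.md below:

from datasets import load_dataset

# Loads the Parquet shards directly; no loading script is executed.
ds = load_dataset("disfl_qa")
print(ds)  # DatasetDict with train/test/validation splits
print(ds["train"][0]["disfluent question"])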

README.md CHANGED
@@ -9,8 +9,6 @@ license:
 - cc-by-4.0
 multilinguality:
 - monolingual
-pretty_name: 'DISFL-QA: A Benchmark Dataset for Understanding Disfluencies in Question
-  Answering'
 size_categories:
 - 10K<n<100K
 source_datasets:
@@ -20,6 +18,8 @@ task_categories:
 task_ids:
 - extractive-qa
 - open-domain-qa
+pretty_name: 'DISFL-QA: A Benchmark Dataset for Understanding Disfluencies in Question
+  Answering'
 dataset_info:
   features:
   - name: squad_v2_id
@@ -40,16 +40,25 @@ dataset_info:
       dtype: int32
   splits:
   - name: train
-    num_bytes: 7712523
+    num_bytes: 7712491
     num_examples: 7182
   - name: test
-    num_bytes: 3865097
+    num_bytes: 3865065
     num_examples: 3643
   - name: validation
-    num_bytes: 1072731
+    num_bytes: 1072699
     num_examples: 1000
-  download_size: 48935038
-  dataset_size: 12650351
+  download_size: 4246350
+  dataset_size: 12650255
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for DISFL-QA: A Benchmark Dataset for Understanding Disfluencies in Question Answering
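The `configs:` block added above is what tells the Hub loader which Parquet shards back each split. The shards can also be read directly; a sketch assuming the repository has been cloned with Git LFS, so the real Parquet bytes (not the pointer files added below) are on disk:

import pyarrow.parquet as pq

# Read one shard directly, bypassing the `datasets` library.
table = pq.read_table("data/train-00000-of-00001.parquet")
print(table.num_rows)      # 7182, matching num_examples for the train split
print(table.column_names)  # squad_v2_id, original question, disfluent question, ...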
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a8768ab77830bb13e44110d29e41ce49a77698d53ef84f889c30d2bc1e82444
+size 590102

data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e4e82c5d2b8b5c03afe4b8cedb2816abcdcb59dedd9edfa009eb95bd49e3d15
+size 3221857

data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c88a0ae7fcbd0650cd414734948a89ee62ddf2593de5b93efb5c54ab2137b24
+size 434391
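Each file added above is a Git LFS pointer: the repository itself stores only the `oid` (the SHA-256 of the payload) and `size` stanza, while the Parquet bytes live in LFS storage. A small sketch for verifying a fetched shard against its pointer metadata:

import hashlib
import os

def verify_lfs_pointer(path, expected_sha256, expected_size):
    # Hash the file in chunks and compare against the pointer's oid and size.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha256 and os.path.getsize(path) == expected_size

# Values copied from the data/test-* pointer above.
print(verify_lfs_pointer(
    "data/test-00000-of-00001.parquet",
    "3a8768ab77830bb13e44110d29e41ce49a77698d53ef84f889c30d2bc1e82444",
    590102,
))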
disfl_qa.py DELETED
@@ -1,199 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""A Benchmark Dataset for Understanding Disfluencies in Question Answering"""
-
-
-import json
-
-import datasets
-from datasets.tasks import QuestionAnsweringExtractive
-
-
-_CITATION = """\
-@inproceedings{gupta-etal-2021-disflqa,
-    title = "{Disfl-QA: A Benchmark Dataset for Understanding Disfluencies in Question Answering}",
-    author = "Gupta, Aditya and Xu, Jiacheng and Upadhyay, Shyam and Yang, Diyi and Faruqui, Manaal",
-    booktitle = "Findings of ACL",
-    year = "2021"
-}
-
-"""
-
-_DESCRIPTION = """\
-Disfl-QA is a targeted dataset for contextual disfluencies in an information seeking setting,
-namely question answering over Wikipedia passages. Disfl-QA builds upon the SQuAD-v2 (Rajpurkar et al., 2018)
-dataset, where each question in the dev set is annotated to add a contextual disfluency using the paragraph as
-a source of distractors.
-
-The final dataset consists of ~12k (disfluent question, answer) pairs. Over 90% of the disfluencies are
-corrections or restarts, making it a much harder test set for disfluency correction. Disfl-QA aims to fill a
-major gap between speech and NLP research community. We hope the dataset can serve as a benchmark dataset for
-testing robustness of models against disfluent inputs.
-
-Our experiments reveal that the state-of-the-art models are brittle when subjected to disfluent inputs from
-Disfl-QA. Detailed experiments and analyses can be found in our paper.
-"""
-
-_HOMEPAGE = "https://github.com/google-research-datasets/disfl-qa"
-
-_LICENSE = "Disfl-QA dataset is licensed under CC BY 4.0"
-
-_URL = "https://raw.githubusercontent.com/google-research-datasets/Disfl-QA/main/"
-
-_URLS_squad_v2 = {
-    "train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/" + "train-v2.0.json",
-    "dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/" + "dev-v2.0.json",
-}
-
-
-class DisflQA(datasets.GeneratorBasedBuilder):
-    """A Benchmark Dataset for Understanding Disfluencies in Question Answering"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "squad_v2_id": datasets.Value("string"),
-                "original question": datasets.Value("string"),
-                "disfluent question": datasets.Value("string"),
-                "title": datasets.Value("string"),
-                "context": datasets.Value("string"),
-                "answers": datasets.features.Sequence(
-                    {
-                        "text": datasets.Value("string"),
-                        "answer_start": datasets.Value("int32"),
-                    }
-                ),
-            }
-        )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-            task_templates=[
-                QuestionAnsweringExtractive(
-                    question_column="disfluent question", context_column="context", answers_column="answers"
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-
-        squad_v2_downloaded_files = dl_manager.download_and_extract(_URLS_squad_v2)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": dl_manager.download_and_extract(_URL + "train.json"),
-                    "split": "train",
-                    "squad_v2_data": squad_v2_downloaded_files,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": dl_manager.download_and_extract(_URL + "test.json"),
-                    "split": "test",
-                    "squad_v2_data": squad_v2_downloaded_files,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": dl_manager.download_and_extract(_URL + "dev.json"),
-                    "split": "dev",
-                    "squad_v2_data": squad_v2_downloaded_files,
-                },
-            ),
-        ]
-
-    def _generate_examples(
-        self,
-        filepath,
-        split,
-        squad_v2_data,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    ):
-        """Yields examples as (key, example) tuples."""
-
-        merge_squad_v2_json = {}
-
-        for file in squad_v2_data:
-            with open(squad_v2_data[file], encoding="utf-8") as f:
-                merge_squad_v2_json.update(json.load(f))
-
-        squad_v2_dict = _helper_dict(merge_squad_v2_json)  # contains all squad_v2 data in a dict with id as key
-
-        with open(filepath, encoding="utf-8") as f:
-            glob_id = 0
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                for i in data:
-                    yield glob_id, {
-                        "squad_v2_id": i,
-                        "disfluent question": data[i]["disfluent"],
-                        "title": squad_v2_dict[i]["title"],
-                        "context": squad_v2_dict[i]["context"],
-                        "original question": squad_v2_dict[i]["question"],
-                        "answers": {
-                            "answer_start": squad_v2_dict[i]["answers"]["answer_start"],
-                            "text": squad_v2_dict[i]["answers"]["text"],
-                        },
-                    }
-                    glob_id += 1
-
-
-def _helper_dict(row_squad_v2: dict):  # creates dict with id as key for combined squad_v2
-
-    squad_v2_dict = {}
-
-    for example in row_squad_v2["data"]:
-        title = example.get("title", "").strip()
-        for paragraph in example["paragraphs"]:
-            context = paragraph["context"].strip()
-            for qa in paragraph["qas"]:
-                question = qa["question"].strip()
-                id_ = qa["id"]
-
-                answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                answers = [answer["text"].strip() for answer in qa["answers"]]
-
-                squad_v2_dict[id_] = {
-                    "title": title,
-                    "context": context,
-                    "question": question,
-                    "id": id_,
-                    "answers": {
-                        "answer_start": answer_starts,
-                        "text": answers,
-                    },
-                }
-    return squad_v2_dict
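The Parquet shards added in this commit materialize the join this deleted script performed on every load: Disfl-QA JSON keyed by SQuAD-v2 id, merged with the title, context, original question, and answers from SQuAD-v2. A sketch of how the conversion could be reproduced; the actual shards were generated by Hub tooling, and the repository id is an assumption:

from datasets import load_dataset

# Run the old builder once, then freeze each split as a single Parquet shard
# matching the data/<split>-00000-of-00001.parquet layout added above.
ds = load_dataset("disfl_qa")  # before this commit, this executed disfl_qa.py
for split, data in ds.items():
    data.to_parquet(f"data/{split}-00000-of-00001.parquet")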