system (HF staff) committed
Commit: 94c182d
Parent(s): 2035f8a

Update files from the datasets library (from 1.9.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.9.0

Files changed (2):
  1. README.md +18 -0
  2. drop.py +5 -5
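This commit is an automated sync from the datasets library release, so the updated script can be smoke-tested directly. A minimal sketch (hypothetical usage, assuming a datasets version that still resolves the script-based drop builder):

from datasets import load_dataset

# Downloads drop_dataset.zip via _URL and builds both splits.
ds = load_dataset("drop")
print(ds)                          # DatasetDict with "train" and "validation"
print(ds["train"][0]["question"])  # one of the ~96k crowdsourced questions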
README.md CHANGED
@@ -1,6 +1,24 @@
 ---
+pretty_name: DROP
+annotations_creators:
+- crowdsourced
+language_creators:
+- crowdsourced
 languages:
 - en
+licenses:
+- cc-by-sa-4-0
+multilinguality:
+- monolingual
+size_categories:
+- 10K<n<100K
+source_datasets:
+- original
+task_categories:
+- question-answering
+task_ids:
+- extractive-qa
+- abstractive-qa
 paperswithcode_id: drop
 ---
 
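The added block is YAML front matter: machine-readable dataset-card metadata that the Hub uses for search filters and tag display. A minimal sketch of reading it back (assuming PyYAML and a local copy of this README.md):

import yaml

with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The front matter sits between the first two "---" delimiters.
_, front_matter, _ = text.split("---", 2)
meta = yaml.safe_load(front_matter)
print(meta["task_ids"])         # ['extractive-qa', 'abstractive-qa']
print(meta["size_categories"])  # ['10K<n<100K']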
drop.py CHANGED
@@ -7,7 +7,6 @@ import os
 import datasets
 
 
-# TODO(drop): BibTeX citation
 _CITATION = """\
 @inproceedings{Dua2019DROP,
 author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},
@@ -17,7 +16,6 @@ _CITATION = """\
 }
 """
 
-# TODO(drop):
 _DESCRIPTION = """\
 DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.
 . DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a
@@ -25,7 +23,7 @@ question, perhaps to multiple input positions, and perform discrete operations o
 sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was
 necessary for prior datasets.
 """
-_URl = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip"
+_URL = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip"
 
 
 class AnswerParsingError(Exception):
@@ -102,7 +100,7 @@ class Drop(datasets.GeneratorBasedBuilder):
         # TODO(drop): Downloads the data and defines the splits
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URl)
+        dl_dir = dl_manager.download_and_extract(_URL)
         data_dir = os.path.join(dl_dir, "drop_dataset")
         return [
             datasets.SplitGenerator(
@@ -122,6 +120,7 @@ class Drop(datasets.GeneratorBasedBuilder):
         # TODO(drop): Yields (key, example) tuples from the dataset
         with open(filepath, mode="r", encoding="utf-8") as f:
             data = json.load(f)
+            id_ = 0
             for i, (section_id, section) in enumerate(data.items()):
                 for j, qa in enumerate(section["qa_pairs"]):
 
@@ -139,7 +138,8 @@
 
                     try:
                         example["answers_spans"] = self.build_answers(answers)
-                        yield example["query_id"], example
+                        yield id_, example
+                        id_ += 1
                     except AnswerParsingError:
                         # This is expected for 9 examples of train
                         # and 1 of validation.
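Beyond the _URl/_URL typo fix, the substantive change is the yield key: the script previously keyed each example by query_id and now yields a running integer, presumably because query_id values are not guaranteed unique in the raw data and the library began enforcing unique generator keys around this release. A minimal sketch of that duplicate check against the raw data (hypothetical path, assuming the zip from _URL has been extracted):

import json
from collections import Counter

with open("drop_dataset/drop_dataset_train.json", encoding="utf-8") as f:
    data = json.load(f)

# Count how often each query_id appears across all passages.
counts = Counter(
    qa["query_id"]
    for section in data.values()
    for qa in section["qa_pairs"]
)
dupes = {qid: n for qid, n in counts.items() if n > 1}
print(f"{len(dupes)} duplicated query_id values")  # any duplicate would break keyed yields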