|
import json |
|
import textwrap |
|
|
|
import datasets |
|
from datasets.tasks import QuestionAnsweringExtractive |
|
|
|
|
|
_CITATION = """\ |
|
@article{tydiqa, |
|
title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages}, |
|
author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki} |
|
year = {2020}, |
|
journal = {Transactions of the Association for Computational Linguistics} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
TyDi QA is a question answering dataset covering 11 typologically diverse languages with 204K question-answer pairs. |
|
The languages of TyDi QA are diverse with regard to their typology -- the set of linguistic features that each language |
|
expresses -- such that we expect models performing well on this set to generalize across a large number of the languages |
|
in the world. It contains language phenomena that would not be found in English-only corpora. To provide a realistic |
|
information-seeking task and avoid priming effects, questions are written by people who want to know the answer, but |
|
don’t know the answer yet, (unlike SQuAD and its descendents) and the data is collected directly in each language without |
|
the use of translation (unlike MLQA and XQuAD). |
|
""" |
|
|
|
|
|
_LANG = ["arabic", "bengali", "english", "finnish", "indonesian", "japanese", "korean", "russian", "swahili", "telugu", "thai"] |
|
|
|
_PRIMARY_URLS = "https://huggingface.co./datasets/khalidalt/tydiqa-goldp/resolve/main/primary_tasks/{split}/{language}-{split}.jsonl" |
|
_Secondary_URLS = "https://huggingface.co./datasets/khalidalt/tydiqa-goldp/resolve/main/{split}/{language}-{split}.jsonl" |
|
|
|
# Dataset version; the second argument is the (empty) version description.
_VERSION = datasets.Version("1.1.0", "")
|
|
|
|
|
class tydiqa_GoldP(datasets.GeneratorBasedBuilder):
    """Builder for the TyDi QA Gold Passage (GoldP, secondary) task.

    One :class:`TydiqaConfig` is declared per language so that
    ``self.config.name`` can be substituted directly into the ``{language}``
    placeholder of the download URL templates.
    """

    # NOTE(review): the original BUILDER_CONFIGS literal mixed two hand-written
    # configs ("primary_task"/"secondary_task") with a stray ``for lang in
    # _LANG``, which is a SyntaxError. _split_generators formats the download
    # URL with ``language=self.config.name``, so the intended design is one
    # config per language; that is what is built here.
    BUILDER_CONFIGS = [
        TydiqaConfig(
            name=lang,
            description=textwrap.dedent(
                """Gold passage task (GoldP): Given a passage that is guaranteed to contain the
                answer, predict the single contiguous span of characters that answers the question. This is more similar to
                existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
                This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing
                a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,
                XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:
                only the gold answer passage is provided rather than the entire Wikipedia article;
                unanswerable questions have been discarded, similar to MLQA and XQuAD;
                we evaluate with the SQuAD 1.1 metrics like XQuAD; and
                Thai and Japanese are removed since the lack of whitespace breaks some tools.
                """
            ),
        )
        for lang in _LANG
    ]

    def _info(self):
        """Return the dataset metadata: feature schema, citation, homepage, task templates."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "document_title": datasets.Value("string"),
                    "passage_text": datasets.Value("string"),
                    "question_text": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "start_byte": datasets.Value("int32"),
                            "limit_byte": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/google-research-datasets/tydiqa",
            citation=_CITATION,
            # Fix: the original referenced columns "question"/"context", which do
            # not exist in the feature schema above — the declared names are
            # "question_text"/"passage_text".
            # NOTE(review): QuestionAnsweringExtractive conventionally expects an
            # answers feature with "text"/"answer_start"; confirm it accepts the
            # byte-offset schema ("start_byte"/"limit_byte") used here.
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question_text",
                    context_column="passage_text",
                    answers_column="answers",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the per-language GoldP JSONL files and declare train/dev splits."""
        # Config name is the language code (see BUILDER_CONFIGS above).
        language = self.config.name
        splits = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev"}

        data_urls = {
            split: _Secondary_URLS.format(language=language, split=splits[split])
            for split in splits
        }

        dl_paths = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": dl_paths[split]},
            )
            for split in splits
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs, one per JSON line of *filepath*.

        Each line is a self-contained JSON object assumed to already match the
        feature schema declared in ``_info()``.
        """
        with open(filepath, encoding="utf-8") as f:
            for _id, row in enumerate(f):
                yield _id, json.loads(row)
|
|