import json

import datasets


_DESCRIPTION = """
SciFact
A dataset of expert-written scientific claims paired with evidence-containing
abstracts and annotated with labels and rationales.
"""
_CITATION = """
@InProceedings{Wadden2020FactOF,
  author = {David Wadden and Shanchuan Lin and Kyle Lo and Lucy Lu Wang and
            Madeleine van Zuylen and Arman Cohan and Hannaneh Hajishirzi},
title = {Fact or Fiction: Verifying Scientific Claims},
booktitle = {EMNLP},
year = 2020,
}
"""
_DOWNLOAD_URL = "https://testerstories.com/files/ai_learn/data.tar.gz"
class ScifactConfig(datasets.BuilderConfig):
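    """BuilderConfig for SciFact."""
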
    def __init__(self, **kwargs):
        # Every configuration is pinned to the same dataset version.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class Scifact(datasets.GeneratorBasedBuilder):
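    """SciFact: expert-written scientific claims paired with evidence abstracts."""
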
VERSION = datasets.Version("0.1.0")
BUILDER_CONFIGS = [
ScifactConfig(name="corpus", description="The corpus of evidence documents"),
ScifactConfig(
name="claims", description="The claims are split into train, test, dev"
),
    ]

    def _info(self):
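        # The two configurations expose different schemas: "corpus" yields one
        # record per evidence abstract, while "claims" flattens each claim's
        # evidence annotations into string/sequence fields.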
if self.config.name == "corpus":
features = {
"doc_id": datasets.Value("int32"),
"title": datasets.Value("string"),
"abstract": datasets.features.Sequence(datasets.Value("string")),
"structured": datasets.Value("bool"),
}
else:
features = {
"id": datasets.Value("int32"),
"claim": datasets.Value("string"),
"evidence_doc_id": datasets.Value("string"),
"evidence_label": datasets.Value("string"),
"evidence_sentences": datasets.features.Sequence(
datasets.Value("int32")
),
"cited_doc_ids": datasets.features.Sequence(datasets.Value("int32")),
}
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(features),
supervised_keys=None,
homepage="https://scifact.apps.allenai.org/",
citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
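        # Download the tarball once and stream its members lazily with
        # iter_archive (nothing is extracted to disk). The layout below is
        # inferred from the paths this script reads:
        #
        #   data/corpus.jsonl         one JSON object per evidence abstract
        #   data/claims_train.jsonl   one JSON object per annotated claim
        #   data/claims_dev.jsonl
        #   data/claims_test.jsonl    claims without gold evidence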
archive = dl_manager.download(_DOWNLOAD_URL)
if self.config.name == "corpus":
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": "data/corpus.jsonl",
"split": "train",
"files": dl_manager.iter_archive(archive),
},
),
]
else:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": "data/claims_train.jsonl",
"split": "train",
"files": dl_manager.iter_archive(archive),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": "data/claims_test.jsonl",
"split": "test",
"files": dl_manager.iter_archive(archive),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": "data/claims_dev.jsonl",
"split": "dev",
"files": dl_manager.iter_archive(archive),
},
),
        ]

    def _generate_examples(self, filepath, split, files):
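        # `files` yields (member_path, file_object) pairs from the streamed
        # archive; only the member matching `filepath` is read. Example keys
        # must be unique within a split: corpus and test rows reuse the row
        # index, while a claim with several evidence entries emits one example
        # per (claim, document, evidence-set) triple under a composite key.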
for path, f in files:
if path == filepath:
for id_, row in enumerate(f):
data = json.loads(row.decode("utf-8"))
if self.config.name == "corpus":
yield id_, {
"doc_id": int(data["doc_id"]),
"title": data["title"],
"abstract": data["abstract"],
"structured": data["structured"],
}
else:
if split == "test":
yield id_, {
"id": data["id"],
"claim": data["claim"],
"evidence_doc_id": "",
"evidence_label": "",
"evidence_sentences": [],
"cited_doc_ids": [],
}
else:
evidences = data["evidence"]
if evidences:
for id1, doc_id in enumerate(evidences):
for id2, evidence in enumerate(evidences[doc_id]):
                                        yield f"{id_}_{id1}_{id2}", {
"id": data["id"],
"claim": data["claim"],
"evidence_doc_id": doc_id,
"evidence_label": evidence["label"],
"evidence_sentences": evidence["sentences"],
"cited_doc_ids": data.get(
"cited_doc_ids", []
),
}
else:
yield id_, {
"id": data["id"],
"claim": data["claim"],
"evidence_doc_id": "",
"evidence_label": "",
"evidence_sentences": [],
"cited_doc_ids": data.get("cited_doc_ids", []),
}
break
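

if __name__ == "__main__":
    # A minimal usage sketch, not part of the builder itself. It assumes this
    # file is saved locally as "scifact.py" and that _DOWNLOAD_URL is
    # reachable; newer versions of the datasets library may also require
    # trust_remote_code=True when loading from a script path.
    corpus = datasets.load_dataset("scifact.py", "corpus", split="train")
    print(corpus[0]["doc_id"], corpus[0]["title"])

    claims = datasets.load_dataset("scifact.py", "claims", split="validation")
    print(claims[0]["claim"], "->", claims[0]["evidence_label"])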