Modalities: Tabular, Text
Languages: English
Libraries: Datasets
License:
gabrielaltay committed on
Commit a5b3217
1 Parent(s): 509b49e

upload hubscripts/evidence_inference_hub.py to hub from bigbio repo

Files changed (1)
  1. evidence_inference.py +293 -0
evidence_inference.py ADDED
@@ -0,0 +1,293 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ The dataset consists of biomedical articles describing randomized control trials (RCTs)
+ that compare multiple treatments. Each of these articles will have multiple questions,
+ or 'prompts' associated with them. These prompts will ask about the relationship between
+ an intervention and comparator with respect to an outcome, as reported in the trial.
+ For example, a prompt may ask about the reported effects of aspirin as compared to placebo
+ on the duration of headaches.
+ For the sake of this task, we assume that a particular article will report that the intervention of interest either
+ significantly increased, significantly decreased or had no significant effect on the outcome, relative to the comparator.
+ """
+
+ import os
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import qa_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{deyoung-etal-2020-evidence,
+     title = "Evidence Inference 2.0: More Data, Better Models",
+     author = "DeYoung, Jay and
+       Lehman, Eric and
+       Nye, Benjamin and
+       Marshall, Iain and
+       Wallace, Byron C.",
+     booktitle = "Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Processing",
+     month = jul,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/2020.bionlp-1.13",
+     pages = "123--132",
+ }
+ """
+
+ _DATASETNAME = "evidence_inference"
+ _DISPLAYNAME = "Evidence Inference 2.0"
+
+ _DESCRIPTION = """\
+ The dataset consists of biomedical articles describing randomized control trials (RCTs) that compare multiple
+ treatments. Each of these articles will have multiple questions, or 'prompts' associated with them.
+ These prompts will ask about the relationship between an intervention and comparator with respect to an outcome,
+ as reported in the trial. For example, a prompt may ask about the reported effects of aspirin as compared
+ to placebo on the duration of headaches. For the sake of this task, we assume that a particular article
+ will report that the intervention of interest either significantly increased, significantly decreased
+ or had no significant effect on the outcome, relative to the comparator.
+ """
+
+ _HOMEPAGE = "https://github.com/jayded/evidence-inference"
+
+ _LICENSE = 'MIT License'
+
+ _URLS = {
+     _DATASETNAME: "http://evidence-inference.ebm-nlp.com/v2.0.tar.gz",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
+
+ _SOURCE_VERSION = "2.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+ QA_CHOICES = [
+     "significantly increased",
+     "no significant difference",
+     "significantly decreased",
+ ]
+
+ # Some examples are removed due to comments on the dataset's github page
+ # https://github.com/jayded/evidence-inference/blob/master/annotations/README.md#caveat
+
+ INCORRECT_PROMPT_IDS = set([
+     911, 912, 1262, 1261, 3044, 3248, 3111, 3620, 4308, 4490, 4491, 4324,
+     4325, 4492, 4824, 5000, 5001, 5002, 5046, 5047, 4948, 5639, 5710, 5752,
+     5775, 5782, 5841, 5843, 5861, 5862, 5863, 5964, 5965, 5966, 5975, 4807,
+     5776, 5777, 5778, 5779, 5780, 5781, 6034, 6065, 6066, 6666, 6667, 6668,
+     6669, 7040, 7042, 7944, 8590, 8605, 8606, 8639, 8640, 8745, 8747, 8749,
+     8877, 8878, 8593, 8631, 8635, 8884, 8886, 8773, 10032, 10035, 8876, 8875,
+     8885, 8917, 8921, 8118, 10885, 10886, 10887, 10888, 10889, 10890
+ ])
+
+ QUESTIONABLE_PROMPT_IDS = set([
+     7811, 7812, 7813, 7814, 7815, 8197, 8198, 8199,
+     8200, 8201, 9429, 9430, 9431, 8536, 9432
+ ])
+
+ SOMEWHAT_MALFORMED_PROMPT_IDS = set([
+     3514, 346, 5037, 4715, 8767, 9295, 9297, 8870, 9862
+ ])
+
+ SKIP_PROMPT_IDS = INCORRECT_PROMPT_IDS | QUESTIONABLE_PROMPT_IDS | SOMEWHAT_MALFORMED_PROMPT_IDS
+
+
+ class EvidenceInferenceDataset(datasets.GeneratorBasedBuilder):
+     f"""{_DESCRIPTION}"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="evidence-inference_source",
+             version=SOURCE_VERSION,
+             description="evidence-inference source schema",
+             schema="source",
+             subset_id="evidence-inference",
+         ),
+         BigBioConfig(
+             name="evidence-inference_bigbio_qa",
+             version=BIGBIO_VERSION,
+             description="evidence-inference BigBio schema",
+             schema="bigbio_qa",
+             subset_id="evidence-inference",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "evidence-inference_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int64"),
+                     "prompt_id": datasets.Value("int64"),
+                     "pmcid": datasets.Value("int64"),
+                     "label": datasets.Value("string"),
+                     "evidence": datasets.Value("string"),
+                     "intervention": datasets.Value("string"),
+                     "comparator": datasets.Value("string"),
+                     "outcome": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "bigbio_qa":
+             features = qa_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepaths": [
+                         os.path.join(data_dir, "annotations_merged.csv"),
+                         os.path.join(data_dir, "prompts_merged.csv"),
+                     ],
+                     "datapath": os.path.join(data_dir, "txt_files"),
+                     "split": "train",
+                     "datadir": data_dir,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepaths": [
+                         os.path.join(data_dir, "annotations_merged.csv"),
+                         os.path.join(data_dir, "prompts_merged.csv"),
+                     ],
+                     "datapath": os.path.join(data_dir, "txt_files"),
+                     "split": "validation",
+                     "datadir": data_dir,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepaths": [
+                         os.path.join(data_dir, "annotations_merged.csv"),
+                         os.path.join(data_dir, "prompts_merged.csv"),
+                     ],
+                     "datapath": os.path.join(data_dir, "txt_files"),
+                     "split": "test",
+                     "datadir": data_dir,
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, filepaths, datapath, split, datadir
+     ) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(f"{datadir}/splits/{split}_article_ids.txt", "r") as f:
+             ids = [int(i.strip()) for i in f.readlines()]
+         prompts = pd.read_csv(filepaths[-1], encoding="utf8")
+         prompts = prompts[prompts["PMCID"].isin(ids)]
+
+         annotations = pd.read_csv(filepaths[0], encoding="utf8").set_index("PromptID")
+         evidences = pd.read_csv(filepaths[0], encoding="utf8").set_index("PMCID")
+         evidences = evidences[evidences["Evidence Start"] != -1]
+         uid = 0
+
+         def lookup(df: pd.DataFrame, id, col) -> str:
+             try:
+                 label = df.loc[id][col]
+                 if isinstance(label, pd.Series):
+                     return label.values[0]
+                 else:
+                     return label
+             except KeyError:
+                 return -1
+
+         def extract_evidence(doc_id, start, end):
+             p = f"{datapath}/PMC{doc_id}.txt"
+             with open(p, "r") as f:
+                 return f.read()[start:end]
+
+
+         for key, sample in prompts.iterrows():
+
+             pid = sample["PromptID"]
+             pmcid = sample["PMCID"]
+             label = lookup(annotations, pid, "Label")
+             start = lookup(evidences, pmcid, "Evidence Start")
+             end = lookup(evidences, pmcid, "Evidence End")
+
+             if pid in SKIP_PROMPT_IDS:
+                 continue
+
+             if label == -1:
+                 continue
+
+             evidence = extract_evidence(pmcid, start, end)
+
+             if self.config.schema == "source":
+
+                 feature_dict = {
+                     "id": uid,
+                     "pmcid": pmcid,
+                     "prompt_id": pid,
+                     "intervention": sample["Intervention"],
+                     "comparator": sample["Comparator"],
+                     "outcome": sample["Outcome"],
+                     "evidence": evidence,
+                     "label": label,
+                 }
+
+                 uid += 1
+                 yield key, feature_dict
+
+             elif self.config.schema == "bigbio_qa":
+
+                 context = evidence
+                 question = (
+                     f"Compared to {sample['Comparator']} "
+                     f"what was the result of {sample['Intervention']} on {sample['Outcome']}?"
+                 )
+                 feature_dict = {
+                     "id": uid,
+                     "question_id": pid,
+                     "document_id": pmcid,
+                     "question": question,
+                     "type": "multiple_choice",
+                     "choices": QA_CHOICES,
+                     "context": context,
+                     "answer": [label],
+                 }
+
+                 uid += 1
+                 yield key, feature_dict
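
Usage note (not part of the commit): a minimal loading sketch for this script, assuming it is hosted on the Hub as bigbio/evidence_inference alongside its bigbiohub.py helper, and that the installed datasets release still executes dataset loading scripts (recent versions require trust_remote_code=True).

    from datasets import load_dataset

    # source schema: one record per prompt with intervention / comparator / outcome / label
    source = load_dataset(
        "bigbio/evidence_inference",          # assumed Hub repo id
        name="evidence-inference_source",
        trust_remote_code=True,
    )

    # bigbio_qa schema: the same prompts rendered as multiple-choice QA over the evidence span
    qa = load_dataset(
        "bigbio/evidence_inference",
        name="evidence-inference_bigbio_qa",
        trust_remote_code=True,
    )

    print(source["train"][0]["label"])       # e.g. "significantly decreased"
    print(qa["validation"][0]["question"])   # "Compared to ... what was the result of ... on ...?"

The config names passed via name= are the ones declared in BUILDER_CONFIGS above; the example record fields shown in the comments are illustrative, not guaranteed values.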