smeoni committed
Commit dab57fa · 1 Parent(s): eed0ad2

Upload dataset.py

Files changed (1)
  1. dataset.py +240 -0
dataset.py ADDED
@@ -0,0 +1,240 @@
+ import os
+
+ import datasets
+ from bs4 import ResultSet, BeautifulSoup
+ from datasets import DownloadManager
+
+ _CITATION = """\
+ @report{Magnini2021,
+ author = {Bernardo Magnini and Begoña Altuna and Alberto Lavelli and Manuela Speranza
+ and Roberto Zanoli and Fondazione Bruno Kessler},
+ keywords = {Clinical data, clinical entities, corpus, multilingual, temporal information},
+ title = {The E3C Project:
+ European Clinical Case Corpus El proyecto E3C: European Clinical Case Corpus},
+ url = {https://uts.nlm.nih.gov/uts/umls/home},
+ year = {2021},
+ }
+
+ """
+
+ _DESCRIPTION = """\
+ The European Clinical Case Corpus (E3C) project aims at collecting and \
+ annotating a large corpus of clinical documents in five European languages (Spanish, \
+ Basque, English, French and Italian), which will be freely distributed. Annotations \
+ include temporal information, to allow temporal reasoning on chronologies, and \
+ information about clinical entities based on medical taxonomies, to be used for semantic reasoning.
+ """
+
+ _URL = "https://github.com/hltfbk/E3C-Corpus/archive/refs/tags/v2.0.0.zip"
+
+
+ class E3CConfig(datasets.BuilderConfig):
+     """BuilderConfig for E3C."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for E3C.
+         Args:
+             **kwargs: keyword arguments forwarded to super. The extra "layer" keyword
+                 selects the annotation layer of the corpus.
+         """
+         self.layer = kwargs.pop("layer")
+         super(E3CConfig, self).__init__(**kwargs)
+
+
+ class E3C(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.1.0")
+     BUILDER_CONFIGS = [
+         E3CConfig(
+             name="en",
+             version=VERSION,
+             description="This is the layer 1 split for English of the E3C dataset",
+             layer="1",
+         ),
+         E3CConfig(
+             name="es",
+             version=VERSION,
+             description="This is the layer 1 split for Spanish of the E3C dataset",
+             layer="1",
+         ),
+         E3CConfig(
+             name="eu",
+             version=VERSION,
+             description="This is the layer 1 split for Basque of the E3C dataset",
+             layer="1",
+         ),
+         E3CConfig(
+             name="fr",
+             version=VERSION,
+             description="This is the layer 1 split for French of the E3C dataset",
+             layer="1",
+         ),
+         E3CConfig(
+             name="it",
+             version=VERSION,
+             description="This is the layer 1 split for Italian of the E3C dataset",
+             layer="1",
+         ),
+     ]
+
+     def _info(self):
+         """This method specifies the DatasetInfo, which contains the dataset information and feature types."""
+         features = datasets.Features(
+             {
+                 "tokens": datasets.Sequence(datasets.Value("string")),
+                 "ner_tags": datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             "O",
+                             "CLINENTITY",
+                             "EVENT",
+                             "ACTOR",
+                             "BODYPART",
+                             "TIMEX3",
+                             "RML",
+                         ],
+                     ),
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             citation=_CITATION,
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager) -> list[datasets.SplitGenerator]:
+         """Returns SplitGenerators that contain all the different splits of the dataset.
+         Each language has its own split, and each split has 3 different layers (sub-splits):
+             - layer 1: full manual annotation of clinical entities, temporal information and
+               factuality, for benchmarking and linguistic analysis.
+             - layer 2: semi-automatic annotation of clinical entities
+             - layer 3: non-annotated documents
+         Args:
+             dl_manager: A `datasets.utils.DownloadManager` that can be used to download and
+                 extract URLs.
+         Returns:
+             A list of `datasets.SplitGenerator`. Contains all subsets of the dataset depending on
+             the language and the layer.
+         """
+         url = _URL
+         data_dir = dl_manager.download_and_extract(url)
+         language = {
+             "en": "English",
+             "es": "Spanish",
+             "eu": "Basque",
+             "fr": "French",
+             "it": "Italian",
+         }[self.config.name]
+         return [
+             datasets.SplitGenerator(
+                 name=self.config.name,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir,
+                         "E3C-Corpus-2.0.0/data_annotation",
+                         language,
+                         f"layer{self.config.layer}",
+                     ),
+                 },
+             ),
+         ]
+
+     @staticmethod
+     def get_annotations(entities: ResultSet, text: str) -> list:
+         """Extract the begin offset, the end offset and the text of each entity.
+
+         Args:
+             entities: The entities to extract.
+             text: The text of the document.
+         Returns:
+             A list of lists, each containing the begin offset, the end offset and the text
+             of an entity.
+         """
+         return [
+             [
+                 int(entity.get("begin")),
+                 int(entity.get("end")),
+                 text[int(entity.get("begin")) : int(entity.get("end"))],
+             ]
+             for entity in entities
+         ]
+
+     def get_parsed_data(self, filepath: str):
+         """Parse the data from the E3C dataset and yield it as dictionaries.
+         Iterate over the files in the dataset and parse, for each file, the following entities:
+             - CLINENTITY
+             - EVENT
+             - ACTOR
+             - BODYPART
+             - TIMEX3
+             - RML
+         For each entity, we extract the offsets and the text of the entity. Sentence and token
+         annotations are extracted as well.
+
+         Args:
+             filepath: The path to the folder containing the files to parse.
+         """
+         for root, _, files in os.walk(filepath):
+             for file in files:
+                 with open(f"{root}/{file}") as soup_file:
+                     soup = BeautifulSoup(soup_file, "xml")
+                     text = soup.find("cas:Sofa").get("sofaString")
+                     yield {
+                         "CLINENTITY": self.get_annotations(
+                             soup.find_all("custom:CLINENTITY"), text
+                         ),
+                         "EVENT": self.get_annotations(soup.find_all("custom:EVENT"), text),
+                         "ACTOR": self.get_annotations(soup.find_all("custom:ACTOR"), text),
+                         "BODYPART": self.get_annotations(soup.find_all("custom:BODYPART"), text),
+                         "TIMEX3": self.get_annotations(soup.find_all("custom:TIMEX3"), text),
+                         "RML": self.get_annotations(soup.find_all("custom:RML"), text),
+                         "SENTENCE": self.get_annotations(soup.find_all("type4:Sentence"), text),
+                         "TOKENS": self.get_annotations(soup.find_all("type4:Token"), text),
+                     }
+
+     def _generate_examples(self, filepath) -> tuple[int, dict]:
+         """Yields examples as (key, example) tuples.
+         Args:
+             filepath: The path to the folder containing the files to parse.
+         Yields:
+             The unique id of an example and the example itself, containing the tokens of one
+             sentence and one ner_tag per token (the entity type, or "O" outside entities).
+         """
+         guid = 0
+         for content in self.get_parsed_data(filepath):
+             for sentence in content["SENTENCE"]:
+                 filtered_tokens = list(
+                     filter(
+                         lambda token: token[0] >= sentence[0] and token[1] <= sentence[1],
+                         content["TOKENS"],
+                     )
+                 )
+                 labels = ["O"] * len(filtered_tokens)
+                 for entity_type in [
+                     "CLINENTITY",
+                     "EVENT",
+                     "ACTOR",
+                     "BODYPART",
+                     "TIMEX3",
+                     "RML",
+                 ]:
+                     if len(content[entity_type]) != 0 and sentence[1] >= content[entity_type][0][0]:
+                         for entities in list(
+                             filter(
+                                 lambda entity: sentence[0] <= entity[0] <= sentence[1],
+                                 content[entity_type],
+                             )
+                         ):
+                             annotated_tokens = [
+                                 idx_token
+                                 for idx_token, token in enumerate(filtered_tokens)
+                                 if token[0] >= entities[0] and token[1] <= entities[1]
+                             ]
+                             # The label set has no B-/I- prefixes, so every token covered by an
+                             # entity receives the same entity-type label.
+                             for idx_token in annotated_tokens:
+                                 labels[idx_token] = entity_type
+                 guid += 1
+                 yield guid, {
+                     "tokens": list(map(lambda tokens: tokens[2], filtered_tokens)),
+                     "ner_tags": labels,
+                 }
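
For context, a minimal usage sketch of the loading script above. It is not part of the commit: the script path "path/to/dataset.py" is a placeholder for wherever this file lives, and it assumes a `datasets` version that still supports script-based loaders and that `beautifulsoup4`/`lxml` are installed for the XML parsing.

# Usage sketch (assumptions: local script path, script-based loading available).
from datasets import load_dataset

# One config per language ("en", "es", "eu", "fr", "it"); each config exposes a
# single split named after the language code.
dataset = load_dataset("path/to/dataset.py", name="en")

example = dataset["en"][0]
print(example["tokens"])    # token strings of one sentence
print(example["ner_tags"])  # integer ClassLabel ids, one per token

# Map the integer ids back to the label names declared in _info();
# the generator yields string labels, which the builder encodes as ids.
label_feature = dataset["en"].features["ner_tags"].feature
print([label_feature.int2str(tag) for tag in example["ner_tags"]])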