Datasets: Fix doc with faulty spans and add more information to example
README.md CHANGED
@@ -21,6 +21,387 @@ paperswithcode_id: mobie
 pretty_name: MobIE
 tags:
 - structure-prediction
+dataset_info:
+- config_name: ee
+  features:
+  - name: id
+    dtype: string
+  - name: text
+    dtype: string
+  - name: entity_mentions
+    list:
+    - name: id
+      dtype: string
+    - name: text
+      dtype: string
+    - name: start
+      dtype: int32
+    - name: end
+      dtype: int32
+    - name: type
+      dtype:
+        class_label:
+          names:
+            '0': date
+            '1': disaster-type
+            '2': distance
+            '3': duration
+            '4': event-cause
+            '5': location
+            '6': location-city
+            '7': location-route
+            '8': location-stop
+            '9': location-street
+            '10': money
+            '11': number
+            '12': organization
+            '13': organization-company
+            '14': org-position
+            '15': percent
+            '16': person
+            '17': set
+            '18': time
+            '19': trigger
+    - name: refids
+      list:
+      - name: key
+        dtype: string
+      - name: value
+        dtype: string
+  - name: event_mentions
+    list:
+    - name: id
+      dtype: string
+    - name: trigger
+      struct:
+      - name: id
+        dtype: string
+      - name: text
+        dtype: string
+      - name: start
+        dtype: int32
+      - name: end
+        dtype: int32
+    - name: arguments
+      list:
+      - name: id
+        dtype: string
+      - name: text
+        dtype: string
+      - name: start
+        dtype: int32
+      - name: end
+        dtype: int32
+      - name: role
+        dtype:
+          class_label:
+            names:
+              '0': no_arg
+              '1': location
+              '2': delay
+              '3': direction
+              '4': start_loc
+              '5': end_loc
+              '6': start_date
+              '7': end_date
+              '8': cause
+              '9': jam_length
+              '10': route
+      - name: type
+        dtype:
+          class_label:
+            names:
+              '0': date
+              '1': disaster-type
+              '2': distance
+              '3': duration
+              '4': event-cause
+              '5': location
+              '6': location-city
+              '7': location-route
+              '8': location-stop
+              '9': location-street
+              '10': money
+              '11': number
+              '12': organization
+              '13': organization-company
+              '14': org-position
+              '15': percent
+              '16': person
+              '17': set
+              '18': time
+              '19': trigger
+    - name: event_type
+      dtype:
+        class_label:
+          names:
+            '0': O
+            '1': Accident
+            '2': CanceledRoute
+            '3': CanceledStop
+            '4': Delay
+            '5': Obstruction
+            '6': RailReplacementService
+            '7': TrafficJam
+  - name: tokens
+    sequence: string
+  - name: pos_tags
+    sequence: string
+  - name: lemma
+    sequence: string
+  - name: ner_tags
+    sequence:
+      class_label:
+        names:
+          '0': O
+          '1': B-date
+          '2': B-disaster-type
+          '3': B-distance
+          '4': B-duration
+          '5': B-event-cause
+          '6': B-location
+          '7': B-location-city
+          '8': B-location-route
+          '9': B-location-stop
+          '10': B-location-street
+          '11': B-money
+          '12': B-number
+          '13': B-organization
+          '14': B-organization-company
+          '15': B-org-position
+          '16': B-percent
+          '17': B-person
+          '18': B-set
+          '19': B-time
+          '20': B-trigger
+          '21': I-date
+          '22': I-disaster-type
+          '23': I-distance
+          '24': I-duration
+          '25': I-event-cause
+          '26': I-location
+          '27': I-location-city
+          '28': I-location-route
+          '29': I-location-stop
+          '30': I-location-street
+          '31': I-money
+          '32': I-number
+          '33': I-organization
+          '34': I-organization-company
+          '35': I-org-position
+          '36': I-percent
+          '37': I-person
+          '38': I-set
+          '39': I-time
+          '40': I-trigger
+  splits:
+  - name: train
+    num_bytes: 2023843
+    num_examples: 788
+  - name: test
+    num_bytes: 1232888
+    num_examples: 484
+  - name: validation
+    num_bytes: 395053
+    num_examples: 152
+  download_size: 8190212
+  dataset_size: 3651784
+- config_name: el
+  features:
+  - name: id
+    dtype: string
+  - name: text
+    dtype: string
+  - name: entity_mentions
+    list:
+    - name: id
+      dtype: string
+    - name: text
+      dtype: string
+    - name: start
+      dtype: int32
+    - name: end
+      dtype: int32
+    - name: type
+      dtype:
+        class_label:
+          names:
+            '0': date
+            '1': disaster-type
+            '2': distance
+            '3': duration
+            '4': event-cause
+            '5': location
+            '6': location-city
+            '7': location-route
+            '8': location-stop
+            '9': location-street
+            '10': money
+            '11': number
+            '12': organization
+            '13': organization-company
+            '14': org-position
+            '15': percent
+            '16': person
+            '17': set
+            '18': time
+            '19': trigger
+    - name: refids
+      list:
+      - name: key
+        dtype: string
+      - name: value
+        dtype: string
+  splits:
+  - name: train
+    num_bytes: 1345663
+    num_examples: 2115
+  - name: test
+    num_bytes: 503058
+    num_examples: 623
+  - name: validation
+    num_bytes: 298974
+    num_examples: 494
+  download_size: 8190212
+  dataset_size: 2147695
+- config_name: ner
+  features:
+  - name: id
+    dtype: string
+  - name: tokens
+    sequence: string
+  - name: ner_tags
+    sequence:
+      class_label:
+        names:
+          '0': O
+          '1': B-date
+          '2': B-disaster-type
+          '3': B-distance
+          '4': B-duration
+          '5': B-event-cause
+          '6': B-location
+          '7': B-location-city
+          '8': B-location-route
+          '9': B-location-stop
+          '10': B-location-street
+          '11': B-money
+          '12': B-number
+          '13': B-organization
+          '14': B-organization-company
+          '15': B-org-position
+          '16': B-percent
+          '17': B-person
+          '18': B-set
+          '19': B-time
+          '20': B-trigger
+          '21': I-date
+          '22': I-disaster-type
+          '23': I-distance
+          '24': I-duration
+          '25': I-event-cause
+          '26': I-location
+          '27': I-location-city
+          '28': I-location-route
+          '29': I-location-stop
+          '30': I-location-street
+          '31': I-money
+          '32': I-number
+          '33': I-organization
+          '34': I-organization-company
+          '35': I-org-position
+          '36': I-percent
+          '37': I-person
+          '38': I-set
+          '39': I-time
+          '40': I-trigger
+  splits:
+  - name: train
+    num_bytes: 1112606
+    num_examples: 2115
+  - name: test
+    num_bytes: 354244
+    num_examples: 623
+  - name: validation
+    num_bytes: 251031
+    num_examples: 494
+  download_size: 8190212
+  dataset_size: 1717881
+- config_name: re
+  features:
+  - name: id
+    dtype: string
+  - name: tokens
+    sequence: string
+  - name: entities
+    sequence:
+      list: int32
+  - name: entity_roles
+    sequence:
+      class_label:
+        names:
+          '0': no_arg
+          '1': trigger
+          '2': location
+          '3': delay
+          '4': direction
+          '5': start_loc
+          '6': end_loc
+          '7': start_date
+          '8': end_date
+          '9': cause
+          '10': jam_length
+          '11': route
+  - name: entity_types
+    sequence:
+      class_label:
+        names:
+          '0': date
+          '1': disaster-type
+          '2': distance
+          '3': duration
+          '4': event-cause
+          '5': location
+          '6': location-city
+          '7': location-route
+          '8': location-stop
+          '9': location-street
+          '10': money
+          '11': number
+          '12': organization
+          '13': organization-company
+          '14': org-position
+          '15': percent
+          '16': person
+          '17': set
+          '18': time
+          '19': trigger
+  - name: event_type
+    dtype:
+      class_label:
+        names:
+          '0': O
+          '1': Accident
+          '2': CanceledRoute
+          '3': CanceledStop
+          '4': Delay
+          '5': Obstruction
+          '6': RailReplacementService
+          '7': TrafficJam
+  - name: entity_ids
+    sequence: string
+  splits:
+  - name: train
+    num_bytes: 1048457
+    num_examples: 1199
+  - name: test
+    num_bytes: 501336
+    num_examples: 609
+  - name: validation
+    num_bytes: 179001
+    num_examples: 228
+  download_size: 8190212
+  dataset_size: 1728794
 ---
 
 # Dataset Card for "MobIE"
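As a usage note for the metadata above: a minimal sketch of how the four configurations (ner, el, re, ee) could be loaded and their features inspected with the datasets library. The Hub repository id "DFKI-SLT/mobie" is an assumption and may differ from where this dataset actually lives.

# Minimal sketch, assuming the dataset is hosted as "DFKI-SLT/mobie" (assumed repo id).
from datasets import load_dataset

for config in ("ner", "el", "re", "ee"):
    ds = load_dataset("DFKI-SLT/mobie", config)  # assumed repo id
    print(config, {split: d.num_rows for split, d in ds.items()})

# Inspect the NER tag set and decode the tags of one training example.
ner = load_dataset("DFKI-SLT/mobie", "ner", split="train")
label_names = ner.features["ner_tags"].feature.names
example = ner[0]
print(list(zip(example["tokens"], (label_names[t] for t in example["ner_tags"]))))

The num_bytes, num_examples, download_size, and dataset_size values in the metadata correspond to what such a load would report per split and configuration.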
mobie.py CHANGED
@@ -77,6 +77,62 @@ def simplify_dict(d, remove_attribute=True):
     return d
 
 
+def find_concept_mention_token_offsets(sentence, concept_mention):
+    arg_char_start = concept_mention["span"]["start"]
+    arg_char_end = concept_mention["span"]["end"]
+    arg_start = -1
+    arg_end = -1
+    for idx, token in enumerate(sentence["tokens"]):
+        if token["span"]["start"] == arg_char_start:
+            arg_start = idx
+        if token["span"]["end"] == arg_char_end:
+            arg_end = idx + 1
+    assert arg_start != -1 and arg_end != -1, f"Could not find token offsets for {concept_mention['id']}"
+    return arg_start, arg_end
+
+
+def fix_doc(doc):
+    """Fix document with faulty spans. REMOVE IF FIXED IN DATASET!"""
+    if doc["id"] == "1111185208647274501":
+        offset = 0
+        # Fix token spans
+        tokens = doc["tokens"]["array"]
+        for idx, token in enumerate(tokens):
+            if idx == 6:
+                offset += 1
+            token["span"]["start"] -= offset
+            if idx == 3:
+                offset += 1
+            token["span"]["end"] -= offset
+        # Fix concept mentions and relation mentions
+        offset = 0
+        concept_mentions = doc["conceptMentions"]["array"]
+        for idx, cm in enumerate(concept_mentions):
+            if idx == 1 or idx == 2:
+                offset += 1
+            cm["span"]["start"] -= offset
+            cm["span"]["end"] -= offset
+        rm = doc["relationMentions"]["array"][0]
+        rm["span"]["start"] -= 1
+        rm["span"]["end"] -= 2
+        rm["args"]["array"][0]["conceptMention"]["span"]["start"] -= 1
+        rm["args"]["array"][0]["conceptMention"]["span"]["end"] -= 1
+        rm["args"]["array"][1]["conceptMention"]["span"]["start"] -= 2
+        rm["args"]["array"][1]["conceptMention"]["span"]["end"] -= 2
+
+        doc["tokens"]["array"] = tokens
+        doc["sentences"]["array"][0]["span"]["end"] -= 2
+        doc["sentences"]["array"][0]["tokens"]["array"] = tokens[:20]
+        doc["sentences"]["array"][0]["conceptMentions"]["array"] = concept_mentions[:-1]
+        doc["sentences"]["array"][0]["relationMentions"]["array"] = [rm]
+        doc["sentences"]["array"][1]["span"]["start"] -= 2
+        doc["sentences"]["array"][1]["span"]["end"] -= 2
+        doc["sentences"]["array"][1]["tokens"]["array"] = tokens[20:]
+        doc["sentences"]["array"][1]["conceptMentions"]["array"] = [concept_mentions[-1]]
+        print("Fixed spans")
+    return doc
+
+
 class Mobie(datasets.GeneratorBasedBuilder):
     """MobIE is a German-language dataset which is human-annotated with 20 coarse- and fine-grained entity types and entity linking information for geographically linkable entities"""
 
@@ -139,17 +195,16 @@
                 ]
             }
         ]
+        prefixes = ["B", "I"]
+        ner_tags = ["O"] + [f"{prefix}-{label}" for prefix in prefixes for label in labels]
         if self.config.name == "ner":
-            prefixes = ["B", "I"]
-
-            names = ["O"] + [f"{prefix}-{label}" for prefix in prefixes for label in labels]
             features = datasets.Features(
                 {
                     "id": datasets.Value("string"),
                     "tokens": datasets.Sequence(datasets.Value("string")),
                     "ner_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
-                            names=names
+                            names=ner_tags
                         )
                     ),
                 }
@@ -224,7 +279,11 @@
                            ]
                        ),
                    }
-                ]
+                ],
+                "tokens": datasets.Sequence(datasets.Value("string")),
+                "pos_tags": datasets.Sequence(datasets.Value("string")),
+                "lemma": datasets.Sequence(datasets.Value("string")),
+                "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=ner_tags))
                }
            )
        else:
@@ -294,6 +353,8 @@
            raw = f.read()

            for doc in decode_stacked(raw):
+                if doc["id"] == "1111185208647274501":
+                    doc = fix_doc(doc)
                text = doc["text"]["string"]
                iterable = doc["sentences"]["array"] if sentence_level else [doc]
                for s in iterable:
@@ -314,6 +375,8 @@
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                doc = json.loads(line)
+                if doc["id"] == "1111185208647274501":
+                    doc = fix_doc(doc)
                doc = simplify_dict(doc)
                text = doc["text"]
                iterable = doc["sentences"] if sentence_level else [doc]
@@ -323,15 +386,24 @@
                mobie_cms = sentence["conceptMentions"]
                entity_mentions = []
                for cm in mobie_cms:
+                    char_start = cm["span"]["start"]
+                    char_end = cm["span"]["end"]
+                    # Find token offsets for entity mentions
+                    start, end = find_concept_mention_token_offsets(sentence, cm)
+                    cm_text = text[char_start:char_end]
+                    entity_id = "NIL"
+                    for refid in cm["refids"]:
+                        if refid["key"] == "osm_id":
+                            entity_id = refid["value"]
+                            break
                    entity_mentions.append({
-                        "id": cm["id"],
                        "text": cm_text,
+                        "start": start,
+                        "end": end,
+                        "char_start": char_start - sentence_start,
+                        "char_end": char_end - sentence_start,
                        "type": cm["type"],
+                        "entity_id": entity_id,
                        "refids": [
                            {
                                "key": refid["key"],
@@ -339,56 +411,41 @@
                            } for refid in cm["refids"]
                        ] if "refids" in cm and cm["refids"] else []
                    })
+                tokens = []
+                lemmas = []
+                ner_tags = []
+                pos_tags = []
+                for token in sentence["tokens"]:
+                    token_text = text[token["span"]["start"]:token["span"]["end"]]
+                    tokens.append(token_text)
+                    lemmas.append(token["lemma"])
+                    ner_tags.append(token["ner"])
+                    pos_tags.append(token["posTag"])
                if self.config.name == "el":
-                    # TODO use osm_id as entity id?
                    yield sentence_id, {
                        "id": sentence_id,
                        "text": text,
+                        "tokens": tokens,
                        "entity_mentions": entity_mentions
                    }
                elif self.config.name == "re":
                    mobie_rms = sentence["relationMentions"]
                    if not mobie_rms:
                        continue
-                    tokens = [text[token["span"]["start"]:token["span"]["end"]] for token in sentence["tokens"]]
                    entities = []
                    entity_types = []
                    entity_ids = []
-                    for cm in mobie_cms:
-                        start = -1
-                        end = -1
-                        for idx, token in enumerate(sentence["tokens"]):
-                            if token["span"]["start"] == cm["span"]["start"]:
-                                start = idx
-                            if token["span"]["end"] == cm["span"]["end"]:
-                                end = idx
-                        assert start != -1 and end != -1, f"Could not find token offsets for {cm['id']}"
-                        entities.append([start, end])
+                    for cm in entity_mentions:
+                        entities.append([cm["start"], cm["end"]])
                        entity_types.append(cm["type"])
-                        found_osm_id = False
-                        for refid in cm["refids"]:
-                            if refid["key"] == "osm_id":
-                                entity_ids.append(refid["value"])
-                                found_osm_id = True
-                                break
-                        if not found_osm_id:
-                            entity_ids.append("NIL")
+                        entity_ids.append(cm["entity_id"])
                    for rm in mobie_rms:
                        entity_roles = ["no_arg"] * len(entities)
                        for arg in rm["args"]:
                            entity_role = arg["role"]
-                            # Find token offsets for entity mentions
-                            start = -1
-                            end = -1
+                            # Matching via ids does not work, need to match via positions
                            cm = arg["conceptMention"]
-                            for idx, token in enumerate(sentence["tokens"]):
-                                if token["span"]["start"] == cm["span"]["start"]:
-                                    start = idx
-                                if token["span"]["end"] == cm["span"]["end"]:
-                                    end = idx
-                            assert start != -1 and end != -1, f"Could not find token offsets for {cm['id']}"
+                            start, end = find_concept_mention_token_offsets(sentence, cm)
                            entity_idx = -1
                            for idx, entity in enumerate(entities):
                                if entity == [start, end]:
@@ -420,21 +477,32 @@
                                    break
                        if trigger is None:
                            continue
+                        trigger_char_start = trigger["conceptMention"]["span"]["start"]
+                        trigger_char_end = trigger["conceptMention"]["span"]["end"]
+                        trigger_start, trigger_end = find_concept_mention_token_offsets(sentence, trigger["conceptMention"])
+                        trigger_text = text[trigger_char_start:trigger_char_end]
                        args = []
                        for arg in rm["args"]:
                            if arg["role"] == "trigger":
                                continue
+                            arg_char_start = arg["conceptMention"]["span"]["start"]
+                            arg_char_end = arg["conceptMention"]["span"]["end"]
+                            arg_start = -1
+                            arg_end = -1
+                            for idx, token in enumerate(sentence["tokens"]):
+                                if token["span"]["start"] == arg_char_start:
+                                    arg_start = idx
+                                if token["span"]["end"] == arg_char_end:
+                                    arg_end = idx + 1
+                            assert arg_start != -1 and arg_end != -1, f"Could not find token offsets for {arg['conceptMention']['id']}"
+                            arg_text = text[arg_char_start:arg_char_end]
                            args.append({
                                "id": arg["conceptMention"]["id"],
                                "text": arg_text,
-                                "start": arg_start
-                                "end": arg_end
+                                "start": arg_start,
+                                "end": arg_end,
+                                "char_start": arg_char_start - sentence_start,
+                                "char_end": arg_char_end - sentence_start,
                                "role": arg["role"],
                                "type": arg["conceptMention"]["type"]
                            })
@@ -443,8 +511,10 @@
                            "trigger": {
                                "id": trigger["conceptMention"]["id"],
                                "text": trigger_text,
-                                "start": trigger_start
-                                "end": trigger_end
+                                "start": trigger_start,
+                                "end": trigger_end,
+                                "char_start": trigger_char_start - sentence_start,
+                                "char_end": trigger_char_end - sentence_start
                            },
                            "arguments": args,
                            "event_type": rm["name"]
@@ -453,7 +523,11 @@
                        "id": sentence_id,
                        "text": text,
                        "entity_mentions": entity_mentions,
-                        "event_mentions": event_mentions
+                        "event_mentions": event_mentions,
+                        "tokens": tokens,
+                        "pos_tags": pos_tags,
+                        "lemma": lemmas,
+                        "ner_tags": ner_tags
                    }
                else:
                    raise ValueError("Invalid configuration name")
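To make the refactoring above concrete: the new find_concept_mention_token_offsets helper converts a concept mention's character span into a half-open token span by matching span boundaries against the sentence's token spans, and the re configuration now matches relation arguments against entity mentions purely via these token positions (entity == [start, end]). Below is a small self-contained sketch; the helper is copied from the diff above, while the sentence and mention are invented example data, not taken from MobIE.

# Helper as added in mobie.py above (repeated here so the sketch runs on its own).
def find_concept_mention_token_offsets(sentence, concept_mention):
    arg_char_start = concept_mention["span"]["start"]
    arg_char_end = concept_mention["span"]["end"]
    arg_start = -1
    arg_end = -1
    for idx, token in enumerate(sentence["tokens"]):
        if token["span"]["start"] == arg_char_start:
            arg_start = idx
        if token["span"]["end"] == arg_char_end:
            arg_end = idx + 1
    assert arg_start != -1 and arg_end != -1, f"Could not find token offsets for {concept_mention['id']}"
    return arg_start, arg_end

# Invented example data in the shape the helper expects (illustrative only).
text = "Stau auf der A100"
sentence = {
    "tokens": [
        {"span": {"start": 0, "end": 4}},    # "Stau"
        {"span": {"start": 5, "end": 8}},    # "auf"
        {"span": {"start": 9, "end": 12}},   # "der"
        {"span": {"start": 13, "end": 17}},  # "A100"
    ]
}
concept_mention = {"id": "cm-1", "span": {"start": 13, "end": 17}}

start, end = find_concept_mention_token_offsets(sentence, concept_mention)
print(start, end)       # 3 4  (half-open token span)
print(text[13:17])      # A100 (the corresponding character span)

The half-open end index (idx + 1) is what lets entities.append([cm["start"], cm["end"]]) and the later entity == [start, end] comparison work on token positions alone.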
|