Commit f28bfbf (parent: 6b1bef2) by phucdev

Add configs for entity linking, relation extraction and event extraction (sentence level)
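The commit replaces the single "mobie-v1_20210811" config with four named configs: "ner" (the old token/tag view) plus sentence-level "el", "re" and "ee" views. As a rough usage sketch (the repo id "phucdev/mobie" and the "train" split below are placeholders assumed for illustration, not confirmed by the diff):

    from datasets import load_dataset

    # placeholder repo id; point this at the Hub repo that hosts mobie.py
    el = load_dataset("phucdev/mobie", "el", trust_remote_code=True)
    print(el["train"][0]["concept_mentions"])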
Files changed (1):
  1. mobie.py +336 -88
mobie.py CHANGED
@@ -16,6 +16,7 @@
 MobIE is a German-language dataset which is human-annotated with 20 coarse- and fine-grained entity types and entity linking information for geographically linkable entities. The dataset consists of 3,232 social media texts and traffic reports with 91K tokens, and contains 20.5K annotated entities, 13.1K of which are linked to a knowledge base. A subset of the dataset is human-annotated with seven mobility-related, n-ary relation types, while the remaining documents are annotated using a weakly-supervised labeling approach implemented with the Snorkel framework. The dataset combines annotations for NER, EL and RE, and thus can be used for joint and multi-task learning of these fundamental information extraction tasks."""
 
 import re
+import json
 from json import JSONDecodeError, JSONDecoder
 
 import datasets
@@ -51,6 +52,30 @@ _URLs = {
 }
 
 
+def simplify_dict(d, remove_attribute=True):
+    if isinstance(d, dict):
+        new_dict = {}
+        for k, v in d.items():
+            if remove_attribute and k == "attributes":
+                continue
+            if isinstance(v, dict) and len(v) == 1:
+                if "string" in v:
+                    new_dict[k] = v["string"]
+                elif "map" in v:
+                    new_dict[k] = v["map"]
+                elif "array" in v:
+                    new_dict[k] = simplify_dict(v["array"])
+                else:
+                    new_dict[k] = simplify_dict(v)
+            else:
+                new_dict[k] = simplify_dict(v)
+        return new_dict
+    elif isinstance(d, list):
+        return [simplify_dict(x) for x in d]
+    else:
+        return d
+
+
 class Mobie(datasets.GeneratorBasedBuilder):
     """MobIE is a German-language dataset which is human-annotated with 20 coarse- and fine-grained entity types and entity linking information for geographically linkable entities"""
 
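A minimal sketch of what simplify_dict does, using an invented record (only the wrapper shapes follow the dataset format): it unwraps the Avro-style single-key {"string": ...}, {"array": ...} and {"map": ...} containers used in the MobIE JSON and drops "attributes" entries.

    doc = {
        "id": {"string": "doc-0"},
        "text": {"string": "Stau auf der A100"},
        "attributes": {"map": {"source": "twitter"}},
        "sentences": {"array": [{"id": {"string": "s0"}}]},
    }
    assert simplify_dict(doc) == {
        "id": "doc-0",
        "text": "Stau auf der A100",
        "sentences": [{"id": "s0"}],
    }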
@@ -68,63 +93,154 @@ class Mobie(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="mobie-v1_20210811", version=VERSION, description="MobIE V1"),
+        datasets.BuilderConfig(name="ner", version=VERSION, description="MobIE V1 NER"),
+        datasets.BuilderConfig(name="el", version=VERSION, description="MobIE V1 Entity Linking"),
+        datasets.BuilderConfig(name="re", version=VERSION, description="MobIE V1 Relation Extraction"),
+        datasets.BuilderConfig(name="ee", version=VERSION, description="MobIE V1 Event Extraction"),
     ]
+    DEFAULT_CONFIG_NAME = "ner"
 
     def _info(self):
-        features = datasets.Features(
+        labels = [
+            "date",
+            "disaster-type",
+            "distance",
+            "duration",
+            "event-cause",
+            "location",
+            "location-city",
+            "location-route",
+            "location-stop",
+            "location-street",
+            "money",
+            "number",
+            "organization",
+            "organization-company",
+            "org-position",
+            "percent",
+            "person",
+            "set",
+            "time",
+            "trigger",
+        ]
+        concept_mentions = [
             {
                 "id": datasets.Value("string"),
-                "tokens": datasets.Sequence(datasets.Value("string")),
-                "ner_tags": datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=[
-                            "O",
-                            "B-date",
-                            "I-date",
-                            "B-disaster-type",
-                            "I-disaster-type",
-                            "B-distance",
-                            "I-distance",
-                            "B-duration",
-                            "I-duration",
-                            "B-event-cause",
-                            "I-event-cause",
-                            "B-location",
-                            "I-location",
-                            "B-location-city",
-                            "I-location-city",
-                            "B-location-route",
-                            "I-location-route",
-                            "B-location-stop",
-                            "I-location-stop",
-                            "B-location-street",
-                            "I-location-street",
-                            "B-money",
-                            "I-money",
-                            "B-number",
-                            "I-number",
-                            "B-organization",
-                            "I-organization",
-                            "B-organization-company",
-                            "I-organization-company",
-                            "B-org-position",
-                            "I-org-position",
-                            "B-percent",
-                            "I-percent",
-                            "B-person",
-                            "I-person",
-                            "B-set",
-                            "I-set",
-                            "B-time",
-                            "I-time",
-                            "B-trigger",
-                            "I-trigger",
-                        ]
-                    )
-                ),
+                "text": datasets.Value("string"),
+                "start": datasets.Value("int32"),
+                "end": datasets.Value("int32"),
+                "type": datasets.features.ClassLabel(names=labels),
+                "refids": [
+                    {
+                        "key": datasets.Value("string"),
+                        "value": datasets.Value("string")
+                    }
+                ]
             }
-        )
+        ]
+        if self.config.name == "ner":
+            prefixes = ["B", "I"]
+
+            names = ["O"] + [f"{prefix}-{label}" for prefix in prefixes for label in labels]
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=names
+                        )
+                    ),
+                }
+            )
+        elif self.config.name == "el":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "concept_mentions": concept_mentions
+                }
+            )
+        elif self.config.name == "re":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "concept_mentions": concept_mentions,
+                    "relation_mentions": [
+                        {
+                            "id": datasets.Value("string"),
+                            "trigger": {
+                                "id": datasets.Value("string"),
+                                "text": datasets.Value("string"),
+                                "start": datasets.Value("int32"),
+                                "end": datasets.Value("int32")
+                            },
+                            "argument": {
+                                "id": datasets.Value("string"),
+                                "text": datasets.Value("string"),
+                                "start": datasets.Value("int32"),
+                                "end": datasets.Value("int32")
+                            },
+                            "type": datasets.features.ClassLabel(
+                                names=[
+                                    "no_arg", "location", "delay", "direction", "start_loc", "end_loc",
+                                    "start_date", "end_date", "cause", "jam_length", "route"
+                                ]
+                            ),
+                            "event_type": datasets.features.ClassLabel(
+                                names=[
+                                    "O", "Accident", "CanceledRoute", "CanceledStop", "Delay", "Obstruction",
+                                    "RailReplacementService", "TrafficJam"
+                                ]
+                            )
+                        }
+                    ]
+                }
+            )
+        elif self.config.name == "ee":
+            # Inspired by https://github.com/nlpcl-lab/ace2005-preprocessing?tab=readme-ov-file#format
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "entity_mentions": concept_mentions,
+                    "event_mentions": [
+                        {
+                            "id": datasets.Value("string"),
+                            "trigger": {
+                                "id": datasets.Value("string"),
+                                "text": datasets.Value("string"),
+                                "start": datasets.Value("int32"),
+                                "end": datasets.Value("int32"),
+                            },
+                            "arguments": [{
+                                "id": datasets.Value("string"),
+                                "text": datasets.Value("string"),
+                                "start": datasets.Value("int32"),
+                                "end": datasets.Value("int32"),
+                                "role": datasets.features.ClassLabel(
+                                    names=[
+                                        "no_arg", "location", "delay", "direction", "start_loc", "end_loc",
+                                        "start_date", "end_date", "cause", "jam_length", "route"
+                                    ]
+                                ),
+                                "type": datasets.features.ClassLabel(
+                                    names=labels
+                                )
+                            }],
+                            "event_type": datasets.features.ClassLabel(
+                                names=[
+                                    "O", "Accident", "CanceledRoute", "CanceledStop", "Delay", "Obstruction",
+                                    "RailReplacementService", "TrafficJam"
+                                ]
+                            ),
+                        }
+                    ]
+                }
+            )
+        else:
+            raise ValueError("Invalid configuration name")
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
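One detail of the new NER features worth noting: the comprehension builds all B- tags before all I- tags, so the ClassLabel ids no longer follow the removed interleaved ordering even though the tag set is identical. A quick check with a truncated label list:

    labels = ["date", "disaster-type"]  # truncated; the script uses all 20 labels
    names = ["O"] + [f"{prefix}-{label}" for prefix in ["B", "I"] for label in labels]
    print(names)
    # ['O', 'B-date', 'B-disaster-type', 'I-date', 'I-disaster-type']
    # the removed list was ordered 'O', 'B-date', 'I-date', 'B-disaster-type', ...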
@@ -171,41 +287,173 @@
     def _generate_examples(self, filepath, split):
         """Yields examples."""
 
-        NOT_WHITESPACE = re.compile(r"[^\s]")
-
-        def decode_stacked(document, pos=0, decoder=JSONDecoder()):
-            while True:
-                match = NOT_WHITESPACE.search(document, pos)
-                if not match:
-                    return
-                pos = match.start()
-                try:
-                    obj, pos = decoder.raw_decode(document, pos)
-                except JSONDecodeError:
-                    raise
-                yield obj
-
-        with open(filepath, encoding="utf-8") as f:
-            raw = f.read()
-
-        for doc in decode_stacked(raw):
-            text = doc["text"]["string"]
-
-
-            entity_starts = []
-            for m in doc["conceptMentions"]["array"]:
-                entity_starts.append(m["span"]["start"])
-            for s in doc["sentences"]["array"]:
-                toks = []
-                lbls = []
-                sid = s["id"]
-                for x in s["tokens"]["array"]:
-                    toks.append(text[x["span"]["start"] : x["span"]["end"]])
-                    lbls.append(x["ner"]["string"])
-
-                yield sid, {
-                    "id": sid,
-                    "tokens": toks,
-                    "ner_tags": lbls,
-                }
-
+        if self.config.name == "ner":
+            NOT_WHITESPACE = re.compile(r"[^\s]")
+
+            def decode_stacked(document, pos=0, decoder=JSONDecoder()):
+                while True:
+                    match = NOT_WHITESPACE.search(document, pos)
+                    if not match:
+                        return
+                    pos = match.start()
+                    try:
+                        obj, pos = decoder.raw_decode(document, pos)
+                    except JSONDecodeError:
+                        raise
+                    yield obj
+
+            with open(filepath, encoding="utf-8") as f:
+                raw = f.read()
+
+            for doc in decode_stacked(raw):
+                text = doc["text"]["string"]
+
+                # entity_starts = []
+                # for m in doc["conceptMentions"]["array"]:
+                #     entity_starts.append(m["span"]["start"])
+                for s in doc["sentences"]["array"]:
+                    toks = []
+                    lbls = []
+                    sid = s["id"]
+                    for x in s["tokens"]["array"]:
+                        toks.append(text[x["span"]["start"]: x["span"]["end"]])
+                        lbls.append(x["ner"]["string"])
+
+                    yield sid, {
+                        "id": sid,
+                        "tokens": toks,
+                        "ner_tags": lbls,
+                    }
+        else:
+            example_idx = 0
+            with open(filepath, encoding="utf-8") as f:
+                for line in f:
+                    doc = json.loads(line)
+                    doc = simplify_dict(doc)
+                    text = doc["text"]
+                    for sentence in doc["sentences"]:
+                        sentence_id = sentence["id"]
+                        sentence_start = sentence["span"]["start"]
+                        mobie_cms = sentence["conceptMentions"]
+                        concept_mentions = []
+                        for cm in mobie_cms:
+                            cm_start = cm["span"]["start"]
+                            cm_end = cm["span"]["end"]
+                            cm_text = text[cm_start:cm_end]
+                            concept_mentions.append({
+                                "id": cm["id"],
+                                "text": cm_text,
+                                "start": cm_start - sentence_start,
+                                "end": cm_end - sentence_start,
+                                "type": cm["type"],
+                                "refids": [
+                                    {
+                                        "key": refid["key"],
+                                        "value": refid["value"]
+                                    } for refid in cm["refids"]
+                                ] if "refids" in cm and cm["refids"] else []
+                            })
+                        if self.config.name == "el":
+                            yield sentence_id, {
+                                "id": sentence_id,
+                                "text": text,
+                                "concept_mentions": concept_mentions
+                            }
+                        elif self.config.name == "re":
+                            mobie_rms = sentence["relationMentions"]
+                            if not mobie_rms:
+                                continue
+                            relation_mentions = []
+                            for rm in mobie_rms:
+                                # Find trigger in rm["args"]
+                                trigger = None
+                                for arg in rm["args"]:
+                                    if arg["role"] == "trigger":
+                                        trigger = arg
+                                        break
+                                if trigger is None:
+                                    continue
+                                trigger_start = trigger["conceptMention"]["span"]["start"]
+                                trigger_end = trigger["conceptMention"]["span"]["end"]
+                                trigger_text = text[trigger_start:trigger_end]
+                                for arg in rm["args"]:
+                                    if arg["role"] == "trigger":
+                                        continue
+                                    argument_start = arg["conceptMention"]["span"]["start"]
+                                    argument_end = arg["conceptMention"]["span"]["end"]
+                                    argument_text = text[argument_start:argument_end]
+                                    relation_mentions.append({
+                                        "id": f"{sentence_id}-{example_idx}",
+                                        "trigger": {
+                                            "id": trigger["conceptMention"]["id"],
+                                            "text": trigger_text,
+                                            "start": trigger_start - sentence_start,
+                                            "end": trigger_end - sentence_start
+                                        },
+                                        "argument": {
+                                            "id": arg["conceptMention"]["id"],
+                                            "text": argument_text,
+                                            "start": argument_start - sentence_start,
+                                            "end": argument_end - sentence_start
+                                        },
+                                        "type": arg["role"],
+                                        "event_type": rm["name"]
+                                    })
+                            yield f"{sentence_id}_{example_idx}", {
+                                "id": f"{sentence_id}_{example_idx}",
+                                "text": text,
+                                "concept_mentions": concept_mentions,
+                                "relation_mentions": relation_mentions
+                            }
+                            example_idx += 1
+                        elif self.config.name == "ee":
+                            mobie_rms = sentence["relationMentions"]
+                            if not mobie_rms:
+                                continue
+                            event_mentions = []
+                            for rm in mobie_rms:
+                                # Find trigger in rm["args"]
+                                trigger = None
+                                for arg in rm["args"]:
+                                    if arg["role"] == "trigger":
+                                        trigger = arg
+                                        break
+                                if trigger is None:
+                                    continue
+                                trigger_start = trigger["conceptMention"]["span"]["start"]
+                                trigger_end = trigger["conceptMention"]["span"]["end"]
+                                trigger_text = text[trigger_start:trigger_end]
+                                args = []
+                                for arg in rm["args"]:
+                                    if arg["role"] == "trigger":
+                                        continue
+                                    arg_start = arg["conceptMention"]["span"]["start"]
+                                    arg_end = arg["conceptMention"]["span"]["end"]
+                                    arg_text = text[arg_start:arg_end]
+                                    args.append({
+                                        "id": arg["conceptMention"]["id"],
+                                        "text": arg_text,
+                                        "start": arg_start - sentence_start,
+                                        "end": arg_end - sentence_start,
+                                        "role": arg["role"],
+                                        "type": arg["conceptMention"]["type"]
+                                    })
+                                event_mentions.append({
+                                    "id": rm["id"],
+                                    "trigger": {
+                                        "id": trigger["conceptMention"]["id"],
+                                        "text": trigger_text,
+                                        "start": trigger_start - sentence_start,
+                                        "end": trigger_end - sentence_start
+                                    },
+                                    "arguments": args,
+                                    "event_type": rm["name"]
+                                })
+                            yield sentence_id, {
+                                "id": sentence_id,
+                                "text": text,
+                                "entity_mentions": concept_mentions,
+                                "event_mentions": event_mentions
+                            }
+                        else:
+                            raise ValueError("Invalid configuration name")
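The two branches above assume two different file layouts: the "ner" branch still reads the file as stacked JSON (whole documents concatenated back to back, not necessarily one per line), while the new configs treat it as JSON Lines via json.loads(line). A small invented payload showing what decode_stacked accepts:

    raw = '{"a": 1} {"b": 2}\n{"c": 3}'
    print(list(decode_stacked(raw)))
    # [{'a': 1}, {'b': 2}, {'c': 3}]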
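Finally, a hedged sketch of the record shapes the two new relation configs emit, with invented values (offsets are sentence-relative, as computed above; concept/entity mentions omitted). "re" yields one relation_mentions entry per (trigger, argument) pair, while "ee" groups all arguments under a single event:

    # "re" record
    {"id": "s0_0", "text": "Stau auf der A100",
     "concept_mentions": [],
     "relation_mentions": [
         {"id": "s0-0",
          "trigger": {"id": "cm1", "text": "Stau", "start": 0, "end": 4},
          "argument": {"id": "cm2", "text": "A100", "start": 13, "end": 17},
          "type": "location", "event_type": "TrafficJam"}]}

    # "ee" record
    {"id": "s0", "text": "Stau auf der A100",
     "entity_mentions": [],
     "event_mentions": [
         {"id": "rm0",
          "trigger": {"id": "cm1", "text": "Stau", "start": 0, "end": 4},
          "arguments": [{"id": "cm2", "text": "A100", "start": 13, "end": 17,
                         "role": "location", "type": "location-route"}],
          "event_type": "TrafficJam"}]}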