phucdev committed
Commit 3ac9938
1 Parent(s): 75b9352

Update science_ie.py with subtask configurations and sentence splitting

Files changed (1)
  1. science_ie.py +182 -74
science_ie.py CHANGED
@@ -17,6 +17,7 @@
import glob
import datasets

+ from pathlib import Path
from itertools import permutations
from spacy.lang.en import English

@@ -66,12 +67,42 @@ _URLS = {
}


+ def generate_relation(entities, arg1_id, arg2_id, relation, offset=0):
+     arg1 = None
+     arg2 = None
+     for e in entities:
+         if e["id"] == arg1_id:
+             arg1 = e
+         elif e["id"] == arg2_id:
+             arg2 = e
+     assert arg1 is not None and arg2 is not None, \
+         f"Did not find corresponding entities {arg1_id} & {arg2_id} in {entities}"
+     return {
+         "arg1_start": arg1["start"] - offset,
+         "arg1_end": arg1["end"] - offset,
+         "arg1_type": arg1["type"],
+         "arg2_start": arg2["start"] - offset,
+         "arg2_end": arg2["end"] - offset,
+         "arg2_type": arg2["type"],
+         "relation": relation
+     }
+
+
class ScienceIE(datasets.GeneratorBasedBuilder):
-     """ScienceIE is a dataset for the task of extracting key phrases and relations between them from scientific documents"""
+     """ScienceIE is a dataset for the task of extracting key phrases and relations between them from scientific
+     documents"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="subtask_a", version=VERSION,
+                                description="Subtask A of ScienceIE for tokens being outside, at the beginning, "
+                                            "or inside a key phrase"),
+         datasets.BuilderConfig(name="subtask_b", version=VERSION,
+                                description="Subtask B of ScienceIE for tokens being outside, or part of a material, "
+                                            "process or task"),
+         datasets.BuilderConfig(name="subtask_c", version=VERSION,
+                                description="Subtask C of ScienceIE for Synonym-of and Hyponym-of relations"),
        datasets.BuilderConfig(name="ner", version=VERSION, description="NER part of ScienceIE"),
        datasets.BuilderConfig(name="re", version=VERSION, description="Relation extraction part of ScienceIE"),
    ]

@@ -79,7 +110,31 @@ class ScienceIE(datasets.GeneratorBasedBuilder):
    DEFAULT_CONFIG_NAME = "ner"

    def _info(self):
-         if self.config.name == "re":
+         if self.config.name == "subtask_a":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "tags": datasets.Sequence(datasets.features.ClassLabel(names=["O", "B", "I"]))
+                 }
+             )
+         elif self.config.name == "subtask_b":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "tags": datasets.Sequence(datasets.features.ClassLabel(names=["O", "M", "P", "T"]))
+                 }
+             )
+         elif self.config.name == "subtask_c":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "tags": datasets.Sequence(datasets.Sequence(datasets.features.ClassLabel(names=["O", "S", "H"])))
+                 }
+             )
+         elif self.config.name == "re":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),

@@ -90,13 +145,7 @@ class ScienceIE(datasets.GeneratorBasedBuilder):
                    "arg2_start": datasets.Value("int32"),
                    "arg2_end": datasets.Value("int32"),
                    "arg2_type": datasets.Value("string"),
-                     "relation": datasets.features.ClassLabel(
-                         names=[
-                             "O",
-                             "Synonym-of",
-                             "Hyponym-of"
-                         ]
-                     )
+                     "relation": datasets.features.ClassLabel(names=["O", "Synonym-of", "Hyponym-of"])
                }
            )
        else:

@@ -104,16 +153,16 @@ class ScienceIE(datasets.GeneratorBasedBuilder):
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner_tags": datasets.Sequence(
+                     "tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
+                                 "B-Material",
+                                 "I-Material",
                                "B-Process",
                                "I-Process",
                                "B-Task",
-                                 "I-Task",
-                                 "B-Material",
-                                 "I-Material"
+                                 "I-Task"
                            ]
                        )
                    )

@@ -151,12 +200,16 @@ class ScienceIE(datasets.GeneratorBasedBuilder):
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        annotation_files = glob.glob(dir_path + "/**/*.ann", recursive=True)
        word_splitter = English()
-         key = 0
-         for f_anno_path in annotation_files:
-             f_text_path = f_anno_path.replace(".ann", ".txt")
+         word_splitter.add_pipe('sentencizer')
+         for f_anno_file in annotation_files:
+             doc_example_idx = 0
+             f_anno_path = Path(f_anno_file)
+             f_text_path = f_anno_path.with_suffix(".txt")
+             doc_id = f_anno_path.stem
            with open(f_anno_path, mode="r", encoding="utf8") as f_anno, \
                    open(f_text_path, mode="r", encoding="utf8") as f_text:
                text = f_text.read()
+                 doc = word_splitter(text)
                entities = []
                synonym_groups = []
                hyponyms = []

@@ -186,71 +239,126 @@ class ScienceIE(datasets.GeneratorBasedBuilder):
                        keyphr_text_lookup = text[int(start):int(end)]
                        keyphr_ann = split_line[2]
                        if keyphr_text_lookup != keyphr_ann:
-                             print("Spans don't match for anno " + line.strip() + " in file " + f_anno_path)
+                             print("Spans don't match for anno " + line.strip() + " in file " + f_anno_file)
+                         char_start = int(start)
+                         char_end = int(end)
+                         entity_span = doc.char_span(char_start, char_end, alignment_mode="expand")
+                         start = entity_span.start
+                         end = entity_span.end
                        entities.append({
                            "id": identifier,
-                             "char_start": int(start),
-                             "char_end": int(end),
+                             "start": start,
+                             "end": end,
+                             "char_start": char_start,
+                             "char_end": char_end,
                            "type": key_type
                        })
-                 doc = word_splitter(text)
-                 tokens = [token.text for token in doc]
-                 ner_tags = ["O" for _ in tokens]
-                 for entity in entities:
-                     entity_span = doc.char_span(entity["char_start"], entity["char_end"], alignment_mode="expand")
-                     entity["start"] = entity_span.start
-                     entity["end"] = entity_span.end
-                     ner_tags[entity["start"]] = "B-" + entity["type"]
-                     for i in range(entity["start"] + 1, entity["end"]):
-                         ner_tags[i] = "I-" + entity["type"]
+                 # check if any annotation is lost during sentence splitting
+                 synonym_groups_used = [False for _ in synonym_groups]
+                 hyponyms_used = [False for _ in hyponyms]
+                 for sent in doc.sents:
+                     token_offset = sent.start
+                     tokens = [token.text for token in sent]
+                     tags = ["O" for _ in tokens]
+                     sent_entities = []
+                     sent_entity_ids = []
+                     for entity in entities:
+                         if entity["start"] >= sent.start and entity["end"] <= sent.end:
+                             sent_entity = {k: v for k, v in entity.items()}
+                             sent_entity["start"] -= token_offset
+                             sent_entity["end"] -= token_offset
+                             sent_entities.append(sent_entity)
+                             sent_entity_ids.append(entity["id"])
+                     for entity in sent_entities:
+                         tags[entity["start"]] = "B-" + entity["type"]
+                         for i in range(entity["start"] + 1, entity["end"]):
+                             tags[i] = "I-" + entity["type"]

-                 if self.config.name == "re":
-                     entity_pairs = list(permutations([entity["id"] for entity in entities], 2))
                    relations = []
+                     entity_pairs_in_relation = []
+                     for idx, synonym_group in enumerate(synonym_groups):
+                         if all(entity_id in sent_entity_ids for entity_id in synonym_group):
+                             synonym_groups_used[idx] = True
+                             for arg1_id, arg2_id in permutations(synonym_group, 2):
+                                 relations.append(
+                                     generate_relation(sent_entities, arg1_id, arg2_id, relation="Synonym-of"))
+                                 entity_pairs_in_relation.append((arg1_id, arg2_id))
+                     for idx, hyponym in enumerate(hyponyms):
+                         if hyponym["arg1_id"] in sent_entity_ids and hyponym["arg2_id"] in sent_entity_ids:
+                             hyponyms_used[idx] = True
+                             relations.append(
+                                 generate_relation(sent_entities, hyponym["arg1_id"], hyponym["arg2_id"],
+                                                   relation="Hyponym-of"))

-                     def add_relation(_arg1_id, _arg2_id, _relation):
-                         arg1 = None
-                         arg2 = None
-                         for e in entities:
-                             if e["id"] == _arg1_id:
-                                 arg1 = e
-                             elif e["id"] == _arg2_id:
-                                 arg2 = e
-                         assert arg1 is not None and arg2 is not None
-                         relations.append({
-                             "arg1_start": arg1["start"],
-                             "arg1_end": arg1["end"],
-                             "arg1_type": arg1["type"],
-                             "arg2_start": arg2["start"],
-                             "arg2_end": arg2["end"],
-                             "arg2_type": arg2["type"],
-                             "relation": _relation
-                         })
-                         # noinspection PyTypeChecker
-                         entity_pairs.remove((_arg1_id, _arg2_id))
-
-                     for synonym_group in synonym_groups:
-                         for arg1_id, arg2_id in permutations(synonym_group, 2):
-                             add_relation(arg1_id, arg2_id, _relation="Synonym-of")
-                     for hyponym in hyponyms:
-                         add_relation(hyponym["arg1_id"], hyponym["arg2_id"], _relation="Hyponym-of")
+                             entity_pairs_in_relation.append((arg1_id, arg2_id))
+                     entity_pairs = [(arg1["id"], arg2["id"]) for arg1, arg2 in permutations(sent_entities, 2)
+                                     if (arg1["id"], arg2["id"]) not in entity_pairs_in_relation]
                    for arg1_id, arg2_id in entity_pairs:
-                         add_relation(arg1_id, arg2_id, _relation="O")
-                     for relation in relations:
-                         key += 1
+                         relations.append(generate_relation(sent_entities, arg1_id, arg2_id, relation="O"))
+
+                     if self.config.name == "subtask_a":
+                         doc_example_idx += 1
+                         key = f"{doc_id}_{doc_example_idx}"
+                         # Yields examples as (key, example) tuples
+                         yield key, {
+                             "id": key,
+                             "tokens": tokens,
+                             "tags": [tag[0] for tag in tags]
+                         }
+                     elif self.config.name == "subtask_b":
+                         doc_example_idx += 1
+                         key = f"{doc_id}_{doc_example_idx}"
+                         # Yields examples as (key, example) tuples
+                         key_phrase_tags = []
+                         for tag in tags:
+                             if tag == "O":
+                                 key_phrase_tags.append(tag)
+                             else:
+                                 # use first letter of key phrase type
+                                 key_phrase_tags.append(tag[2])
+                         yield key, {
+                             "id": key,
+                             "tokens": tokens,
+                             "tags": key_phrase_tags
+                         }
+                     elif self.config.name == "subtask_c":
+                         doc_example_idx += 1
+                         key = f"{doc_id}_{doc_example_idx}"
+                         tag_vectors = [["O" for _ in tokens] for _ in tokens]
+                         for relation in relations:
+                             tag = relation["relation"][0]
+                             if tag != "O":
+                                 for i in range(relation["arg1_start"], relation["arg1_end"]):
+                                     for j in range(relation["arg2_start"], relation["arg2_end"]):
+                                         tag_vectors[i][j] = tag
+                         # Yields examples as (key, example) tuples
+                         yield key, {
+                             "id": key,
+                             "tokens": tokens,
+                             "tags": tag_vectors
+                         }
+                     elif self.config.name == "re":
+                         for relation in relations:
+                             doc_example_idx += 1
+                             key = f"{doc_id}_{doc_example_idx}"
+                             # Yields examples as (key, example) tuples
+                             example = {
+                                 "id": key,
+                                 "tokens": tokens
+                             }
+                             for k, v in relation.items():
+                                 example[k] = v
+                             yield doc_example_idx, example
+                     else:  # NER config
+                         doc_example_idx += 1
+                         key = f"{doc_id}_{doc_example_idx}"
                        # Yields examples as (key, example) tuples
-                         example = {
-                             "id": str(key),
-                             "tokens": tokens
+                         yield key, {
+                             "id": key,
+                             "tokens": tokens,
+                             "tags": tags
                        }
-                         for k, v in relation.items():
-                             example[k] = v
-                         yield key, example
-                 else:
-                     key += 1
-                     # Yields examples as (key, example) tuples
-                     yield key, {
-                         "id": str(key),
-                         "tokens": tokens,
-                         "ner_tags": ner_tags
-                     }
+
+                 assert all(synonym_groups_used) and all(hyponyms_used), \
+                     f"Annotations were lost: {len([e for e in synonym_groups_used if e])} synonym annotations," \
+                     f"{len([e for e in hyponyms_used if e])} synonym annotations"