Matej Klemen committed on
Commit
6338ec1
1 Parent(s): ab36125

Breaking change: change format of instances to be unified with G-KOMET; refactor code

Browse files
Files changed (2) hide show
  1. dataset_infos.json +1 -1
  2. komet.py +119 -43
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"default": {"description": "KOMET 1.0 is a hand-annotated corpus for metaphorical expressions which contains about 200,000 words from \nSlovene journalistic, fiction and on-line texts. \n\nTo annotate metaphors in the corpus an adapted and modified procedure of the MIPVU protocol \n(Steen et al., 2010: A method for linguistic metaphor identification: From MIP to MIPVU, https://www.benjamins.com/catalog/celcr.14) \nwas used. The lexical units (words) whose contextual meanings are opposed to their basic meanings are considered \nmetaphor-related words. The basic and contextual meaning for each word in the corpus was identified using the \nDictionary of the standard Slovene Language. The corpus was annotated for the metaphoric following relations: \nindirect metaphor (MRWi), direct metaphor (MRWd), borderline case (WIDLI) and metaphor signal (MFlag). \nIn addition, the corpus introduces a new 'frame' tag, which gives information about the concept to which it refers.\n", "citation": "@InProceedings{antloga2020komet,\ntitle = {Korpus metafor KOMET 1.0},\nauthor={Antloga, \u000b{S}pela},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities (Student abstracts)},\nyear={2020},\npages={167-170}\n}\n", "homepage": "http://hdl.handle.net/11356/1293", "license": "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"document_name": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_paragraph": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_sentence": {"dtype": "uint32", "id": null, "_type": "Value"}, "sentence_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "met_type": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "met_frame": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, 
"length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "komet", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5739906, "num_examples": 13963, "dataset_name": "komet"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1293/komet.tei.zip": {"num_bytes": 7311643, "checksum": "213f8f5c5b4e4989705a88e014c345fa6038f66e14a83fecb94e08e9f0da6640"}}, "download_size": 7311643, "post_processing_size": null, "dataset_size": 5739906, "size_in_bytes": 13051549}}
 
1
+ {"default": {"description": "KOMET 1.0 is a hand-annotated corpus for metaphorical expressions which contains about 200,000 words from \nSlovene journalistic, fiction and on-line texts. \n\nTo annotate metaphors in the corpus an adapted and modified procedure of the MIPVU protocol \n(Steen et al., 2010: A method for linguistic metaphor identification: From MIP to MIPVU, https://www.benjamins.com/catalog/celcr.14) \nwas used. The lexical units (words) whose contextual meanings are opposed to their basic meanings are considered \nmetaphor-related words. The basic and contextual meaning for each word in the corpus was identified using the \nDictionary of the standard Slovene Language. The corpus was annotated for the metaphoric following relations: \nindirect metaphor (MRWi), direct metaphor (MRWd), borderline case (WIDLI) and metaphor signal (MFlag). \nIn addition, the corpus introduces a new 'frame' tag, which gives information about the concept to which it refers.\n", "citation": "@InProceedings{antloga2020komet,\ntitle = {Korpus metafor KOMET 1.0},\nauthor={Antloga, \u000b{S}pela},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities (Student abstracts)},\nyear={2020},\npages={167-170}\n}\n", "homepage": "http://hdl.handle.net/11356/1293", "license": "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"document_name": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_paragraph": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_sentence": {"dtype": "uint32", "id": null, "_type": "Value"}, "sentence_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "met_type": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}], 
"met_frame": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "komet", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3481821, "num_examples": 13963, "dataset_name": "komet"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1293/komet.tei.zip": {"num_bytes": 7311643, "checksum": "213f8f5c5b4e4989705a88e014c345fa6038f66e14a83fecb94e08e9f0da6640"}}, "download_size": 7311643, "post_processing_size": null, "dataset_size": 3481821, "size_in_bytes": 10793464}}
komet.py CHANGED
@@ -3,7 +3,7 @@
3
  import os
4
  import re
5
  import xml.etree.ElementTree as ET
6
- from typing import List
7
 
8
  import datasets
9
 
@@ -40,44 +40,33 @@ _URLS = {
40
  }
41
 
42
 
 
 
 
 
43
  def namespace(element):
44
  # https://stackoverflow.com/a/12946675
45
  m = re.match(r'\{.*\}', element.tag)
46
  return m.group(0) if m else ''
47
 
48
 
49
- def resolve(element) -> List:
50
- def _resolve_recursively(element, metaphor_type: str, frame_buffer: List):
 
 
51
  # Leaf node: word or punctuation character
52
  if element.tag.endswith(("w", "pc")):
53
- if len(frame_buffer) == 0:
54
- return element.text, metaphor_type, "O"
55
- else:
56
- # Frame annotations may be nested, encode them with a "/" separator;
57
- # e.g., the first annotation is the frame of the phrase involving current word and the last annotation
58
- # is the frame of a phrase part
59
- return element.text, metaphor_type, "/".join(frame_buffer)
60
 
61
- # Annotated word or word group
62
  elif element.tag.endswith("seg"):
63
- mtype, new_frame_buffer = "O", list(frame_buffer)
64
- if element.attrib["subtype"] != "frame":
65
- mtype = element.attrib["subtype"]
66
- else:
67
- # Frame annotations in KOMET are prepended with "#met.", while those in GKomet are not: unify
68
- if element.attrib["ana"].startswith("#met."):
69
- _mframe = element.attrib["ana"][5:]
70
- else:
71
- _mframe = element.attrib["ana"]
72
- new_frame_buffer.append(_mframe)
73
-
74
  parsed_data = []
75
  for child in element:
76
- # spaces between words, skip
77
- if child.tag.endswith("c"):
78
  continue
79
 
80
- res = _resolve_recursively(child, mtype, new_frame_buffer)
81
  if isinstance(res, list):
82
  parsed_data.extend(res)
83
  else:
@@ -85,11 +74,62 @@ def resolve(element) -> List:
85
 
86
  return parsed_data
87
 
88
- curr_annotations = _resolve_recursively(element, "O", [])
89
- if not isinstance(curr_annotations, list):
90
- curr_annotations = [curr_annotations]
 
 
 
 
91
 
92
- return curr_annotations
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
 
95
  class Komet(datasets.GeneratorBasedBuilder):
@@ -105,8 +145,14 @@ class Komet(datasets.GeneratorBasedBuilder):
105
  "idx_paragraph": datasets.Value("uint32"),
106
  "idx_sentence": datasets.Value("uint32"), # index inside current paragraph
107
  "sentence_words": datasets.Sequence(datasets.Value("string")),
108
- "met_type": datasets.Sequence(datasets.Value("string")),
109
- "met_frame": datasets.Sequence(datasets.Value("string"))
 
 
 
 
 
 
110
  }
111
  )
112
  return datasets.DatasetInfo(
@@ -133,6 +179,7 @@ class Komet(datasets.GeneratorBasedBuilder):
133
  curr_path = os.path.join(data_dir, fname)
134
  if os.path.isfile(curr_path) and fname.endswith(".xml") and fname != "komet.xml": # komet.xml = meta-file
135
  data_files.append(fname)
 
136
 
137
  idx_example = 0
138
  for fname in data_files:
@@ -143,25 +190,54 @@ class Komet(datasets.GeneratorBasedBuilder):
143
  NAMESPACE = namespace(root)
144
 
145
  idx_sent_glob = 0
146
- for idx_par, curr_par in enumerate(root.iterfind(f"{NAMESPACE}p")):
 
 
 
 
 
 
 
 
 
 
 
 
 
147
  for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
148
- words, types, frames = [], [], []
149
- for curr_el in curr_sent:
150
- if curr_el.tag.endswith(("w", "pc", "seg")):
151
- curr_res = resolve(curr_el)
152
- for _el in curr_res:
153
- words.append(_el[0])
154
- types.append(_el[1])
155
- frames.append(_el[2])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
 
157
  yield idx_example, {
158
  "document_name": fname,
159
  "idx": idx_sent_glob,
160
  "idx_paragraph": idx_par,
161
  "idx_sentence": idx_sent,
162
- "sentence_words": words,
163
- "met_type": types,
164
- "met_frame": frames
165
  }
166
  idx_example += 1
 
167
  idx_sent_glob += 1
 
3
  import os
4
  import re
5
  import xml.etree.ElementTree as ET
6
+ from typing import List, Tuple
7
 
8
  import datasets
9
 
 
40
  }
41
 
42
 
43
+ XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
44
+ EL_LEAF, EL_TYPE, EL_FRAME = range(3)
45
+
46
+
47
def namespace(element):
    """Return the Clark-notation namespace of `element`'s tag (e.g. '{http://...}'),
    or an empty string if the tag carries no namespace."""
    # https://stackoverflow.com/a/12946675
    match = re.match(r'\{.*\}', element.tag)
    if match is None:
        return ''
    return match.group(0)
51
 
52
 
53
+ def word_info(sent_el):
54
+ def _resolve_recursively(element) -> List:
55
+ """ Knowingly ignored tags: name (anonymized, without IDs), gap, vocal, pause, del,
56
+ linkGrp (syntactic dependencies) """
57
  # Leaf node: word or punctuation character
58
  if element.tag.endswith(("w", "pc")):
59
+ id_curr = element.attrib[f"{XML_NAMESPACE}id"]
60
+ return [(id_curr, element.text)]
 
 
 
 
 
61
 
62
+ # Annotated word or word group - not interested in the annotations in this function
63
  elif element.tag.endswith("seg"):
 
 
 
 
 
 
 
 
 
 
 
64
  parsed_data = []
65
  for child in element:
66
+ if child.tag.endswith("c"): # empty space betw. words
 
67
  continue
68
 
69
+ res = _resolve_recursively(child)
70
  if isinstance(res, list):
71
  parsed_data.extend(res)
72
  else:
 
74
 
75
  return parsed_data
76
 
77
+ id_words, words = [], []
78
+ for child_el in sent_el:
79
+ curr_annotations = _resolve_recursively(child_el)
80
+ if curr_annotations is not None: # None = unrecognized ("unimportant") element
81
+ for ann in curr_annotations:
82
+ id_words.append(ann[0])
83
+ words.append(ann[1])
84
 
85
+ return id_words, words
86
+
87
+
88
+ def seg_info(sent_el):
89
+ def _resolve_recursively(element) -> Tuple:
90
+ """ Returns (type[, subtype], deeper_elements, latest_element)"""
91
+ # Leaf node: word or punctuation character
92
+ if element.tag.endswith(("w", "pc")):
93
+ id_curr = element.attrib[f"{XML_NAMESPACE}id"]
94
+ return EL_LEAF, [], [id_curr]
95
+
96
+ # Annotated word or word group
97
+ elif element.tag.endswith("seg"):
98
+ if element.attrib["subtype"] == "frame":
99
+ ann_type, subtype = EL_FRAME, element.attrib["ana"]
100
+ if subtype.startswith("#met."): # for consistency with G-Komet, remove "#met." prefix from frames
101
+ subtype = subtype[5:]
102
+ elif element.attrib["type"] == "metaphor":
103
+ ann_type = EL_TYPE
104
+ subtype = element.attrib["subtype"]
105
+ else:
106
+ raise ValueError(f"Unrecognized seg type: {element.attrib['type']}")
107
+
108
+ deeper_elements = []
109
+ latest_element = []
110
+ for child in element:
111
+ if child.tag.endswith(("c", "vocal", "pause")): # empty space betw. words or "special" word
112
+ continue
113
+
114
+ res = _resolve_recursively(child)
115
+ if res[0] == EL_LEAF:
116
+ latest_element.extend(res[2])
117
+ else:
118
+ deeper_elements.append(res)
119
+ latest_element.extend(res[3])
120
+
121
+ return ann_type, subtype, deeper_elements, latest_element
122
+
123
+ annotations = []
124
+ for child_el in sent_el:
125
+ if not child_el.tag.endswith("seg"):
126
+ continue
127
+
128
+ ann_type, subtype, deeper_elements, latest_element = _resolve_recursively(child_el)
129
+ annotations.extend(list(map(lambda _tup: (_tup[0], _tup[1], _tup[3]), deeper_elements)))
130
+ annotations.append((ann_type, subtype, latest_element))
131
+
132
+ return annotations
133
 
134
 
135
  class Komet(datasets.GeneratorBasedBuilder):
 
145
  "idx_paragraph": datasets.Value("uint32"),
146
  "idx_sentence": datasets.Value("uint32"), # index inside current paragraph
147
  "sentence_words": datasets.Sequence(datasets.Value("string")),
148
+ "met_type": [{
149
+ "type": datasets.Value("string"),
150
+ "word_indices": datasets.Sequence(datasets.Value("uint32"))
151
+ }],
152
+ "met_frame": [{
153
+ "type": datasets.Value("string"),
154
+ "word_indices": datasets.Sequence(datasets.Value("uint32"))
155
+ }]
156
  }
157
  )
158
  return datasets.DatasetInfo(
 
179
  curr_path = os.path.join(data_dir, fname)
180
  if os.path.isfile(curr_path) and fname.endswith(".xml") and fname != "komet.xml": # komet.xml = meta-file
181
  data_files.append(fname)
182
+ data_files = sorted(data_files)
183
 
184
  idx_example = 0
185
  for fname in data_files:
 
190
  NAMESPACE = namespace(root)
191
 
192
  idx_sent_glob = 0
193
+ for idx_par, curr_par in enumerate(root.iterfind(f".//{NAMESPACE}p")):
194
+ id2position = {} # {<idx_sent> -> {<id_word>: <position> foreach word} foreach sent}
195
+ all_words = []
196
+
197
+ # Pass#1: extract word information
198
+ for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
199
+ id_words, words = word_info(curr_sent)
200
+
201
+ id2position[idx_sent] = dict(zip(id_words, range(len(words))))
202
+ all_words.append(words)
203
+
204
+ all_types, all_frames = [], []
205
+
206
+ # Pass#2: extract annotations from <seg>ments
207
  for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
208
+ annotated_segs = seg_info(curr_sent)
209
+ all_types.append([])
210
+ all_frames.append([])
211
+
212
+ for curr_ann in annotated_segs:
213
+ ann_type, ann_subtype, words_involved = curr_ann
214
+ if ann_type == EL_TYPE:
215
+ all_types[idx_sent].append({
216
+ "type": ann_subtype,
217
+ "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
218
+ if _id_word in id2position[idx_sent]]
219
+ })
220
+ elif ann_type == EL_FRAME:
221
+ all_frames[idx_sent].append({
222
+ "type": ann_subtype,
223
+ "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
224
+ if _id_word in id2position[idx_sent]]
225
+ })
226
+
227
+ idx_sent = 0
228
+ for curr_words, curr_types, curr_frames in zip(all_words, all_types, all_frames):
229
+ if len(curr_words) == 0:
230
+ continue
231
 
232
  yield idx_example, {
233
  "document_name": fname,
234
  "idx": idx_sent_glob,
235
  "idx_paragraph": idx_par,
236
  "idx_sentence": idx_sent,
237
+ "sentence_words": curr_words,
238
+ "met_type": curr_types,
239
+ "met_frame": curr_frames
240
  }
241
  idx_example += 1
242
+ idx_sent += 1
243
  idx_sent_glob += 1