|
"""Metaphor corpus KOMET 1.0""" |
|
|
|
import os |
|
import re |
|
import xml.etree.ElementTree as ET |
|
from typing import List, Tuple |
|
|
|
import datasets |
|
|
|
_CITATION = """\ |
|
@InProceedings{antloga2020komet, |
|
title = {Korpus metafor KOMET 1.0}, |
|
author={Antloga, \v{S}pela}, |
|
booktitle={Proceedings of the Conference on Language Technologies and Digital Humanities (Student abstracts)}, |
|
year={2020}, |
|
pages={167-170} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
KOMET 1.0 is a hand-annotated corpus for metaphorical expressions which contains about 200,000 words from |
|
Slovene journalistic, fiction and on-line texts. |
|
|
|
To annotate metaphors in the corpus an adapted and modified procedure of the MIPVU protocol |
|
(Steen et al., 2010: A method for linguistic metaphor identification: From MIP to MIPVU, https://www.benjamins.com/catalog/celcr.14) |
|
was used. The lexical units (words) whose contextual meanings are opposed to their basic meanings are considered |
|
metaphor-related words. The basic and contextual meaning for each word in the corpus was identified using the |
|
Dictionary of the standard Slovene Language. The corpus was annotated for the metaphoric following relations: |
|
indirect metaphor (MRWi), direct metaphor (MRWd), borderline case (WIDLI) and metaphor signal (MFlag). |
|
In addition, the corpus introduces a new 'frame' tag, which gives information about the concept to which it refers. |
|
""" |
|
|
|
_HOMEPAGE = "http://hdl.handle.net/11356/1293" |
|
|
|
_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)" |
|
|
|
_URLS = { |
|
"komet": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1293/komet.tei.zip" |
|
} |
|
|
|
|
|
# Qualified prefix of the reserved XML namespace; used to read `xml:id` attributes
# from TEI token elements (ElementTree spells them "{namespace-uri}id").
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
# Internal markers for parsed <seg> content: a plain token leaf, a metaphor-type
# annotation, and a conceptual-frame annotation.
EL_LEAF, EL_TYPE, EL_FRAME = range(3)
|
|
|
|
|
def namespace(element):
    """Return the '{uri}' namespace prefix of *element*'s tag, or '' if it has none."""
    match = re.match(r"\{.*\}", element.tag)
    if match is None:
        return ""
    return match.group(0)
|
|
|
|
|
def word_info(sent_el):
    """Collect (token id, surface form) pairs for every token in a sentence element.

    Walks the direct children of *sent_el*, descending through nested <seg>
    elements, and returns two parallel lists: the tokens' xml:ids and their
    text forms, in document order.

    Knowingly ignored tags: name (anonymized, without IDs), gap, vocal, pause,
    del, linkGrp (syntactic dependencies).
    """
    def _resolve_recursively(element) -> List:
        # Leaf token: a word (<w>) or punctuation (<pc>) carries its own xml:id.
        if element.tag.endswith(("w", "pc")):
            id_curr = element.attrib[f"{XML_NAMESPACE}id"]
            return [(id_curr, element.text)]

        # <seg> groups tokens (an annotated span): flatten its children.
        elif element.tag.endswith("seg"):
            parsed_data = []
            for child in element:
                # NOTE(review): this also skips <pc> (which endswith "c") inside a
                # seg, not only TEI <c> character elements — confirm this is intended.
                if child.tag.endswith("c"):
                    continue

                res = _resolve_recursively(child)
                # Ignored tags (name, gap, vocal, ...) resolve to None; previously
                # such a None was appended to the list and later crashed the caller
                # on `ann[0]`. Skip them instead.
                if res:
                    parsed_data.extend(res)

            return parsed_data

        # Any other tag is deliberately ignored (implicit None return).

    id_words, words = [], []
    for child_el in sent_el:
        curr_annotations = _resolve_recursively(child_el)
        if curr_annotations is not None:
            for id_word, word in curr_annotations:
                id_words.append(id_word)
                words.append(word)

    return id_words, words
|
|
|
|
|
def seg_info(sent_el):
    """Extract metaphor annotations from a sentence element.

    Returns a list of (ann_type, subtype, word_ids) triples, where ann_type is
    EL_TYPE (a metaphor-type <seg>) or EL_FRAME (a frame <seg>), subtype is the
    annotation label, and word_ids are the xml:ids of the covered tokens.
    Annotations nested inside another <seg> are emitted before the enclosing one.
    """
    def _resolve_recursively(element) -> Tuple:
        """ Returns (type[, subtype], deeper_elements, latest_element)"""
        # Leaf token: contributes its xml:id to the enclosing annotation span.
        if element.tag.endswith(("w", "pc")):
            id_curr = element.attrib[f"{XML_NAMESPACE}id"]
            return EL_LEAF, [], [id_curr]

        elif element.tag.endswith("seg"):
            if element.attrib["subtype"] == "frame":
                ann_type, subtype = EL_FRAME, element.attrib["ana"]
                if subtype.startswith("#met."):
                    subtype = subtype[5:]  # strip the '#met.' reference prefix
            elif element.attrib["type"] == "metaphor":
                ann_type = EL_TYPE
                subtype = element.attrib["subtype"]
            else:
                raise ValueError(f"Unrecognized seg type: {element.attrib['type']}")

            deeper_elements = []  # finished annotations from nested <seg>s
            latest_element = []   # token ids belonging to THIS annotation
            for child in element:
                if child.tag.endswith("c"):
                    continue

                res = _resolve_recursively(child)
                # Ignored tags (name, gap, vocal, ...) resolve to None; previously
                # this raised a TypeError on `res[0]`. Skip them instead.
                if res is None:
                    continue
                if res[0] == EL_LEAF:
                    latest_element.extend(res[2])
                else:
                    # A nested annotation: keep its own deeper annotations, close
                    # it off, and fold its tokens into the enclosing span too.
                    deeper_elements.extend(res[2])
                    deeper_elements.append((res[0], res[1], res[3]))
                    latest_element.extend(res[3])

            return ann_type, subtype, deeper_elements, latest_element

        # Any other tag is deliberately ignored (implicit None return).

    annotations = []
    for child_el in sent_el:
        # Only <seg> children carry annotations at sentence level.
        if not child_el.tag.endswith("seg"):
            continue

        ann_type, subtype, deeper_elements, latest_element = _resolve_recursively(child_el)
        annotations.extend(deeper_elements)
        annotations.append((ann_type, subtype, latest_element))

    return annotations
|
|
|
|
|
class Komet(datasets.GeneratorBasedBuilder):
    """KOMET is a hand-annotated Slovenian corpus of metaphorical expressions."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the dataset schema: one example per non-empty sentence."""
        features = datasets.Features(
            {
                "document_name": datasets.Value("string"),
                "idx": datasets.Value("uint32"),
                "idx_paragraph": datasets.Value("uint32"),
                "idx_sentence": datasets.Value("uint32"),
                "sentence_words": datasets.Sequence(datasets.Value("string")),
                # Metaphor-type annotations (e.g. MRWi/MRWd/WIDLI/MFlag per the
                # corpus description) as spans of in-sentence word indices.
                "met_type": [{
                    "type": datasets.Value("string"),
                    "word_indices": datasets.Sequence(datasets.Value("uint32"))
                }],
                # Conceptual-frame annotations, same span representation.
                "met_frame": [{
                    "type": datasets.Value("string"),
                    "word_indices": datasets.Sequence(datasets.Value("uint32"))
                }]
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the TEI archive; the corpus ships as a single TRAIN split."""
        data_dir = dl_manager.download_and_extract(_URLS["komet"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # The zip extracts into a 'komet.tei' directory of per-document XMLs.
                gen_kwargs={"data_dir": os.path.join(data_dir, "komet.tei")},
            )
        ]

    def _generate_examples(self, data_dir):
        """Yield (key, example) pairs, one per non-empty sentence.

        For each per-document TEI file (skipping the corpus root 'komet.xml'),
        each paragraph is processed in two passes: first token xml:ids are
        mapped to in-sentence positions, then annotation spans are converted
        to word-index lists against that mapping.
        """
        # Collect the per-document XML files; 'komet.xml' is the corpus index.
        data_files = []
        for fname in os.listdir(data_dir):
            curr_path = os.path.join(data_dir, fname)
            if os.path.isfile(curr_path) and fname.endswith(".xml") and fname != "komet.xml":
                data_files.append(fname)
        data_files = sorted(data_files)  # deterministic iteration order

        idx_example = 0
        for fname in data_files:
            fpath = os.path.join(data_dir, fname)

            curr_doc = ET.parse(fpath)
            root = curr_doc.getroot()
            NAMESPACE = namespace(root)

            idx_sent_glob = 0
            for idx_par, curr_par in enumerate(root.iterfind(f".//{NAMESPACE}p")):
                # Pass 1: per-sentence map of token xml:id -> position, plus the words.
                id2position = {}
                all_words = []

                for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
                    id_words, words = word_info(curr_sent)

                    id2position[idx_sent] = dict(zip(id_words, range(len(words))))
                    all_words.append(words)

                all_types, all_frames = [], []

                # Pass 2: resolve annotated segments to word indices per sentence.
                for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
                    annotated_segs = seg_info(curr_sent)
                    all_types.append([])
                    all_frames.append([])

                    for curr_ann in annotated_segs:
                        ann_type, ann_subtype, words_involved = curr_ann
                        if ann_type == EL_TYPE:
                            all_types[idx_sent].append({
                                "type": ann_subtype,
                                # Silently drop ids not present in this sentence's map.
                                "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
                                                 if _id_word in id2position[idx_sent]]
                            })
                        elif ann_type == EL_FRAME:
                            all_frames[idx_sent].append({
                                "type": ann_subtype,
                                "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
                                                 if _id_word in id2position[idx_sent]]
                            })

                # Emit examples. Empty sentences are skipped, so idx_sentence
                # (and the re-bound idx_sent counter) number only yielded
                # sentences within the paragraph.
                idx_sent = 0
                for curr_words, curr_types, curr_frames in zip(all_words, all_types, all_frames):
                    if len(curr_words) == 0:
                        continue

                    yield idx_example, {
                        "document_name": fname,
                        "idx": idx_sent_glob,
                        "idx_paragraph": idx_par,
                        "idx_sentence": idx_sent,
                        "sentence_words": curr_words,
                        "met_type": curr_types,
                        "met_frame": curr_frames
                    }
                    idx_example += 1
                    idx_sent += 1
                    idx_sent_glob += 1
|
|