""" English metaphor-annotated corpus. """
import logging
import re
import xml.etree.ElementTree as ET
from copy import deepcopy
from typing import Dict, List, Tuple

import datasets
_CITATION = """\
@book{steen2010method,
title={A method for linguistic metaphor identification: From MIP to MIPVU},
author={Steen, Gerard and Dorst, Lettie and Herrmann, J. and Kaal, Anna and Krennmayr, Tina and Pasma, Trijntje},
volume={14},
year={2010},
publisher={John Benjamins Publishing}
}
"""
_DESCRIPTION = """\
The resource contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor.
There are four registers, each comprising about 50,000 words: academic texts, news texts, fiction, and conversations.
Words have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for
metaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal
metaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made
between clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of
metaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.
"""
_HOMEPAGE = "https://hdl.handle.net/20.500.12024/2541"
_LICENSE = "Available for non-commercial use on condition that the terms of the BNC Licence are observed and that " \
"this header is included in its entirety with any copy distributed."
_URLS = {
"vuamc": "https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2541/VUAMC.xml"
}
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
VICI_NAMESPACE = "{http://www.tei-c.org/ns/VICI}"
NA_STR = "N/A"
def namespace(element):
    """ Extract the Clark-notation namespace prefix (e.g. "{http://...}") from an element's tag. """
    # https://stackoverflow.com/a/12946675
m = re.match(r'\{.*\}', element.tag)
return m.group(0) if m else ''
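
# Illustrative example (comments only, not executed): the extracted prefix is
# required for every namespaced lookup below, e.g.
#   root = ET.parse("VUAMC.xml").getroot()
#   ns = namespace(root)             # e.g. "{http://www.tei-c.org/ns/1.0}" for TEI documents
#   text_el = root.find(f"{ns}text")
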
def resolve_recursively(el, ns):
words, pos_tags, met_type, meta_tags = [], [], [], []
if el.tag.endswith("w"):
        # A <w>ord may be:
        # (1) just text,
        # (2) a metaphor (its text fully enclosed in a <seg>), or
        # (3) a partial metaphor (optional text, followed by a <seg>, optionally followed by more text).
idx_word = 0
_w_text = el.text.strip() if el.text is not None else ""
if len(_w_text) > 0:
words.append(_w_text)
pos_tags.append(el.attrib["type"])
meta_tags.append(NA_STR)
idx_word += 1
met_els = el.findall(f"{ns}seg")
for met_el in met_els:
parse_tail = True
if met_el.text is None:
                # Handle an encoding inconsistency where the metaphor is a self-closing <seg/>
                # and the annotated word sits in its tail instead of its text:
                # <w lemma="to" type="PRP"><seg function="mrw" type="met" vici:morph="n"/>to </w>
_w_text = met_el.tail.strip()
else:
_w_text = met_el.text.strip()
            curr_met_type = met_el.attrib["function"]
# Let the user decide how they want to aggregate metaphors
if "type" in met_el.attrib:
curr_met_type = f"{curr_met_type}/{met_el.attrib['type']}"
if "subtype" in met_el.attrib:
curr_met_type = f"{curr_met_type}/{met_el.attrib['subtype']}"
words.append(_w_text)
pos_tags.append(el.attrib["type"])
meta_tags.append(NA_STR)
met_dict = {"type": curr_met_type, "word_indices": [idx_word]}
# Multi-word metaphors are annotated with xml:id="..." or corresp="..."
if f"{XML_NAMESPACE}id" in met_el.attrib:
met_dict["id"] = met_el.attrib[f"{XML_NAMESPACE}id"]
elif "corresp" in met_el.attrib:
met_dict["id"] = met_el.attrib["corresp"][1:] # remove the "#" in front
met_type.append(met_dict)
idx_word += 1
if not parse_tail:
continue
_w_text = met_el.tail.strip() if met_el.tail is not None else ""
if len(_w_text) > 0:
words.append(_w_text)
pos_tags.append(el.attrib["type"])
meta_tags.append(NA_STR)
idx_word += 1
elif el.tag.endswith("vocal"):
desc_el = el.find(f"{ns}desc")
description = desc_el.text.strip() if desc_el is not None else "unknown"
words.append("")
pos_tags.append(NA_STR)
meta_tags.append(f"vocal/{description}") # vocal/<desc>
elif el.tag.endswith("gap"):
words.append("")
pos_tags.append(NA_STR)
meta_tags.append(f"gap/{el.attrib.get('reason', 'unclear')}") # gap/<reason>
elif el.tag.endswith("incident"):
desc_el = el.find(f"{ns}desc")
description = desc_el.text.strip() if desc_el is not None else "unknown"
words.append("")
pos_tags.append(NA_STR)
meta_tags.append(f"incident/{description}")
elif el.tag.endswith("shift"):
# TODO: this is not exposed
new_state = el.attrib.get("new", "normal")
children = list(iter(el))
# NOTE: Intentionally skip shifts like this, without children:
# <u who="#PS05E"> <shift new="crying"/> </u>
if len(children) > 0:
for w_el in el:
_words, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
words.extend(_words)
pos_tags.extend(_pos)
meta_tags.extend(_metas)
elif el.tag.endswith("seg"):
# Direct <seg> descendant of a sentence indicates truncated text
word_el = el.find(f"{ns}w")
words.append(word_el.text.strip())
pos_tags.append(word_el.attrib["type"])
meta_tags.append(NA_STR)
elif el.tag.endswith("pause"):
words.append("")
pos_tags.append(NA_STR)
        meta_tags.append("pause")
elif el.tag.endswith("sic"):
for w_el in el:
_words, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
words.extend(_words)
pos_tags.extend(_pos)
meta_tags.extend(_metas)
elif el.tag.endswith("c"):
words.append(el.text.strip())
pos_tags.append(el.attrib["type"])
meta_tags.append(NA_STR)
elif el.tag.endswith("pb"):
words.append("")
pos_tags.append(NA_STR)
meta_tags.append(NA_STR)
elif el.tag.endswith("hi"):
# TODO: this is not exposed
rendition = el.attrib.get("rend", "normal")
for child_el in el:
_words, _pos, _mets, _metas = resolve_recursively(child_el, ns=ns)
words.extend(_words)
pos_tags.extend(_pos)
meta_tags.extend(_metas)
elif el.tag.endswith("choice"):
sic_el = el.find(f"{ns}sic")
_words, _pos, _mets, _metas = resolve_recursively(sic_el, ns=ns)
words.extend(_words)
pos_tags.extend(_pos)
met_type.extend(_mets)
meta_tags.extend(_metas)
    elif el.tag.endswith(("ptr", "corr")):
        # Intentionally skipping these:
        # - <ptr> is a TEI pointer element (a cross-reference) and carries no text
        # - <sic> is parsed instead of <corr> (see the <choice> branch above)
        pass
else:
logging.warning(f"Unrecognized child element: {el.tag}.\n"
f"If you are seeing this message, please open an issue on HF datasets.")
return words, pos_tags, met_type, meta_tags
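
# Illustrative sketch of the contract above (the XML fragment is hypothetical but
# follows the VUAMC encoding): a partially metaphorical word yields four parallel
# lists, with metaphor indices local to the word group; parse_sent() later remaps
# them to sentence-level positions.
#   el = ET.fromstring('<w lemma="towards" type="PRP">'
#                      '<seg function="mrw" type="met">to</seg>wards</w>')
#   resolve_recursively(el, ns="")
#   # -> (["to", "wards"], ["PRP", "PRP"],
#   #     [{"type": "mrw/met", "word_indices": [0]}], ["N/A", "N/A"])
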
def parse_sent(sent_el, ns) -> Tuple[List[str], List[str], List[Dict], List[str]]:
all_words, all_pos_tags, all_met_types, all_metas = [], [], [], []
for child_el in sent_el:
word, pos, mtype, meta = resolve_recursively(child_el, ns=ns)
# Need to remap local (index inside the word group) `word_indices` to global (index inside the sentence)
if len(mtype) > 0:
base = len(all_words)
for idx_met, met_info in enumerate(mtype):
mtype[idx_met]["word_indices"] = list(map(lambda _i: base + _i, met_info["word_indices"]))
all_words.extend(word)
all_pos_tags.extend(pos)
all_met_types.extend(mtype)
all_metas.extend(meta)
# Check if any of the independent metaphor annotations belong to the same word group (e.g., "taking" and "over")
if len(all_met_types) > 0:
grouped_met_type = {}
for met_info in all_met_types:
curr_id = met_info.get("id", f"met{len(grouped_met_type)}")
if curr_id in grouped_met_type:
existing_data = grouped_met_type[curr_id]
existing_data["word_indices"].extend(met_info["word_indices"])
else:
existing_data = deepcopy(met_info)
grouped_met_type[curr_id] = existing_data
new_met_types = []
for _, met_info in grouped_met_type.items():
if "id" in met_info:
del met_info["id"]
new_met_types.append(met_info)
all_met_types = new_met_types
return all_words, all_pos_tags, all_met_types, all_metas
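
# Grouping example (hypothetical values): two <seg> annotations that share an
# xml:id/corresp link mark a single multi-word metaphor and are merged into one
# entry; the internal "id" key is dropped from the output.
#   before: [{"type": "mrw/met", "word_indices": [3], "id": "m1"},
#            {"type": "mrw/met", "word_indices": [5], "id": "m1"}]
#   after:  [{"type": "mrw/met", "word_indices": [3, 5]}]
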
def parse_text_body(body_el, ns):
all_words: List[List] = []
all_pos: List[List] = []
all_met_type: List[List] = []
all_meta: List[List] = []
    # Edge case #1: a single <s>entence
if body_el.tag.endswith("s"):
words, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
all_words.append(words)
all_pos.append(pos_tags)
all_met_type.append(met_types)
all_meta.append(meta_tags)
    # Edge case #2: a <u>tterance either acts as a sentence itself (its children are words and
    # metadata rather than <s> elements) or contains multiple <s>entences as children
    elif body_el.tag.endswith("u"):
        children = [_child for _child in body_el if not _child.tag.endswith("ptr")]
        is_utterance_sent = all(map(lambda _child: not _child.tag.endswith("s"), children))
        if is_utterance_sent:
            # None of <u>'s children is a <s>entence, so the <u> itself is treated as a sentence
words, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
all_words.append(words)
all_pos.append(pos_tags)
all_met_type.append(met_types)
all_meta.append(meta_tags)
else:
            # <u> contains one or more <s>entence children
for _child in children:
words, pos_tags, met_types, meta_tags = parse_sent(_child, ns=ns)
all_words.append(words)
all_pos.append(pos_tags)
all_met_type.append(met_types)
all_meta.append(meta_tags)
# Recursively go deeper through all the <p>aragraphs, <div>s, etc. until we reach the sentences
else:
for _child in body_el:
_c_word, _c_pos, _c_met, _c_meta = parse_text_body(_child, ns=ns)
all_words.extend(_c_word)
all_pos.extend(_c_pos)
all_met_type.extend(_c_met)
all_meta.extend(_c_meta)
return all_words, all_pos, all_met_type, all_meta
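
# Shape note (illustrative values): parse_text_body returns sentence-aligned lists
# of lists, one inner list per sentence, e.g. for a body containing two sentences:
#   all_words    = [["He", "kicked", "the", "bucket", "."], ["Quiet", "."]]
#   all_pos      = [["PNP", "VVD", "AT0", "NN1", "PUN"], ["AJ0", "PUN"]]
#   all_met_type = [[{"type": "mrw/met", "word_indices": [1, 3]}], []]
#   all_meta     = [["N/A"] * 5, ["N/A"] * 2]
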
class VUAMC(datasets.GeneratorBasedBuilder):
"""English metaphor-annotated corpus. """
VERSION = datasets.Version("1.0.1")
def _info(self):
features = datasets.Features(
{
"document_name": datasets.Value("string"),
"words": datasets.Sequence(datasets.Value("string")),
"pos_tags": datasets.Sequence(datasets.Value("string")),
"met_type": [{
"type": datasets.Value("string"),
"word_indices": datasets.Sequence(datasets.Value("uint32"))
}],
"meta": datasets.Sequence(datasets.Value("string"))
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION
)
    def _split_generators(self, dl_manager):
        url = _URLS["vuamc"]
        data_path = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": data_path}
            )
        ]
def _generate_examples(self, file_path):
curr_doc = ET.parse(file_path)
root = curr_doc.getroot()
NAMESPACE = namespace(root)
root = root.find(f"{NAMESPACE}text")
idx_instance = 0
for idx_doc, doc in enumerate(root.iterfind(f".//{NAMESPACE}text")):
document_name = doc.attrib[f"{XML_NAMESPACE}id"]
body = doc.find(f"{NAMESPACE}body")
body_data = parse_text_body(body, ns=NAMESPACE)
for sent_words, sent_pos, sent_met_type, sent_meta in zip(*body_data):
# TODO: Due to some simplifications (not parsing certain metadata), some sentences may be empty
if len(sent_words) == 0:
continue
yield idx_instance, {
"document_name": document_name,
"words": sent_words,
"pos_tags": sent_pos,
"met_type": sent_met_type,
"meta": sent_meta
}
idx_instance += 1
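
# Minimal usage sketch (assumes this script is saved locally as vuamc.py; a local
# dataset script can be passed to `datasets.load_dataset` by path):
#   import datasets
#   vuamc = datasets.load_dataset("vuamc.py", split="train")
#   ex = vuamc[0]
#   for met in ex["met_type"]:
#       print(met["type"], [ex["words"][i] for i in met["word_indices"]])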