import os
import re
import xml.etree.ElementTree as ET

import datasets


_CITATION = """\
@misc{janes_tag,
    title = {{CMC} training corpus Janes-Tag 3.0},
    author = {Lenardi{\v c}, Jakob and {\v C}ibej, Jaka and Arhar Holdt, {\v S}pela and Erjavec, Toma{\v z} and Fi{\v s}er, Darja and Ljube{\v s}i{\'c}, Nikola and Zupan, Katja and Dobrovoljc, Kaja},
    url = {http://hdl.handle.net/11356/1732},
    note = {Slovenian language resource repository {CLARIN}.{SI}},
    copyright = {Creative Commons - Attribution-{ShareAlike} 4.0 International ({CC} {BY}-{SA} 4.0)},
    year = {2022}
}
"""

_DESCRIPTION = """\
Janes-Tag is a manually annotated corpus of Slovene Computer-Mediated Communication (CMC) consisting of mostly
tweets but also blogs, forums and news comments.
"""

_HOMEPAGE = "https://nl.ijs.si/janes/"

_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

_URLS = {
    "janes_tag_tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1732/Janes-Tag.3.0.TEI.zip"
}

XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
DEFAULT_NE = "O"


def namespace(element):
    # Extract the XML namespace (including braces) from an element's tag
    # https://stackoverflow.com/a/12946675
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''


def word_info(wordlike_tag, _namespace):
    """Extract (words, lemmas, MSDs, NE labels) from a TEI <w>, <pc> or <seg> element.
    Returns (None, None, None, None) for whitespace-only <c> elements."""
    if wordlike_tag.tag == f"{_namespace}c":
        return None, None, None, None

    if wordlike_tag.tag in {f"{_namespace}w", f"{_namespace}pc"}:
        nes = None
        children = list(iter(wordlike_tag))
        if len(children) > 0:
            # If this happens, the word contains nested words indicating its normalized form
            words, lemmas, msds = [], [], []
            for _child in wordlike_tag:
                assert _child.tag in {f"{_namespace}w", f"{_namespace}pc"}, _child.tag
                # Arbitrary words in the text have a normalized form that is formatted inconsistently and so it is
                # unclear how to parse it correctly -> convention: always use information of the normalized words
                if "norm" in _child.attrib:
                    words.append(_child.attrib["norm"].strip())
                    lemmas.append(_child.attrib["lemma"].strip())
                    msds.append(_child.attrib["ana"].strip())
                else:
                    # These don't have linguistic annotations ¯\_(ツ)_/¯
                    words.append(_child.text.strip())
                    lemmas.append(_child.text.strip())
                    msds.append("UNK")
        else:
            words = [wordlike_tag.text.strip()]
            lemmas = [wordlike_tag.attrib["lemma"].strip()]
            msds = [wordlike_tag.attrib["ana"].strip()]

        return words, lemmas, msds, nes

    words, lemmas, msds, nes = [], [], [], []
    if wordlike_tag.tag == f"{_namespace}seg":
        # <seg> marks a named entity span; its subtype holds the NE label
        ne_tag = wordlike_tag.attrib["subtype"].strip().upper()
        if ne_tag.startswith("DERIV-"):
            ne_tag = ne_tag[len("DERIV-"):]

        for _child in wordlike_tag:
            _child_words, _child_lemmas, _child_msds, _child_nes = word_info(_child, _namespace)
            if _child_words is None:
                continue

            words.extend(_child_words)
            lemmas.extend(_child_lemmas)
            msds.extend(_child_msds)

        # Convert the span into IOB2 labels: B- for the first token, I- for the rest
        nes = [f"B-{ne_tag}" if _i == 0 else f"I-{ne_tag}" for _i, _ in enumerate(words)]

    return words, lemmas, msds, nes


class JanesTag(datasets.GeneratorBasedBuilder):
    """Janes-Tag is a manually annotated corpus of Slovene Computer-Mediated Communication"""

    VERSION = datasets.Version("3.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "words": datasets.Sequence(datasets.Value("string")),
                "lemmas": datasets.Sequence(datasets.Value("string")),
                "msds": datasets.Sequence(datasets.Value("string")),
                "nes": datasets.Sequence(datasets.Value("string"))
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["janes_tag_tei"]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "Janes-Tag.3.0.TEI", "janes-tag.xml")}
            )
        ]

    def _generate_examples(self, file_path):
        curr_doc = ET.parse(file_path)
        root = curr_doc.getroot()
        NAMESPACE = namespace(root)

        root = root.find(f"{NAMESPACE}text").find(f"{NAMESPACE}body")

        idx_ex = 0
        for curr_ex in root.iterfind(f"{NAMESPACE}ab"):  # anonymous block
            curr_id = curr_ex.attrib[f"{XML_NAMESPACE}id"]
            ex_words, ex_lemmas, ex_msds, ex_nes = [], [], [], []

            for child_tag in curr_ex:
                if child_tag.tag not in {f"{NAMESPACE}s", f"{NAMESPACE}c"}:
                    continue

                # Skip whitespace (<c>) elements between sentences
                if child_tag.tag == f"{NAMESPACE}c":
                    continue

                # Iterate over elements of a sentence
                for word_or_seg_tag in child_tag:
                    _words, _lemmas, _msds, _nes = word_info(word_or_seg_tag, NAMESPACE)
                    if _words is None:
                        continue

                    if _nes is None:
                        _nes = [DEFAULT_NE for _ in range(len(_words))]

                    ex_words.extend(_words)
                    ex_lemmas.extend(_lemmas)
                    ex_msds.extend(_msds)
                    ex_nes.extend(_nes)

            yield idx_ex, {
                "id": curr_id,
                "words": ex_words,
                "lemmas": ex_lemmas,
                "msds": ex_msds,
                "nes": ex_nes
            }
            idx_ex += 1