import logging
import os
import re
import xml.etree.ElementTree as ET
from typing import Optional

import datasets
_CITATION = """\ |
|
@misc{solar3.0, |
|
title = {Developmental corpus {\v S}olar 3.0}, |
|
author = {Arhar Holdt, {\v S}pela and Rozman, Tadeja and Stritar Ku{\v c}uk, Mojca and Krek, Simon and Krap{\v s} Vodopivec, Irena and Stabej, Marko and Pori, Eva and Goli, Teja and Lavri{\v c}, Polona and Laskowski, Cyprian and Kocjan{\v c}i{\v c}, Polonca and Klemenc, Bojan and Krsnik, Luka and Kosem, Iztok}, |
|
url = {http://hdl.handle.net/11356/1589}, |
|
note = {Slovenian language resource repository {CLARIN}.{SI}}, |
|
year = {2022} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
Šolar is a developmental corpus of 5485 school texts (e.g., essays), written by students in Slovenian secondary schools |
|
(age 15-19) and pupils in the 7th-9th grade of primary school (13-15), with a small percentage also from the 6th grade. |
|
Part of the corpus (2,094 texts) is annotated with teachers' corrections using a system of labels described in the |
|
document available at https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Smernice-za-oznacevanje-korpusa-Solar_V1.1.pdf (in Slovenian). |
|
""" |
|
|
|

_HOMEPAGE = "http://hdl.handle.net/11356/1589"

_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"

_URLS = {
    "solar_tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Solar.TEI.zip"
}

XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"


def namespace(element):
    """Return the namespace of an element's tag (braces included),
    e.g. '{http://www.tei-c.org/ns/1.0}' for a TEI element."""
    m = re.match(r"\{.*\}", element.tag)
    return m.group(0) if m else ""


def resolve_element(tag_el, ne_tag: Optional[str] = "O"):
    """Flatten a TEI token element into a list of (id, form, lemma, msd, ne_tag) tuples.

    <w> (word) and <pc> (punctuation) elements yield a single token, while <seg> elements
    are resolved recursively, propagating the span's subtype as the tag of every token
    inside. Any other element is skipped."""
    if not tag_el.tag.endswith(("w", "pc", "seg")):
        logging.info(f"Skipping tag {tag_el.tag}")
        return []

    if tag_el.tag.endswith(("w", "pc")):
        form = tag_el.text.strip()
        # <pc> elements carry no lemma attribute, so the surface form doubles as the lemma
        lemma = tag_el.text.strip() if tag_el.tag.endswith("pc") else tag_el.attrib["lemma"]
        msd = tag_el.attrib["ana"]
        ret_ne_tag = ne_tag
        id_tag = tag_el.attrib[f"{XML_NAMESPACE}id"]

        return [(id_tag, form, lemma, msd, ret_ne_tag)]

    elif tag_el.tag.endswith("seg"):
        anns = []
        ret_ne_tag = tag_el.attrib["subtype"].upper()
        for curr_child in tag_el:
            anns.extend(resolve_element(curr_child, ne_tag=ret_ne_tag))

        return anns
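
# Illustrative sketch of the output (hypothetical values, not taken from the corpus): a word element
#   <w xml:id="solar1s.1.1.1" lemma="lemma" ana="mte:Ncfsn">form</w>
# resolves to [("solar1s.1.1.1", "form", "lemma", "mte:Ncfsn", "O")], while a <seg subtype="per">
# wrapping two <w> elements yields two tuples, each tagged "PER".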


def read_data(data_path):
    """Parse a TEI-encoded corpus file into a dict mapping sentence IDs to sentence-level annotations."""
    data = {}
    tree = ET.parse(data_path)
    root = tree.getroot()
    NAMESPACE = namespace(root)

    for curr_text in root.iterfind(f".//{NAMESPACE}div"):
        id_text = curr_text.attrib[f"{XML_NAMESPACE}id"]
        bibl_el = curr_text.find(f"{NAMESPACE}bibl")
        if bibl_el is None:
            text_title = "Unknown_title"
            logging.warning(f"The following text does not have a 'bibl' element: {curr_text.attrib}. "
                            f"Setting title to 'Unknown_title'")
            is_manually_validated = False
        else:
            text_title = bibl_el.attrib["n"]
            note_el = bibl_el.find(f"{NAMESPACE}note")
            # "DA" (Slovenian for "yes") marks texts whose annotations were manually validated
            is_manually_validated = note_el is not None and note_el.text == "DA"

        for idx_par, curr_par in enumerate(curr_text.iterfind(f".//{NAMESPACE}p")):
            for idx_sent, curr_sent in enumerate(curr_par.iterfind(f".//{NAMESPACE}s")):
                id_sent = curr_sent.attrib[f"{XML_NAMESPACE}id"]
                # Accumulate token annotations into parallel per-sentence lists
                ids, forms, lemmas, msds, nes = [], [], [], [], []
                for curr_el in curr_sent:
                    curr_annotations = resolve_element(curr_el)
                    for curr_ann in curr_annotations:
                        ids.append(curr_ann[0])
                        forms.append(curr_ann[1])
                        lemmas.append(curr_ann[2])
                        msds.append(curr_ann[3])
                        nes.append(curr_ann[4])

                data[id_sent] = {
                    "id_doc": id_text,
                    "doc_title": text_title,
                    "idx_par": idx_par,
                    "idx_sent": idx_sent,
                    "id_token": ids, "form": forms, "lemma": lemmas, "msd": msds, "ne_tag": nes,
                    "is_manually_validated": is_manually_validated
                }

    return data
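
# Illustrative shape of one entry in the returned dict (hypothetical values):
#   data["solar1s.3.1"] = {"id_doc": "solar1s", "doc_title": "...", "idx_par": 2, "idx_sent": 0,
#                          "id_token": [...], "form": [...], "lemma": [...], "msd": [...],
#                          "ne_tag": [...], "is_manually_validated": True}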


class Solar3(datasets.GeneratorBasedBuilder):
    """Šolar is a developmental corpus of school texts (e.g., essays), annotated with metadata
    and (partially) with teachers' corrections."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id_doc": datasets.Value("string"),
                "doc_title": datasets.Value("string"),
                "is_manually_validated": datasets.Value("bool"),
                "idx_src_par": datasets.Value("int32"),
                "idx_src_sent": datasets.Value("int32"),
                "id_src_tokens": datasets.Sequence(datasets.Value("string")),
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "idx_tgt_par": datasets.Value("int32"),
                "idx_tgt_sent": datasets.Value("int32"),
                "id_tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                # Token-level alignment between the source (student) and target (corrected) sentence
                "corrections": [
                    {
                        "idx_src": datasets.Sequence(datasets.Value("int32")),
                        "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                        "corr_types": datasets.Sequence(datasets.Value("string"))
                    }
                ]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
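
    # The nested "corrections" feature above aligns source and target tokens. An illustrative
    # (hypothetical) entry: a single source token at index 3 rewritten as the target token at
    # index 3, labeled with one of the labels from the annotation guidelines, would look like
    #   {"idx_src": [3], "idx_tgt": [3], "corr_types": ["<label>"]}
    # Multiple indices on either side encode token merges, splits, insertions, and deletions.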

    def _split_generators(self, dl_manager):
        urls = _URLS["solar_tei"]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # The TEI release ships three aligned files: the original student texts (solar-orig),
                # the teacher-corrected texts (solar-corr), and the correction links between them (solar-errs)
                gen_kwargs={
                    "source_path": os.path.join(data_dir, "Solar.TEI", "solar-orig.xml"),
                    "target_path": os.path.join(data_dir, "Solar.TEI", "solar-corr.xml"),
                    "links_path": os.path.join(data_dir, "Solar.TEI", "solar-errs.xml")
                }
            )
        ]

    def _generate_examples(self, source_path, target_path, links_path):
        source_data = read_data(source_path)
        target_data = read_data(target_path)

        data = ET.parse(links_path)
        root = data.getroot()
        NAMESPACE = namespace(root)

        for idx_corr, corrected_sent in enumerate(root.iterfind(f"{NAMESPACE}linkGrp")):
            involved_sents = corrected_sent.attrib["corresp"].split(" ")
            assert len(involved_sents) <= 2, f"The following correction has more than two sentences involved, " \
                                             f"which the script cannot handle: {corrected_sent.attrib}"
            if len(involved_sents) == 2:
                # Both a source and a target sentence are referenced; the IDs carry a leading '#',
                # which is stripped, and target-side IDs are recognizable by the 't' in them
                if "t" in involved_sents[0]:
                    id_src_sent, id_tgt_sent = list(map(lambda _s: _s[1:], involved_sents[::-1]))
                else:
                    id_src_sent, id_tgt_sent = list(map(lambda _s: _s[1:], involved_sents))
            else:
                # Only one sentence is referenced: the sentence was either removed by the correction
                # (source only) or newly inserted (target only)
                id_src_sent, id_tgt_sent = None, None
                if "t" in involved_sents[0]:
                    id_tgt_sent = involved_sents[0][1:]
                else:
                    id_src_sent = involved_sents[0][1:]

            id_doc, doc_title, is_manually_validated = None, None, False
            src_sent_data, tgt_sent_data = {}, {}
            assert id_src_sent is not None or id_tgt_sent is not None
            if id_src_sent is not None:
                src_sent_data = source_data[id_src_sent]
                id_doc = src_sent_data["id_doc"]
                doc_title = src_sent_data["doc_title"]
                is_manually_validated |= src_sent_data["is_manually_validated"]
            if id_tgt_sent is not None:
                tgt_sent_data = target_data[id_tgt_sent]
                id_doc = tgt_sent_data["id_doc"]
                doc_title = tgt_sent_data["doc_title"]
                is_manually_validated |= tgt_sent_data["is_manually_validated"]

            corr_data = []
            for token_info in corrected_sent.findall(f"{NAMESPACE}link"):
                connections = token_info.attrib["target"].split(" ")

                corrections = token_info.attrib["type"]
                # Links of type 'ID' connect identical (unchanged) tokens and carry no correction
                if corrections == "ID":
                    continue

                src_inds, tgt_inds = [], []
                # Multiple correction labels are separated by '|'
                corr_types = corrections.split("|")

                for curr_tok in connections:
                    # Token IDs end in a 1-based position within the sentence; convert to a 0-based index
                    idx_tok = int(curr_tok.split(".")[-1]) - 1
                    if "t" in curr_tok:
                        tgt_inds.append(idx_tok)
                    else:
                        src_inds.append(idx_tok)

                corr_data.append({"idx_src": src_inds, "idx_tgt": tgt_inds, "corr_types": corr_types})

            yield idx_corr, {
                # Drop the trailing source/target marker from the document ID
                "id_doc": id_doc[:-1],
                "doc_title": doc_title,
                "is_manually_validated": is_manually_validated,
                "idx_src_par": src_sent_data.get("idx_par", -1),
                "idx_src_sent": src_sent_data.get("idx_sent", -1),
                "id_src_tokens": src_sent_data.get("id_token", []),
                "src_tokens": src_sent_data.get("form", []),
                "idx_tgt_par": tgt_sent_data.get("idx_par", -1),
                "idx_tgt_sent": tgt_sent_data.get("idx_sent", -1),
                "id_tgt_tokens": tgt_sent_data.get("id_token", []),
                "tgt_tokens": tgt_sent_data.get("form", []),
                "corrections": corr_data
            }
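

# A minimal smoke-test sketch, not part of the loader itself. It assumes a `datasets` version
# that still supports script-based loading and network access to the CLARIN.SI repository.
if __name__ == "__main__":
    solar = datasets.load_dataset(__file__, split="train")
    example = solar[0]
    print(example["src_tokens"])
    print(example["tgt_tokens"])
    print(example["corrections"])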