import logging
import os
import re
import xml.etree.ElementTree as ET
from typing import Optional

import datasets

_CITATION = """\
@misc{solar3.0,
    title = {Developmental corpus {\v S}olar 3.0},
    author = {Arhar Holdt, {\v S}pela and Rozman, Tadeja and Stritar Ku{\v c}uk, Mojca and Krek, Simon and Krap{\v s} Vodopivec, Irena and Stabej, Marko and Pori, Eva and Goli, Teja and Lavri{\v c}, Polona and Laskowski, Cyprian and Kocjan{\v c}i{\v c}, Polonca and Klemenc, Bojan and Krsnik, Luka and Kosem, Iztok},
    url = {http://hdl.handle.net/11356/1589},
    note = {Slovenian language resource repository {CLARIN}.{SI}},
    year = {2022}
}
"""

_DESCRIPTION = """\
Šolar is a developmental corpus of 5,485 school texts (e.g., essays) written by students in Slovenian secondary schools 
(aged 15-19) and by pupils in the 7th-9th grade of primary school (aged 13-15), with a small percentage also from the 6th grade. 
Part of the corpus (2,094 texts) is annotated with teachers' corrections using a system of labels described in the 
document available at https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Smernice-za-oznacevanje-korpusa-Solar_V1.1.pdf (in Slovenian).
"""

_HOMEPAGE = "http://hdl.handle.net/11356/1589"

_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"

_URLS = {
    "solar_tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Solar.TEI.zip"
}

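# The xml: namespace, used below to read the xml:id attributes of documents, sentences, and tokens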
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"


def namespace(element):
    # https://stackoverflow.com/a/12946675
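    # Extract the '{namespace-uri}' prefix from an element tag (Clark notation), or '' if there is none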
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''


def resolve_element(tag_el, ne_tag: Optional[str] = "O"):
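    """Flatten a token-level TEI element into (id, form, lemma, msd, ne_tag) tuples.

    <w> (word) and <pc> (punctuation) elements yield a single token; <seg> elements mark named
    entities, so their children are resolved recursively with the entity label propagated to them.
    """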
    if not tag_el.tag.endswith(("w", "pc", "seg")):
        logging.info(f"Skipping tag {tag_el.tag}")
        return []

    if tag_el.tag.endswith(("w", "pc")):
        form = tag_el.text.strip()
        lemma = tag_el.text.strip() if tag_el.tag.endswith("pc") else tag_el.attrib["lemma"]
        msd = tag_el.attrib["ana"]
        ret_ne_tag = ne_tag
        id_tag = tag_el.attrib[f"{XML_NAMESPACE}id"]

        return [(id_tag, form, lemma, msd, ret_ne_tag)]
    # Named entities: words and punctuation nested directly below current element
    elif tag_el.tag.endswith("seg"):
        anns = []
        ret_ne_tag = tag_el.attrib["subtype"].upper()
        for curr_child in tag_el:
            anns.extend(resolve_element(curr_child, ne_tag=ret_ne_tag))

        return anns


def read_data(data_path):
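    """Parse a TEI-encoded file and map each sentence ID to its document metadata and token-level annotations."""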
    data = {}  # ID_sent -> sentence_metadata
    tree = ET.parse(data_path)
    root = tree.getroot()
    NAMESPACE = namespace(root)

    for curr_text in root.iterfind(f".//{NAMESPACE}div"):
        id_text = curr_text.attrib[f"{XML_NAMESPACE}id"]
        bibl_el = curr_text.find(f"{NAMESPACE}bibl")
        if bibl_el is None:
            text_title = "Unknown_title"
            logging.warning(f"The following text does not have a 'bibl' element: {curr_text.attrib}. "
                            f"Setting title to 'Unknown_title'")
            is_manually_validated = False
        else:
            text_title = bibl_el.attrib["n"]
            note_el = bibl_el.find(f"{NAMESPACE}note")
            is_manually_validated = note_el.text == "DA"

        for idx_par, curr_par in enumerate(curr_text.iterfind(f".//{NAMESPACE}p")):
            for idx_sent, curr_sent in enumerate(curr_par.iterfind(f".//{NAMESPACE}s")):
                id_sent = curr_sent.attrib[f"{XML_NAMESPACE}id"]
                ids, forms, lemmas, msds, nes = [], [], [], [], []
                for curr_el in curr_sent:
                    curr_annotations = resolve_element(curr_el)
                    for curr_ann in curr_annotations:
                        ids.append(curr_ann[0])
                        forms.append(curr_ann[1])
                        lemmas.append(curr_ann[2])
                        msds.append(curr_ann[3])
                        nes.append(curr_ann[4])

                data[id_sent] = {
                    "id_doc": id_text,
                    "doc_title": text_title,
                    "idx_par": idx_par,
                    "idx_sent": idx_sent,
                    "id_token": ids, "form": forms, "lemma": lemmas, "msd": msds, "ne_tag": nes,
                    "is_manually_validated": is_manually_validated
                }

    return data


class Solar3(datasets.GeneratorBasedBuilder):
    """Šolar is a developmental corpus of school texts (e.g., essays), annotated with metadata and (partially)
    with teachers' corrections. """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id_doc": datasets.Value("string"),
                "doc_title": datasets.Value("string"),
                "is_manually_validated": datasets.Value("bool"),
                "idx_src_par": datasets.Value("int32"),
                "idx_src_sent": datasets.Value("int32"),
                "id_src_tokens": datasets.Sequence(datasets.Value("string")),
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "idx_tgt_par": datasets.Value("int32"),
                "idx_tgt_sent": datasets.Value("int32"),
                "id_tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "corrections": [
                    {
                        "idx_src": datasets.Sequence(datasets.Value("int32")),
                        "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                        "corr_types": datasets.Sequence(datasets.Value("string"))
                    }
                ]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["solar_tei"]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "source_path": os.path.join(data_dir, "Solar.TEI", "solar-orig.xml"),
                    "target_path": os.path.join(data_dir, "Solar.TEI", "solar-corr.xml"),
                    "links_path": os.path.join(data_dir, "Solar.TEI", "solar-errs.xml")
                }
            )
        ]

    def _generate_examples(self, source_path, target_path, links_path):
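        # solar-orig.xml holds the original (student) sentences, solar-corr.xml the corrected versions,
        # and solar-errs.xml the <linkGrp> elements that link source tokens to target tokens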
        source_data = read_data(source_path)
        target_data = read_data(target_path)

        data = ET.parse(links_path)
        root = data.getroot()
        NAMESPACE = namespace(root)

        for idx_corr, corrected_sent in enumerate(root.iterfind(f"{NAMESPACE}linkGrp")):
            involved_sents = corrected_sent.attrib["corresp"].split(" ")
            assert len(involved_sents) <= 2, f"The following correction has more than two sentences involved, " \
                                             f"which the script cannot handle: {corrected_sent.attrib}"
            if len(involved_sents) == 2:
                # not always ordered <source> <target>, sometimes reversed
                if "t" in involved_sents[0]:
                    id_src_sent, id_tgt_sent = list(map(lambda _s: _s[1:], involved_sents[::-1]))
                else:
                    id_src_sent, id_tgt_sent = list(map(lambda _s: _s[1:], involved_sents))
            else:  # one sentence: only source or only target (usually a sentence marked as redundant or as missing)
                id_src_sent, id_tgt_sent = None, None
                if "t" in involved_sents[0]:
                    id_tgt_sent = involved_sents[0][1:]
                else:
                    id_src_sent = involved_sents[0][1:]

            id_doc, doc_title, is_manually_validated = None, None, False
            src_sent_data, tgt_sent_data = {}, {}
            assert id_src_sent is not None or id_tgt_sent is not None
            if id_src_sent is not None:
                src_sent_data = source_data[id_src_sent]
                id_doc = src_sent_data["id_doc"]
                doc_title = src_sent_data["doc_title"]
                is_manually_validated |= src_sent_data["is_manually_validated"]
            if id_tgt_sent is not None:
                tgt_sent_data = target_data[id_tgt_sent]
                id_doc = tgt_sent_data["id_doc"]
                doc_title = tgt_sent_data["doc_title"]
                is_manually_validated |= tgt_sent_data["is_manually_validated"]

            corr_data = []
            for token_info in corrected_sent.findall(f"{NAMESPACE}link"):
                connections = token_info.attrib["target"].split(" ")

                corrections = token_info.attrib["type"]
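                # Links of type "ID" are skipped, as they do not represent an actual correction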
                if corrections == "ID":
                    continue

                src_inds, tgt_inds = [], []
                corr_types = corrections.split("|")

                for curr_tok in connections:
                    # Token IDs have an index at the end, but it is 1-based; convert it to 0-based
                    idx_tok = int(curr_tok.split(".")[-1]) - 1
                    if "t" in curr_tok:  # target token
                        tgt_inds.append(idx_tok)
                    else:  # source token
                        src_inds.append(idx_tok)

                corr_data.append({"idx_src": src_inds, "idx_tgt": tgt_inds, "corr_types": corr_types})

            yield idx_corr, {
                "id_doc": id_doc[:-1],  # doc ID without the "s" or "t" info
                "doc_title": doc_title,
                "is_manually_validated": is_manually_validated,
                "idx_src_par": src_sent_data.get("idx_par", -1),
                "idx_src_sent": src_sent_data.get("idx_sent", -1),
                "id_src_tokens": src_sent_data.get("id_token", []),
                "src_tokens": src_sent_data.get("form", []),
                "idx_tgt_par": tgt_sent_data.get("idx_par", -1),
                "idx_tgt_sent": tgt_sent_data.get("idx_sent", -1),
                "id_tgt_tokens": tgt_sent_data.get("id_token", []),
                "tgt_tokens": tgt_sent_data.get("form", []),
                "corrections": corr_data
            }
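

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself. It assumes the script is
    # published on the Hugging Face Hub under a path such as "cjvt/solar3" (illustrative; adjust
    # as needed). Newer versions of `datasets` may also require trust_remote_code=True when
    # loading script-based datasets.
    solar = datasets.load_dataset("cjvt/solar3", split="train")
    example = solar[0]
    print(example["src_tokens"])   # tokenized original (student) sentence
    print(example["tgt_tokens"])   # tokenized corrected sentence
    print(example["corrections"])  # list of {"idx_src", "idx_tgt", "corr_types"} links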