Matej Klemen committed
Commit 5b47bfd (1 parent: de3dd8d)

Add first version of Solar 3.0 loading script

Files changed (2):
  1. dataset_infos.json +1 -0
  2. solar3.py +239 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\u0160olar is a developmental corpus of 5485 school texts (e.g., essays), written by students in Slovenian secondary schools \n(age 15-19) and pupils in the 7th-9th grade of primary school (13-15), with a small percentage also from the 6th grade. \nPart of the corpus (2,094 texts) is annotated with teachers' corrections using a system of labels described in the \ndocument available at https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Smernice-za-oznacevanje-korpusa-Solar_V1.1.pdf (in Slovenian).\n", "citation": "@misc{solar3.0,\n title = {Developmental corpus {\\v S}olar 3.0},\n author = {Arhar Holdt, {\\v S}pela and Rozman, Tadeja and Stritar Ku{\\v c}uk, Mojca and Krek, Simon and Krap{\\v s} Vodopivec, Irena and Stabej, Marko and Pori, Eva and Goli, Teja and Lavri{\\v c}, Polona and Laskowski, Cyprian and Kocjan{\\v c}i{\\v c}, Polonca and Klemenc, Bojan and Krsnik, Luka and Kosem, Iztok},\n url = {http://hdl.handle.net/11356/1589},\n note = {Slovenian language resource repository {CLARIN}.{SI}},\n year = {2022}\n}\n", "homepage": "http://hdl.handle.net/11356/1589", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"id_doc": {"dtype": "string", "id": null, "_type": "Value"}, "doc_title": {"dtype": "string", "id": null, "_type": "Value"}, "is_manually_validated": {"dtype": "bool", "id": null, "_type": "Value"}, "idx_src_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_src_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_src_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "src_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "idx_tgt_par": {"dtype": "int32", "id": null, "_type": "Value"}, "idx_tgt_sent": {"dtype": "int32", "id": null, "_type": "Value"}, "id_tgt_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tgt_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "corrections": [{"idx_src": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "idx_tgt": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "corr_types": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "solar3", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 122284807, "num_examples": 125867, "dataset_name": "solar3"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Solar.TEI.zip": {"num_bytes": 99287852, "checksum": "188945c90c663cc34c77c6aefd40357b60b88436b3d9fd53f24304c927ac1cbf"}}, "download_size": 99287852, "post_processing_size": null, "dataset_size": 122284807, "size_in_bytes": 221572659}}
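
Per the metadata above, the dataset declares a single train split with 125,867 examples. A minimal loading sketch follows; the repository id "cjvt/solar3" is an assumption (the page header only shows the cjvt namespace), so adjust it if the dataset is published under a different name.

import datasets

# Assumption: the dataset lives at "cjvt/solar3" on the Hugging Face Hub
solar = datasets.load_dataset("cjvt/solar3", split="train")

print(len(solar))      # 125867, per the "splits" entry in dataset_infos.json
print(solar.features)  # mirrors the "features" block above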
solar3.py ADDED
@@ -0,0 +1,239 @@
+ import logging
+ import os
+ import re
+ import xml.etree.ElementTree as ET
+ from typing import Optional
+
+ import datasets
+
+ _CITATION = """\
+ @misc{solar3.0,
+  title = {Developmental corpus {\\v S}olar 3.0},
+  author = {Arhar Holdt, {\\v S}pela and Rozman, Tadeja and Stritar Ku{\\v c}uk, Mojca and Krek, Simon and Krap{\\v s} Vodopivec, Irena and Stabej, Marko and Pori, Eva and Goli, Teja and Lavri{\\v c}, Polona and Laskowski, Cyprian and Kocjan{\\v c}i{\\v c}, Polonca and Klemenc, Bojan and Krsnik, Luka and Kosem, Iztok},
+  url = {http://hdl.handle.net/11356/1589},
+  note = {Slovenian language resource repository {CLARIN}.{SI}},
+  year = {2022}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Šolar is a developmental corpus of 5485 school texts (e.g., essays), written by students in Slovenian secondary schools
+ (age 15-19) and pupils in the 7th-9th grade of primary school (13-15), with a small percentage also from the 6th grade.
+ Part of the corpus (2,094 texts) is annotated with teachers' corrections using a system of labels described in the
+ document available at https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Smernice-za-oznacevanje-korpusa-Solar_V1.1.pdf (in Slovenian).
+ """
+
+ _HOMEPAGE = "http://hdl.handle.net/11356/1589"
+
+ _LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
+
+ _URLS = {
+     "solar_tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Solar.TEI.zip"
+ }
+
+ XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
+
+
+ def namespace(element):
+     # https://stackoverflow.com/a/12946675
+     m = re.match(r'\{.*\}', element.tag)
+     return m.group(0) if m else ''
+
+
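
A quick sketch of what `namespace` extracts, assuming the helper above is in scope. The TEI namespace below is illustrative (it is the namespace TEI-encoded corpora such as Šolar are expected to use):

import xml.etree.ElementTree as ET

# ElementTree prefixes tags with their namespace in Clark notation
el = ET.fromstring('<TEI xmlns="http://www.tei-c.org/ns/1.0"></TEI>')
print(el.tag)         # {http://www.tei-c.org/ns/1.0}TEI
print(namespace(el))  # {http://www.tei-c.org/ns/1.0}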
+ def resolve_element(tag_el, ne_tag: Optional[str] = "O"):
+     if not tag_el.tag.endswith(("w", "pc", "seg")):
+         logging.info(f"Skipping tag {tag_el.tag}")
+         return []
+
+     if tag_el.tag.endswith(("w", "pc")):
+         form = tag_el.text.strip()
+         lemma = tag_el.text.strip() if tag_el.tag.endswith("pc") else tag_el.attrib["lemma"]
+         msd = tag_el.attrib["ana"]
+         ret_ne_tag = ne_tag
+         id_tag = tag_el.attrib[f"{XML_NAMESPACE}id"]
+
+         return [(id_tag, form, lemma, msd, ret_ne_tag)]
+     # Named entities: words and punctuation nested directly below current element
+     elif tag_el.tag.endswith("seg"):
+         anns = []
+         ret_ne_tag = tag_el.attrib["subtype"].upper()
+         for curr_child in tag_el:
+             anns.extend(resolve_element(curr_child, ne_tag=ret_ne_tag))
+
+         return anns
+
+
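
To make the recursion concrete, here is a sketch of how `resolve_element` flattens a named-entity `<seg>` into one tuple per token. The snippet is hand-written for illustration; the ids, lemmas, and MSD tags are invented, not taken from the corpus:

import xml.etree.ElementTree as ET

snippet = '''
<seg subtype="per">
    <w xml:id="s1.1" lemma="Janez" ana="mte:Npmsn">Janez</w>
    <w xml:id="s1.2" lemma="Novak" ana="mte:Npmsn">Novak</w>
</seg>
'''
print(resolve_element(ET.fromstring(snippet)))
# [('s1.1', 'Janez', 'Janez', 'mte:Npmsn', 'PER'),
#  ('s1.2', 'Novak', 'Novak', 'mte:Npmsn', 'PER')]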
+ def read_data(data_path):
+     data = {}  # ID_sent -> sentence_metadata
+     tree = ET.parse(data_path)
+     root = tree.getroot()
+     NAMESPACE = namespace(root)
+
+     for curr_text in root.iterfind(f".//{NAMESPACE}div"):
+         id_text = curr_text.attrib[f"{XML_NAMESPACE}id"]
+         bibl_el = curr_text.find(f"{NAMESPACE}bibl")
+         if bibl_el is None:
+             text_title = "Unknown_title"
+             logging.warning(f"The following text does not have a 'bibl' element: {curr_text.attrib}. "
+                             f"Setting title to 'Unknown_title'")
+             is_manually_validated = False
+         else:
+             text_title = bibl_el.attrib["n"]
+             note_el = bibl_el.find(f"{NAMESPACE}note")
+             is_manually_validated = note_el.text == "DA"
+
+         for idx_par, curr_par in enumerate(curr_text.iterfind(f".//{NAMESPACE}p")):
+             for idx_sent, curr_sent in enumerate(curr_par.iterfind(f".//{NAMESPACE}s")):
+                 id_sent = curr_sent.attrib[f"{XML_NAMESPACE}id"]
+                 ids, forms, lemmas, msds, nes = [], [], [], [], []
+                 for curr_el in curr_sent:
+                     curr_annotations = resolve_element(curr_el)
+                     for curr_ann in curr_annotations:
+                         ids.append(curr_ann[0])
+                         forms.append(curr_ann[1])
+                         lemmas.append(curr_ann[2])
+                         msds.append(curr_ann[3])
+                         nes.append(curr_ann[4])
+
+                 data[id_sent] = {
+                     "id_doc": id_text,
+                     "doc_title": text_title,
+                     "idx_par": idx_par,
+                     "idx_sent": idx_sent,
+                     "id_token": ids, "form": forms, "lemma": lemmas, "msd": msds, "ne_tag": nes,
+                     "is_manually_validated": is_manually_validated
+                 }
+
+     return data
+
+
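
The mapping returned by `read_data` is keyed by sentence id. One entry looks roughly like this (a sketch: the keys mirror the code above, but every value is invented):

# One entry of the dict returned by read_data; all values are invented
{
    "solar1.3.1": {
        "id_doc": "solar1",
        "doc_title": "essay-0001",   # from the bibl element's "n" attribute
        "idx_par": 0,                # 0-based paragraph index within the text
        "idx_sent": 0,               # 0-based sentence index within the paragraph
        "id_token": ["solar1.3.1.1", "solar1.3.1.2"],
        "form": ["Vsako", "jutro"],
        "lemma": ["vsak", "jutro"],
        "msd": ["Pg-nsa", "Ncnsa"],
        "ne_tag": ["O", "O"],
        "is_manually_validated": True
    }
}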
+ class Solar3(datasets.GeneratorBasedBuilder):
+     """Šolar is a developmental corpus of school texts (e.g., essays), annotated with metadata and (partially)
+     with teachers' corrections."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id_doc": datasets.Value("string"),
+                 "doc_title": datasets.Value("string"),
+                 "is_manually_validated": datasets.Value("bool"),
+                 "idx_src_par": datasets.Value("int32"),
+                 "idx_src_sent": datasets.Value("int32"),
+                 "id_src_tokens": datasets.Sequence(datasets.Value("string")),
+                 "src_tokens": datasets.Sequence(datasets.Value("string")),
+                 "idx_tgt_par": datasets.Value("int32"),
+                 "idx_tgt_sent": datasets.Value("int32"),
+                 "id_tgt_tokens": datasets.Sequence(datasets.Value("string")),
+                 "tgt_tokens": datasets.Sequence(datasets.Value("string")),
+                 "corrections": [
+                     {
+                         "idx_src": datasets.Sequence(datasets.Value("int32")),
+                         "idx_tgt": datasets.Sequence(datasets.Value("int32")),
+                         "corr_types": datasets.Sequence(datasets.Value("string"))
+                     }
+                 ]
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
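
Under these features, each example pairs a source sentence with its corrected counterpart, and the `corrections` field is a list of alignment records whose indices point into `src_tokens`/`tgt_tokens`. A sketch with invented tokens and placeholder correction labels (real labels come from the Šolar annotation scheme linked in the description):

# Sketch of one example; "corrections" indices are 0-based token positions
{
    "src_tokens": ["Vcasih", "gres", "ven"],
    "tgt_tokens": ["Včasih", "greš", "ven"],
    "corrections": [
        {"idx_src": [0], "idx_tgt": [0], "corr_types": ["<label-from-scheme>"]},
        {"idx_src": [1], "idx_tgt": [1], "corr_types": ["<label-from-scheme>"]}
    ]
    # remaining fields (id_doc, doc_title, ...) omitted
}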
+     def _split_generators(self, dl_manager):
+         urls = _URLS["solar_tei"]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "source_path": os.path.join(data_dir, "Solar.TEI", "solar-orig.xml"),
+                     "target_path": os.path.join(data_dir, "Solar.TEI", "solar-corr.xml"),
+                     "links_path": os.path.join(data_dir, "Solar.TEI", "solar-errs.xml")
+                 }
+             )
+         ]
+
+     def _generate_examples(self, source_path, target_path, links_path):
+         source_data = read_data(source_path)
+         target_data = read_data(target_path)
+
+         data = ET.parse(links_path)
+         root = data.getroot()
+         NAMESPACE = namespace(root)
+
+         for idx_corr, corrected_sent in enumerate(root.iterfind(f"{NAMESPACE}linkGrp")):
+             involved_sents = corrected_sent.attrib["corresp"].split(" ")
+             assert len(involved_sents) <= 2, f"The following correction has more than two sentences involved, " \
+                                              f"which the script cannot handle: {corrected_sent.attrib}"
+             if len(involved_sents) == 2:
+                 # not always ordered <source> <target>, sometimes reversed
+                 if "t" in involved_sents[0]:
+                     id_src_sent, id_tgt_sent = list(map(lambda _s: _s[1:], involved_sents[::-1]))
+                 else:
+                     id_src_sent, id_tgt_sent = list(map(lambda _s: _s[1:], involved_sents))
+             else:  # one sentence: only source or only target (usually a sentence marked as redundant or as missing)
+                 id_src_sent, id_tgt_sent = None, None
+                 if "t" in involved_sents[0]:
+                     id_tgt_sent = involved_sents[0][1:]
+                 else:
+                     id_src_sent = involved_sents[0][1:]
+
+             id_doc, doc_title, is_manually_validated = None, None, False
+             src_sent_data, tgt_sent_data = {}, {}
+             assert id_src_sent is not None or id_tgt_sent is not None
+             if id_src_sent is not None:
+                 src_sent_data = source_data[id_src_sent]
+                 id_doc = src_sent_data["id_doc"]
+                 doc_title = src_sent_data["doc_title"]
+                 is_manually_validated |= src_sent_data["is_manually_validated"]
+             if id_tgt_sent is not None:
+                 tgt_sent_data = target_data[id_tgt_sent]
+                 id_doc = tgt_sent_data["id_doc"]
+                 doc_title = tgt_sent_data["doc_title"]
+                 is_manually_validated |= tgt_sent_data["is_manually_validated"]
+
+             corr_data = []
+             for token_info in corrected_sent.findall(f"{NAMESPACE}link"):
+                 connections = token_info.attrib["target"].split(" ")
+
+                 corrections = token_info.attrib["type"]
+                 if corrections == "ID":
+                     continue
+
+                 src_inds, tgt_inds = [], []
+                 corr_types = []
+                 for curr_corr in corrections.split("|"):
+                     corr_types.append(curr_corr)
+
+                 for curr_tok in connections:
+                     # Token IDs have an index at the end, but it is 1-based; convert it to 0-based
+                     idx_tok = int(curr_tok.split(".")[-1]) - 1
+                     if "t" in curr_tok:  # target token
+                         tgt_inds.append(idx_tok)
+                     else:  # source token
+                         src_inds.append(idx_tok)
+
+                 corr_data.append({"idx_src": src_inds, "idx_tgt": tgt_inds, "corr_types": corr_types})
+
+             yield idx_corr, {
+                 "id_doc": id_doc,
+                 "doc_title": doc_title,
+                 "is_manually_validated": is_manually_validated,
+                 "idx_src_par": src_sent_data.get("idx_par", -1),
+                 "idx_src_sent": src_sent_data.get("idx_sent", -1),
+                 "id_src_tokens": src_sent_data.get("id_token", []),
+                 "src_tokens": src_sent_data.get("form", []),
+                 "idx_tgt_par": tgt_sent_data.get("idx_par", -1),
+                 "idx_tgt_sent": tgt_sent_data.get("idx_sent", -1),
+                 "id_tgt_tokens": tgt_sent_data.get("id_token", []),
+                 "tgt_tokens": tgt_sent_data.get("form", []),
+                 "corrections": corr_data
+             }
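
A minimal end-to-end sketch: versions of `datasets` from this era allow loading directly from a local copy of the script (the path below assumes solar3.py sits in the working directory). The first call downloads and extracts Solar.TEI.zip (~95 MB) from CLARIN.SI:

import datasets

solar = datasets.load_dataset("solar3.py", split="train")

# Reconstruct each correction as a pair of token spans
ex = solar[0]
for corr in ex["corrections"]:
    src = [ex["src_tokens"][i] for i in corr["idx_src"]]
    tgt = [ex["tgt_tokens"][i] for i in corr["idx_tgt"]]
    print(src, "->", tgt, corr["corr_types"])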