import logging
import os
import re
import xml.etree.ElementTree as ET
from copy import deepcopy
from itertools import groupby
from typing import Optional

import datasets

_CITATION = r"""@misc{solar3.0,
    title = {Developmental corpus {\v S}olar 3.0},
    author = {Arhar Holdt, {\v S}pela and Rozman, Tadeja and Stritar Ku{\v c}uk, Mojca and Krek, Simon and Krap{\v s} Vodopivec, Irena and Stabej, Marko and Pori, Eva and Goli, Teja and Lavri{\v c}, Polona and Laskowski, Cyprian and Kocjan{\v c}i{\v c}, Polonca and Klemenc, Bojan and Krsnik, Luka and Kosem, Iztok},
    url = {http://hdl.handle.net/11356/1589},
    note = {Slovenian language resource repository {CLARIN}.{SI}},
    year = {2022}
}
"""

_DESCRIPTION = """\
Šolar is a developmental corpus of 5485 school texts (e.g., essays) written by students in Slovenian secondary schools
(aged 15-19) and by pupils in the 7th-9th grade of primary school (aged 13-15), with a small percentage also from the 6th grade.
Part of the corpus (1516 texts) is annotated with teachers' corrections using a system of labels described in the
document available at https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Smernice-za-oznacevanje-korpusa-Solar_V1.1.pdf (in Slovenian).
"""

_HOMEPAGE = "http://hdl.handle.net/11356/1589"

_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"

_URLS = {
    "solar_tei": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1589/Solar.TEI.zip"
}

XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"


def namespace(element):
    # Return the namespace prefix of the element's tag, e.g. "{http://www.tei-c.org/ns/1.0}".
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''


def resolve_element(tag_el, ne_tag: Optional[str] = "O"):
    # Flatten a <w>, <pc> or <seg> element into a list of
    # (id, form, lemma, ana, msd, ne_tag, space_after) tuples.
    if not tag_el.tag.endswith(("w", "pc", "seg")):
        return []

    if tag_el.tag.endswith(("w", "pc")):
        form = tag_el.text.strip()
        # Punctuation (<pc>) carries no lemma attribute, so its surface form doubles as the lemma.
        lemma = tag_el.text.strip() if tag_el.tag.endswith("pc") else tag_el.attrib["lemma"]
        ana = tag_el.attrib["ana"]
        msd = tag_el.attrib["msd"]
        ret_ne_tag = ne_tag
        id_tag = tag_el.attrib[f"{XML_NAMESPACE}id"]
        space_after = not ("join" in tag_el.attrib and tag_el.attrib["join"] == "right")

        return [(id_tag, form, lemma, ana, msd, ret_ne_tag, space_after)]

    elif tag_el.tag.endswith("seg"):
        # <seg> marks a named entity: recurse into its tokens and assign B-/I- tags.
        anns = []
        ret_ne_tag = tag_el.attrib["subtype"].upper()
        for idx_child, curr_child in enumerate(tag_el):
            anns.extend(resolve_element(curr_child, ne_tag=f"B-{ret_ne_tag}" if idx_child == 0 else f"I-{ret_ne_tag}"))

        return anns


def extract_sent_id(tok_id):
    # Strip the trailing token index from a token ID to obtain its sentence ID,
    # e.g. "#doc1.2.7" -> "doc1.2".
    _tok_id = tok_id[1:] if tok_id.startswith("#") else tok_id
    return ".".join(_tok_id.split(".")[: -1])


def find_involved_sents(correction_group_el):
    # Collect the IDs of all source and target sentences referenced by a correction group;
    # target (corrected) sentence IDs are recognised by the "t" marker they contain.
    src_sent_ids = set()
    tgt_sent_ids = set()
    for _curr_corr in correction_group_el:
        sent_ids = list(map(lambda _tok_id: extract_sent_id(_tok_id),
                            _curr_corr.attrib["target"].split(" ")))

        for _s_id in sent_ids:
            if "t" in _s_id:
                tgt_sent_ids.add(_s_id)
            else:
                src_sent_ids.add(_s_id)

    return sorted(list(src_sent_ids)), sorted(list(tgt_sent_ids))


def read_data(data_path):
    # Parse one TEI file (solar-orig.xml or solar-corr.xml) into a dict keyed by sentence ID.
    data = {}
    tree = ET.parse(data_path)
    root = tree.getroot()
    NAMESPACE = namespace(root)

    for curr_text in root.iterfind(f".//{NAMESPACE}div"):
        id_text = curr_text.attrib[f"{XML_NAMESPACE}id"]
        bibl_el = curr_text.find(f"{NAMESPACE}bibl")
        if bibl_el is None:
            text_title = "Unknown_title"
            logging.warning(f"The following text does not have a 'bibl' element: {curr_text.attrib}. "
                            f"Setting title to 'Unknown_title'")
            is_manually_validated = False
        else:
            text_title = bibl_el.attrib["n"]
            note_el = bibl_el.find(f"{NAMESPACE}note")
            is_manually_validated = note_el.text == "DA"

        for idx_par, curr_par in enumerate(curr_text.iterfind(f".//{NAMESPACE}p")):
            for idx_sent, curr_sent in enumerate(curr_par.iterfind(f".//{NAMESPACE}s")):
                id_sent = curr_sent.attrib[f"{XML_NAMESPACE}id"]
                ids, forms, lemmas, nes, spaces_after = [], [], [], [], []
                msds_jos, msds_ud = [], []
                for curr_el in curr_sent:
                    curr_annotations = resolve_element(curr_el)
                    for curr_ann in curr_annotations:
                        ids.append(curr_ann[0])
                        forms.append(curr_ann[1])
                        lemmas.append(curr_ann[2])
                        msds_jos.append(curr_ann[3])
                        msds_ud.append(curr_ann[4])
                        nes.append(curr_ann[5])
                        spaces_after.append(curr_ann[6])

                data[id_sent] = {
                    "id_doc": id_text,
                    "doc_title": text_title,
                    "idx_par": idx_par,
                    "id_token": ids, "form": forms, "lemma": lemmas, "ana": msds_jos, "msd": msds_ud,
                    "ne_tag": nes, "space_after": spaces_after,
                    "is_manually_validated": is_manually_validated
                }

    return data


class Solar3(datasets.GeneratorBasedBuilder):
    """Šolar is a developmental corpus of school texts (e.g., essays), annotated with metadata and (partially)
    with teachers' corrections."""

    VERSION = datasets.Version("3.0.2")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="sentence_level", version=VERSION,
                               description="Annotations at sentence-level."),
        datasets.BuilderConfig(name="paragraph_level", version=VERSION,
                               description="Annotations at paragraph-level."),
        datasets.BuilderConfig(name="document_level", version=VERSION,
                               description="Annotations at document-level."),
    ]

    DEFAULT_CONFIG_NAME = "sentence_level"
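
    # A minimal usage sketch (comment only). The Hub identifier "cjvt/solar3" is an assumption;
    # adjust it, or pass the local path of this script, to match where the loader is actually hosted:
    #
    #   import datasets
    #   solar = datasets.load_dataset("cjvt/solar3", "paragraph_level", split="train")
    #   print(solar[0]["src_tokens"], solar[0]["corrections"])
    #
    # The config name ("sentence_level", "paragraph_level" or "document_level") selects how
    # sentences are aggregated before being yielded as examples.
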
    def _info(self):
        features = datasets.Features(
            {
                "id_doc": datasets.Value("string"),
                "doc_title": datasets.Value("string"),
                "is_manually_validated": datasets.Value("bool"),
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "src_ling_annotations": {
                    "lemma": datasets.Sequence(datasets.Value("string")),
                    "ana": datasets.Sequence(datasets.Value("string")),
                    "msd": datasets.Sequence(datasets.Value("string")),
                    "ne_tag": datasets.Sequence(datasets.Value("string")),
                    "space_after": datasets.Sequence(datasets.Value("bool"))
                },
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_ling_annotations": {
                    "lemma": datasets.Sequence(datasets.Value("string")),
                    "ana": datasets.Sequence(datasets.Value("string")),
                    "msd": datasets.Sequence(datasets.Value("string")),
                    "ne_tag": datasets.Sequence(datasets.Value("string")),
                    "space_after": datasets.Sequence(datasets.Value("bool"))
                },
                "corrections": [
                    {
                        "idx_src": datasets.Sequence(datasets.Value("int32")),
                        "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                        "corr_types": datasets.Sequence(datasets.Value("string"))
                    }
                ]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["solar_tei"]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "source_path": os.path.join(data_dir, "Solar.TEI", "solar-orig.xml"),
                    "target_path": os.path.join(data_dir, "Solar.TEI", "solar-corr.xml"),
                    "links_path": os.path.join(data_dir, "Solar.TEI", "solar-errs.xml")
                }
            )
        ]

    @staticmethod
    def generate_sentences(source_path, target_path, links_path):
        source_data = read_data(source_path)
        target_data = read_data(target_path)

        data = ET.parse(links_path)
        root = data.getroot()
        NAMESPACE = namespace(root)

        # Each <linkGrp> in solar-errs.xml groups the correction links for one set of
        # source (original) and target (corrected) sentences.
        for idx_corr, corrected_sent in enumerate(root.iterfind(f"{NAMESPACE}linkGrp")):
            involved_src_sents, involved_tgt_sents = find_involved_sents(corrected_sent)

            id_doc, doc_title, is_manually_validated = None, None, False
            src_sent_data, tgt_sent_data = {}, {}
            tok2position = {}
            assert len(involved_src_sents) > 0 or len(involved_tgt_sents) > 0

            if len(involved_src_sents) > 0:
                src_sent_data = deepcopy(source_data[involved_src_sents[0]])
                if not isinstance(src_sent_data["idx_par"], list):
                    src_sent_data["idx_par"] = [src_sent_data["idx_par"]]

                # A correction group may span multiple source sentences; concatenate their annotations.
                for src_sent_id in involved_src_sents[1:]:
                    curr_sent_data = source_data[src_sent_id]

                    src_sent_data["id_token"].extend(curr_sent_data["id_token"])
                    src_sent_data["idx_par"].append(curr_sent_data["idx_par"])
                    src_sent_data["form"].extend(curr_sent_data["form"])
                    src_sent_data["lemma"].extend(curr_sent_data["lemma"])
                    src_sent_data["ana"].extend(curr_sent_data["ana"])
                    src_sent_data["msd"].extend(curr_sent_data["msd"])
                    src_sent_data["ne_tag"].extend(curr_sent_data["ne_tag"])
                    src_sent_data["space_after"].extend(curr_sent_data["space_after"])

                id_doc = src_sent_data["id_doc"]
                doc_title = src_sent_data["doc_title"]
                is_manually_validated |= src_sent_data["is_manually_validated"]
                for _pos, _tok in enumerate(src_sent_data["id_token"]):
                    tok2position[_tok] = _pos

            if len(involved_tgt_sents) > 0:
                tgt_sent_data = deepcopy(target_data[involved_tgt_sents[0]])
                if not isinstance(tgt_sent_data["idx_par"], list):
                    tgt_sent_data["idx_par"] = [tgt_sent_data["idx_par"]]

                for tgt_sent_id in involved_tgt_sents[1:]:
                    curr_sent_data = target_data[tgt_sent_id]

                    tgt_sent_data["id_token"].extend(curr_sent_data["id_token"])
                    tgt_sent_data["idx_par"].append(curr_sent_data["idx_par"])
                    tgt_sent_data["form"].extend(curr_sent_data["form"])
                    tgt_sent_data["lemma"].extend(curr_sent_data["lemma"])
                    tgt_sent_data["ana"].extend(curr_sent_data["ana"])
                    tgt_sent_data["msd"].extend(curr_sent_data["msd"])
                    tgt_sent_data["ne_tag"].extend(curr_sent_data["ne_tag"])
                    tgt_sent_data["space_after"].extend(curr_sent_data["space_after"])

                id_doc = tgt_sent_data["id_doc"]
                doc_title = tgt_sent_data["doc_title"]
                is_manually_validated |= tgt_sent_data["is_manually_validated"]
                for _pos, _tok in enumerate(tgt_sent_data["id_token"]):
                    tok2position[_tok] = _pos

            corr_data = []
            for token_info in corrected_sent.findall(f"{NAMESPACE}link"):
                connections = token_info.attrib["target"].split(" ")

                corrections = token_info.attrib["type"]
                if corrections == "ID":
                    # Links of type "ID" align tokens that were left unchanged, so they carry no correction.
                    continue

                src_inds, tgt_inds = [], []
                corr_types = []
                # A single link can carry multiple correction labels, separated by "|".
                for curr_corr in corrections.split("|"):
                    corr_types.append(curr_corr)

                for curr_tok in connections:
                    # Token references start with "#"; target-side tokens contain a "t" marker.
                    idx_tok = tok2position[curr_tok[1:]]
                    if "t" in curr_tok:
                        tgt_inds.append(idx_tok)
                    else:
                        src_inds.append(idx_tok)

                corr_data.append({"idx_src": src_inds, "idx_tgt": tgt_inds, "corr_types": corr_types})

            yield idx_corr, {
                "id_doc": id_doc[:-1],
                "doc_title": doc_title,
                "is_manually_validated": is_manually_validated,
                "idx_src_par": src_sent_data.get("idx_par", []),
                "id_src_tokens": src_sent_data.get("id_token", []),
                "src_tokens": src_sent_data.get("form", []),
                "src_ling_annotations": {
                    "lemma": src_sent_data.get("lemma", []),
                    "ana": src_sent_data.get("ana", []),
                    "msd": src_sent_data.get("msd", []),
                    "ne_tag": src_sent_data.get("ne_tag", []),
                    "space_after": src_sent_data.get("space_after", [])
                },
                "idx_tgt_par": tgt_sent_data.get("idx_par", []),
                "id_tgt_tokens": tgt_sent_data.get("id_token", []),
                "tgt_tokens": tgt_sent_data.get("form", []),
                "tgt_ling_annotations": {
                    "lemma": tgt_sent_data.get("lemma", []),
                    "ana": tgt_sent_data.get("ana", []),
                    "msd": tgt_sent_data.get("msd", []),
                    "ne_tag": tgt_sent_data.get("ne_tag", []),
                    "space_after": tgt_sent_data.get("space_after", [])
                },
                "corrections": corr_data
            }

    @staticmethod
    def aggregate_pars(sent_level_data):
        # Merge sentence-level instances into paragraph-level instances, deduplicating shared tokens
        # and re-basing correction indices onto the concatenated token lists.
        uniq_idx_par = 0
        for idx_doc, (curr_id, curr_group) in enumerate(groupby(sent_level_data, key=lambda tup: tup[1]["id_doc"])):
            curr_instances = list(map(lambda tup: tup[1], curr_group))

            for idx_par, curr_par_group in groupby(
                    curr_instances,
                    key=lambda _inst: _inst["idx_src_par"][0] if len(_inst["idx_src_par"]) > 0 else
                    _inst["idx_tgt_par"][0]
            ):
                src_tokens, tgt_tokens, mapped_corrections = [], [], []
                src_ling_anns = {"lemma": [], "ana": [], "msd": [], "ne_tag": [], "space_after": []}
                tgt_ling_anns = {"lemma": [], "ana": [], "msd": [], "ne_tag": [], "space_after": []}
                seen_src_tokens, seen_tgt_tokens = {}, {}
                src_base, tgt_base = 0, 0
                prev_src_base, prev_tgt_base = 0, 0

                doc_title, is_validated = None, None
                for curr_inst in curr_par_group:
                    doc_title, is_validated = curr_inst["doc_title"], curr_inst["is_manually_validated"]

                    id_src_toks, id_tgt_toks = curr_inst["id_src_tokens"], curr_inst["id_tgt_tokens"]
                    curr_src_toks, curr_tgt_toks = curr_inst["src_tokens"], curr_inst["tgt_tokens"]
                    curr_src_anns, curr_tgt_anns = curr_inst["src_ling_annotations"], curr_inst["tgt_ling_annotations"]
                    curr_corrs = curr_inst["corrections"]

                    # Instances can share sentences, so only add tokens that have not been seen yet.
                    num_added_src, num_added_tgt = 0, 0
                    for idx_position, (id_tok, tok) in enumerate(zip(id_src_toks, curr_src_toks)):
                        if id_tok not in seen_src_tokens:
                            src_tokens.append(tok)
                            src_ling_anns["lemma"].append(curr_src_anns["lemma"][idx_position])
                            src_ling_anns["ana"].append(curr_src_anns["ana"][idx_position])
                            src_ling_anns["msd"].append(curr_src_anns["msd"][idx_position])
                            src_ling_anns["ne_tag"].append(curr_src_anns["ne_tag"][idx_position])
                            src_ling_anns["space_after"].append(curr_src_anns["space_after"][idx_position])

                            seen_src_tokens[id_tok] = tok
                            num_added_src += 1

                    for idx_position, (id_tok, tok) in enumerate(zip(id_tgt_toks, curr_tgt_toks)):
                        if id_tok not in seen_tgt_tokens:
                            tgt_tokens.append(tok)
                            tgt_ling_anns["lemma"].append(curr_tgt_anns["lemma"][idx_position])
                            tgt_ling_anns["ana"].append(curr_tgt_anns["ana"][idx_position])
                            tgt_ling_anns["msd"].append(curr_tgt_anns["msd"][idx_position])
                            tgt_ling_anns["ne_tag"].append(curr_tgt_anns["ne_tag"][idx_position])
                            tgt_ling_anns["space_after"].append(curr_tgt_anns["space_after"][idx_position])

                            seen_tgt_tokens[id_tok] = tok
                            num_added_tgt += 1

                    # If this instance contributed no new tokens (its sentences were already merged by a
                    # previous instance), temporarily restore the previous offset so that its correction
                    # indices map onto the earlier copy of those tokens.
                    if num_added_src == 0:
                        src_base, prev_src_base = prev_src_base, src_base

                    if num_added_tgt == 0:
                        tgt_base, prev_tgt_base = prev_tgt_base, tgt_base

                    for corr in curr_corrs:
                        mapped_corrections.append({
                            "idx_src": list(map(lambda _i: src_base + _i, corr["idx_src"])),
                            "idx_tgt": list(map(lambda _i: tgt_base + _i, corr["idx_tgt"])),
                            "corr_types": corr["corr_types"]
                        })

                    src_base += num_added_src
                    tgt_base += num_added_tgt

                    # Swap the offsets back after mapping.
                    if num_added_src == 0:
                        src_base, prev_src_base = prev_src_base, src_base

                    if num_added_tgt == 0:
                        tgt_base, prev_tgt_base = prev_tgt_base, tgt_base

                yield uniq_idx_par, {
                    "id_doc": curr_id,
                    "doc_title": doc_title,
                    "is_manually_validated": is_validated,
                    "src_tokens": src_tokens,
                    "src_ling_annotations": src_ling_anns,
                    "tgt_tokens": tgt_tokens,
                    "tgt_ling_annotations": tgt_ling_anns,
                    "corrections": mapped_corrections
                }
                uniq_idx_par += 1

    @staticmethod
    def aggregate_docs(sent_level_data):
        # Same merging logic as `aggregate_pars`, but over all instances of a document.
        for idx_doc, (curr_id, curr_group) in enumerate(groupby(sent_level_data, key=lambda tup: tup[1]["id_doc"])):
            curr_instances = map(lambda tup: tup[1], curr_group)

            src_tokens, tgt_tokens, mapped_corrections = [], [], []
            src_ling_anns = {"lemma": [], "ana": [], "msd": [], "ne_tag": [], "space_after": []}
            tgt_ling_anns = {"lemma": [], "ana": [], "msd": [], "ne_tag": [], "space_after": []}
            seen_src_tokens, seen_tgt_tokens = {}, {}
            src_base, tgt_base = 0, 0
            prev_src_base, prev_tgt_base = 0, 0

            doc_title, is_validated = None, None
            for curr_inst in curr_instances:
                doc_title, is_validated = curr_inst["doc_title"], curr_inst["is_manually_validated"]

                id_src_toks, id_tgt_toks = curr_inst["id_src_tokens"], curr_inst["id_tgt_tokens"]
                curr_src_toks, curr_tgt_toks = curr_inst["src_tokens"], curr_inst["tgt_tokens"]
                curr_src_anns, curr_tgt_anns = curr_inst["src_ling_annotations"], curr_inst["tgt_ling_annotations"]
                curr_corrs = curr_inst["corrections"]

                num_added_src, num_added_tgt = 0, 0
                for idx_position, (id_tok, tok) in enumerate(zip(id_src_toks, curr_src_toks)):
                    if id_tok not in seen_src_tokens:
                        src_tokens.append(tok)
                        src_ling_anns["lemma"].append(curr_src_anns["lemma"][idx_position])
                        src_ling_anns["ana"].append(curr_src_anns["ana"][idx_position])
                        src_ling_anns["msd"].append(curr_src_anns["msd"][idx_position])
                        src_ling_anns["ne_tag"].append(curr_src_anns["ne_tag"][idx_position])
                        src_ling_anns["space_after"].append(curr_src_anns["space_after"][idx_position])

                        seen_src_tokens[id_tok] = tok
                        num_added_src += 1

                for idx_position, (id_tok, tok) in enumerate(zip(id_tgt_toks, curr_tgt_toks)):
                    if id_tok not in seen_tgt_tokens:
                        tgt_tokens.append(tok)
                        tgt_ling_anns["lemma"].append(curr_tgt_anns["lemma"][idx_position])
                        tgt_ling_anns["ana"].append(curr_tgt_anns["ana"][idx_position])
                        tgt_ling_anns["msd"].append(curr_tgt_anns["msd"][idx_position])
                        tgt_ling_anns["ne_tag"].append(curr_tgt_anns["ne_tag"][idx_position])
                        tgt_ling_anns["space_after"].append(curr_tgt_anns["space_after"][idx_position])

                        seen_tgt_tokens[id_tok] = tok
                        num_added_tgt += 1

                if num_added_src == 0:
                    src_base, prev_src_base = prev_src_base, src_base

                if num_added_tgt == 0:
                    tgt_base, prev_tgt_base = prev_tgt_base, tgt_base

                for corr in curr_corrs:
                    mapped_corrections.append({
                        "idx_src": list(map(lambda _i: src_base + _i, corr["idx_src"])),
                        "idx_tgt": list(map(lambda _i: tgt_base + _i, corr["idx_tgt"])),
                        "corr_types": corr["corr_types"]
                    })

                src_base += num_added_src
                tgt_base += num_added_tgt

                if num_added_src == 0:
                    src_base, prev_src_base = prev_src_base, src_base

                if num_added_tgt == 0:
                    tgt_base, prev_tgt_base = prev_tgt_base, tgt_base

            yield idx_doc, {
                "id_doc": curr_id,
                "doc_title": doc_title,
                "is_manually_validated": is_validated,
                "src_tokens": src_tokens,
                "src_ling_annotations": src_ling_anns,
                "tgt_tokens": tgt_tokens,
                "tgt_ling_annotations": tgt_ling_anns,
                "corrections": mapped_corrections
            }

    def _generate_examples(self, source_path, target_path, links_path):
        sent_level_data = list(Solar3.generate_sentences(source_path, target_path, links_path))

        if self.config.name == "sentence_level":
            for i, instance in sent_level_data:
                # Drop the helper fields that are only needed for paragraph/document aggregation.
                yield i, {_k: _v for _k, _v in instance.items() if _k not in {"id_src_tokens", "id_tgt_tokens",
                                                                              "idx_src_par", "idx_tgt_par"}}
        elif self.config.name == "paragraph_level":
            yield from Solar3.aggregate_pars(sent_level_data)
        else:
            yield from Solar3.aggregate_docs(sent_level_data)
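

# A minimal, optional smoke test -- a sketch, not part of the loader itself. It assumes a local run
# with a `datasets` version that still supports script-based loading (passing this file's path) and
# that accepts the `trust_remote_code` argument; remove or adapt it if that does not match your setup.
if __name__ == "__main__":
    example_ds = datasets.load_dataset(__file__, "sentence_level", split="train", trust_remote_code=True)
    print(example_ds[0]["src_tokens"])
    print(example_ds[0]["tgt_tokens"])
    print(example_ds[0]["corrections"])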