import os
from copy import deepcopy

import datasets

_CITATION = """\
@article{naplava2019wnut,
    title={Grammatical Error Correction in Low-Resource Scenarios},
    author={N{\'a}plava, Jakub and Straka, Milan},
    journal={arXiv preprint arXiv:1910.00353},
    year={2019}
}
"""
_DESCRIPTION = """\
AKCES-GEC is a grammar error correction corpus for Czech generated from a subset of AKCES resources.
"""
_HOMEPAGE = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3057"
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
_URLS = {
"akces_gec": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3057/AKCES-GEC.zip"
}
class AkcesGEC(datasets.GeneratorBasedBuilder):
    """AKCES-GEC dataset for grammatical error correction."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ann0", version=VERSION, description="Use annotations from annotator#0"),
        datasets.BuilderConfig(name="ann1", version=VERSION, description="Use annotations from annotator#1"),
    ]

    DEFAULT_CONFIG_NAME = "ann0"

    def _info(self):
        features = datasets.Features(
            {
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
"corrections": [{
"idx_src": datasets.Sequence(datasets.Value("int32")),
"idx_tgt": datasets.Sequence(datasets.Value("int32")),
"corr_types": datasets.Sequence(datasets.Value("string"))
}]
}
)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["akces_gec"]
        data_dir = dl_manager.download_and_extract(urls)
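        # The "ann0"/"ann1" configs select whose edits get applied; each "A"
        # line in the M2 files ends with the annotator id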
        consider_annotator = 0 if self.config.name == "ann0" else 1
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "train", "train.all.m2"), "annotator": consider_annotator},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": os.path.join(data_dir, "dev", "dev.all.m2"), "annotator": consider_annotator},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": os.path.join(data_dir, "test", "test.all.m2"), "annotator": consider_annotator},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file_path, annotator=0):
        # Edit types that do not represent an actual correction
        skip_edits = {"noop", "UNK", "Um"}
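        # M2 files store one tokenized "S" (source) line per sentence, followed
        # by zero or more "A" (annotation) lines and a terminating blank line.
        # A hypothetical record (field values are illustrative only):
        #   S Tohle je věta .
        #   A 1 2|||error_type|||oprava|||REQUIRED|||-NONE-|||0
        # where "1 2" is the edited source span and the final field is the
        # annotator id.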
        with open(file_path, "r", encoding="utf-8") as f:
            idx_ex = 0
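            # `offset` tracks how far target indices have drifted from source
            # indices as earlier edits insert or delete tokens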
            src_sent, tgt_sent, corrections, offset = None, None, [], 0
            for _line in f:
                line = _line.strip()
                if len(line) > 0:
                    prefix, remainder = line[0], line[2:]
                    if prefix == "S":
                        # Source line: the tokenized original sentence; the target
                        # starts as a copy and is patched by subsequent edits
                        src_sent = remainder.split(" ")
                        tgt_sent = deepcopy(src_sent)
                    elif prefix == "A":
                        annotation_data = remainder.split("|||")
                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
                        edit_types, edit_text = annotation_data[1], annotation_data[2]
                        edit_types = edit_types.split(",")
                        # Skip non-edits and edits whose type is unknown/unusable
                        if len(set(edit_types) & skip_edits) > 0:
                            continue
                        # Keep only edits made by the selected annotator
                        annotator_id = int(annotation_data[-1])
                        if annotator_id != annotator:
                            continue
                        formatted_correction = {
                            "idx_src": list(range(idx_start, idx_end)),
                            "idx_tgt": [],
                            "corr_types": edit_types,
                        }
                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
                        if removal:
                            # Deletion: drop the span from the target; every removed
                            # token shifts later target indices left by one
                            for idx_to_remove in range(idx_start, idx_end):
                                del tgt_sent[offset + idx_to_remove]
                                offset -= 1
                        else:  # replacement or insertion
                            edit_tokens = edit_text.split(" ")
                            len_diff = len(edit_tokens) - (idx_end - idx_start)
                            formatted_correction["idx_tgt"] = list(
                                range(offset + idx_start, offset + idx_end + len_diff))
                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
                            offset += len_diff
                        corrections.append(formatted_correction)
                else:  # empty line, indicating end of example
                    if src_sent is None and tgt_sent is None:  # multiple consecutive empty lines
                        continue
                    yield idx_ex, {
                        "src_tokens": src_sent,
                        "tgt_tokens": tgt_sent,
                        "corrections": corrections,
                    }
                    src_sent, tgt_sent, corrections, offset = None, None, [], 0
                    idx_ex += 1
            # Flush the final example in case the file does not end with an empty line
            if src_sent is not None:
                yield idx_ex, {
                    "src_tokens": src_sent,
                    "tgt_tokens": tgt_sent,
                    "corrections": corrections,
                }
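

# Usage sketch (not part of the original script; assumes this file is saved as
# "akces_gec.py" and a `datasets` version that still supports script-based
# loading, e.g. a 2.x release with trust_remote_code enabled):
if __name__ == "__main__":
    dataset = datasets.load_dataset("akces_gec.py", "ann0")
    print(dataset["train"][0]["src_tokens"])
    print(dataset["train"][0]["corrections"])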