Matej Klemen committed
Commit f550567
1 Parent(s): b3c11c1

Add first dataset script version

Files changed (2):
  1. README.md +26 -0
  2. falko_merlin.py +132 -0
README.md CHANGED
@@ -1,3 +1,29 @@
 ---
 license: cc-by-sa-4.0
+dataset_info:
+  features:
+  - name: src_tokens
+    sequence: string
+  - name: tgt_tokens
+    sequence: string
+  - name: corrections
+    list:
+    - name: idx_src
+      sequence: int32
+    - name: idx_tgt
+      sequence: int32
+    - name: corr_type
+      dtype: string
+  splits:
+  - name: train
+    num_bytes: 6981243
+    num_examples: 19237
+  - name: validation
+    num_bytes: 902510
+    num_examples: 2503
+  - name: test
+    num_bytes: 836757
+    num_examples: 2337
+  download_size: 85667586
+  dataset_size: 8720510
 ---
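
The `dataset_info` block above describes the schema produced by the new loading script. As a quick check, the dataset can be loaded like this (a minimal sketch; the repo ID below is a placeholder, and recent `datasets` versions may additionally require `trust_remote_code=True` for script-based datasets):

```python
from datasets import load_dataset

# Placeholder repo ID; substitute the actual namespace/name of this repo.
dataset = load_dataset("matejklemen/falko_merlin")

example = dataset["train"][0]
print(example["src_tokens"])   # tokenized learner sentence
print(example["tgt_tokens"])   # tokenized corrected sentence
print(example["corrections"])  # list of {idx_src, idx_tgt, corr_type} edits
```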
falko_merlin.py ADDED
@@ -0,0 +1,132 @@
+import os
+from copy import deepcopy
+
+import datasets
+
+
+_CITATION = """\
+@InProceedings{boyd2018wnut,
+    author = {Adriane Boyd},
+    title = {Using Wikipedia Edits in Low Resource Grammatical Error Correction},
+    booktitle = {Proceedings of the 4th Workshop on Noisy User-generated Text},
+    publisher = {Association for Computational Linguistics},
+    year = {2018},
+    url = {http://aclweb.org/anthology/W18-6111}
+}
+"""
+
+_DESCRIPTION = """\
+Falko-MERLIN is a grammatical error correction corpus consisting of essays and exams.
+"""
+
+_HOMEPAGE = "https://github.com/adrianeboyd/boyd-wnut2018"
+
+_LICENSE = "Creative Commons Attribution Share Alike 4.0 International"
+
+_URLS = {
+    "falko_merlin_wikipedia": "http://www.sfs.uni-tuebingen.de/~adriane/download/wnut2018/data.tar.gz"
+}
+
+
+class FalkoMERLIN(datasets.GeneratorBasedBuilder):
+
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "src_tokens": datasets.Sequence(datasets.Value("string")),
+                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
+                "corrections": [{
+                    "idx_src": datasets.Sequence(datasets.Value("int32")),
+                    "idx_tgt": datasets.Sequence(datasets.Value("int32")),
+                    "corr_type": datasets.Value("string")
+                }]
+            }
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        urls = _URLS["falko_merlin_wikipedia"]
+        data_dir = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-train.m2")},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-dev.m2")},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-test.m2")}
+            )
+        ]
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, file_path):
+        # Edit types that carry no usable correction: "noop" (no error),
+        # "UNK" (unknown correction) and "Um" (unclear meaning)
+        skip_edits = {"noop", "UNK", "Um"}
+        with open(file_path, "r", encoding="utf-8") as f:
+            idx_ex = 0
+            src_sent, tgt_sent, corrections, offset = None, None, [], 0
+            for _line in f:
+                line = _line.strip()
+
+                if len(line) > 0:
+                    # M2 format: an "S <tokenized sentence>" line followed by
+                    # "A <start> <end>|||<type>|||<correction>|||...|||<annotator id>"
+                    # lines, with a blank line terminating each example
+                    prefix, remainder = line[0], line[2:]
+                    if prefix == "S":
+                        src_sent = remainder.split(" ")
+                        tgt_sent = deepcopy(src_sent)
+
+                    elif prefix == "A":
+                        annotation_data = remainder.split("|||")
+                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
+                        edit_type, edit_text = annotation_data[1], annotation_data[2]
+                        if edit_type in skip_edits:
+                            continue
+
+                        formatted_correction = {
+                            "idx_src": list(range(idx_start, idx_end)),
+                            "idx_tgt": [],
+                            "corr_type": edit_type
+                        }
+                        annotator_id = int(annotation_data[-1])
+                        assert annotator_id == 0
+
+                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
+                        if removal:
+                            for idx_to_remove in range(idx_start, idx_end):
+                                del tgt_sent[offset + idx_to_remove]
+                                # `offset` tracks how far target indices have
+                                # drifted from source indices after prior edits
+                                offset -= 1
+
+                        else:  # replacement/insertion
+                            edit_tokens = edit_text.split(" ")
+                            len_diff = len(edit_tokens) - (idx_end - idx_start)
+
+                            formatted_correction["idx_tgt"] = list(
+                                range(offset + idx_start, offset + idx_end + len_diff))
+                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
+                            offset += len_diff
+
+                        corrections.append(formatted_correction)
+
+                else:  # empty line, indicating end of example
+                    yield idx_ex, {
+                        "src_tokens": src_sent,
+                        "tgt_tokens": tgt_sent,
+                        "corrections": corrections
+                    }
+                    src_sent, tgt_sent, corrections, offset = None, None, [], 0
+                    idx_ex += 1
+
+            # Emit the final example if the file does not end with a blank line
+            if src_sent is not None:
+                yield idx_ex, {
+                    "src_tokens": src_sent,
+                    "tgt_tokens": tgt_sent,
+                    "corrections": corrections
+                }
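
To see what `_generate_examples` yields, here is a minimal sketch that runs the parser on a hand-made M2 entry. The sentence and edit below are invented for illustration, and the snippet assumes `falko_merlin.py` is on the import path:

```python
import tempfile

from falko_merlin import FalkoMERLIN

# Invented M2 entry: an "S" source line, one "A" annotation replacing
# token span [1, 2) with "went", and a blank line ending the example.
m2_entry = (
    "S She go to school yesterday .\n"
    "A 1 2|||R:VERB:TENSE|||went|||REQUIRED|||-NONE-|||0\n"
    "\n"
)

with tempfile.NamedTemporaryFile("w", suffix=".m2", delete=False) as tmp:
    tmp.write(m2_entry)

builder = FalkoMERLIN()
for idx, example in builder._generate_examples(tmp.name):
    print(idx, example)
# Expected output:
# 0 {'src_tokens': ['She', 'go', 'to', 'school', 'yesterday', '.'],
#    'tgt_tokens': ['She', 'went', 'to', 'school', 'yesterday', '.'],
#    'corrections': [{'idx_src': [1], 'idx_tgt': [1], 'corr_type': 'R:VERB:TENSE'}]}
```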