Matej Klemen committed
Commit 27d7114
1 Parent(s): 0539bdf

Add first version of dataset script

Files changed (2)
  1. README.md +113 -0
  2. wi_locness.py +159 -0
README.md CHANGED
@@ -1,3 +1,116 @@
  ---
  license: other
+ dataset_info:
+ - config_name: A
+   features:
+   - name: src_tokens
+     sequence: string
+   - name: tgt_tokens
+     sequence: string
+   - name: corrections
+     list:
+     - name: idx_src
+       sequence: int32
+     - name: idx_tgt
+       sequence: int32
+     - name: corr_type
+       dtype: string
+   splits:
+   - name: train
+     num_bytes: 3847179
+     num_examples: 10493
+   - name: validation
+     num_bytes: 392622
+     num_examples: 1037
+   download_size: 6120469
+   dataset_size: 4239801
+ - config_name: B
+   features:
+   - name: src_tokens
+     sequence: string
+   - name: tgt_tokens
+     sequence: string
+   - name: corrections
+     list:
+     - name: idx_src
+       sequence: int32
+     - name: idx_tgt
+       sequence: int32
+     - name: corr_type
+       dtype: string
+   splits:
+   - name: train
+     num_bytes: 4649805
+     num_examples: 13032
+   - name: validation
+     num_bytes: 468078
+     num_examples: 1290
+   download_size: 6120469
+   dataset_size: 5117883
+ - config_name: C
+   features:
+   - name: src_tokens
+     sequence: string
+   - name: tgt_tokens
+     sequence: string
+   - name: corrections
+     list:
+     - name: idx_src
+       sequence: int32
+     - name: idx_tgt
+       sequence: int32
+     - name: corr_type
+       dtype: string
+   splits:
+   - name: train
+     num_bytes: 3765831
+     num_examples: 10783
+   - name: validation
+     num_bytes: 390439
+     num_examples: 1069
+   download_size: 6120469
+   dataset_size: 4156270
+ - config_name: N
+   features:
+   - name: src_tokens
+     sequence: string
+   - name: tgt_tokens
+     sequence: string
+   - name: corrections
+     list:
+     - name: idx_src
+       sequence: int32
+     - name: idx_tgt
+       sequence: int32
+     - name: corr_type
+       dtype: string
+   splits:
+   - name: validation
+     num_bytes: 421656
+     num_examples: 988
+   download_size: 6120469
+   dataset_size: 421656
+ - config_name: all
+   features:
+   - name: src_tokens
+     sequence: string
+   - name: tgt_tokens
+     sequence: string
+   - name: corrections
+     list:
+     - name: idx_src
+       sequence: int32
+     - name: idx_tgt
+       sequence: int32
+     - name: corr_type
+       dtype: string
+   splits:
+   - name: train
+     num_bytes: 12262815
+     num_examples: 34308
+   - name: validation
+     num_bytes: 1672795
+     num_examples: 4384
+   download_size: 6120469
+   dataset_size: 13935610
  ---
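
For quick reference, a minimal sketch of how the configurations declared above could be loaded with the `datasets` library once this script is on the Hub. The repository id `matejklemen/wi_locness` is an assumption based on the committer, not something stated in this commit, and newer `datasets` versions may additionally require `trust_remote_code=True`.

```python
from datasets import load_dataset

# Hypothetical repository id -- substitute the actual path of this dataset repo.
# Configs: "A", "B", "C" (CEFR levels), "N" (native LOCNESS essays), "all" (default).
wi_a = load_dataset("matejklemen/wi_locness", "A")
print(wi_a)                       # train + validation splits
print(wi_a["train"][0].keys())    # src_tokens, tgt_tokens, corrections

# The "N" config only provides a validation split, as listed in dataset_info above.
locness = load_dataset("matejklemen/wi_locness", "N")
```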
wi_locness.py ADDED
@@ -0,0 +1,159 @@
+ import os
+ from copy import deepcopy
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{wi_locness,
+     author = {Helen Yannakoudakis and Øistein E Andersen and Ardeshir Geranpayeh and Ted Briscoe and Diane Nicholls},
+     title = {Developing an automated writing placement system for ESL learners},
+     journal = {Applied Measurement in Education},
+     volume = {31},
+     number = {3},
+     pages = {251-267},
+     year = {2018},
+     doi = {10.1080/08957347.2018.1464447},
+ }
+ """
+
+ _DESCRIPTION = """\
+ Write & Improve is an online web platform that assists non-native English students with their writing. Specifically, students from around the world submit letters, stories, articles and essays in response to various prompts, and the W&I system provides instant feedback. Since W&I went live in 2014, W&I annotators have manually annotated some of these submissions and assigned them a CEFR level.
+ The LOCNESS corpus consists of essays written by native English students. It was originally compiled by researchers at the Centre for English Corpus Linguistics at the University of Louvain. Since native English students also sometimes make mistakes, we asked the W&I annotators to annotate a subsection of LOCNESS so researchers can test the effectiveness of their systems on the full range of English levels and abilities.
+ """
+
+ _HOMEPAGE = "https://www.cl.cam.ac.uk/research/nl/bea2019st/"
+
+ _LICENSE = "other"
+
+ _URLS = {
+     "wi_locness": "https://www.cl.cam.ac.uk/research/nl/bea2019st/data/wi+locness_v2.1.bea19.tar.gz"
+ }
+
+
+ class WILocness(datasets.GeneratorBasedBuilder):
+     """Write&Improve and LOCNESS dataset for grammatical error correction."""
+
+     VERSION = datasets.Version("2.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="A", version=VERSION, description="CEFR level A"),
+         datasets.BuilderConfig(name="B", version=VERSION, description="CEFR level B"),
+         datasets.BuilderConfig(name="C", version=VERSION, description="CEFR level C"),
+         datasets.BuilderConfig(name="N", version=VERSION, description="Native essays from LOCNESS"),
+         datasets.BuilderConfig(name="all", version=VERSION, description="All training and validation data combined")
+     ]
+
+     DEFAULT_CONFIG_NAME = "all"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "src_tokens": datasets.Sequence(datasets.Value("string")),
+                 "tgt_tokens": datasets.Sequence(datasets.Value("string")),
+                 "corrections": [{
+                     "idx_src": datasets.Sequence(datasets.Value("int32")),
+                     "idx_tgt": datasets.Sequence(datasets.Value("int32")),
+                     "corr_type": datasets.Value("string")
+                 }]
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS["wi_locness"]
+         data_dir = dl_manager.download_and_extract(urls)
+         if self.config.name in {"A", "B", "C"}:
+             splits = [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"file_path": os.path.join(data_dir, "wi+locness", "m2", f"{self.config.name}.train.gold.bea19.m2")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"file_path": os.path.join(data_dir, "wi+locness", "m2", f"{self.config.name}.dev.gold.bea19.m2")},
+                 )
+             ]
+         elif self.config.name == "N":
+             splits = [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"file_path": os.path.join(data_dir, "wi+locness", "m2", "N.dev.gold.bea19.m2")},
+                 )
+             ]
+         else:
+             splits = [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"file_path": os.path.join(data_dir, "wi+locness", "m2", "ABC.train.gold.bea19.m2")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"file_path": os.path.join(data_dir, "wi+locness", "m2", "ABCN.dev.gold.bea19.m2")},
+                 )
+             ]
+
+         return splits
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, file_path):
+         skip_edits = {"noop", "UNK", "Um"}
+         with open(file_path, "r", encoding="utf-8") as f:
+             idx_ex = 0
+             src_sent, tgt_sent, corrections, offset = None, None, [], 0
+             for idx_line, _line in enumerate(f):
+                 line = _line.strip()
+
+                 if len(line) > 0:
+                     prefix, remainder = line[0], line[2:]
+                     if prefix == "S":
+                         src_sent = remainder.split(" ")
+                         tgt_sent = deepcopy(src_sent)
+
+                     elif prefix == "A":
+                         annotation_data = remainder.split("|||")
+                         idx_start, idx_end = map(int, annotation_data[0].split(" "))
+                         edit_type, edit_text = annotation_data[1], annotation_data[2]
+                         if edit_type in skip_edits:
+                             continue
+
+                         formatted_correction = {
+                             "idx_src": list(range(idx_start, idx_end)),
+                             "idx_tgt": [],
+                             "corr_type": edit_type
+                         }
+                         annotator_id = int(annotation_data[-1])
+                         assert annotator_id == 0
+
+                         removal = len(edit_text) == 0 or edit_text == "-NONE-"
+                         if removal:
+                             for idx_to_remove in range(idx_start, idx_end):
+                                 del tgt_sent[offset + idx_to_remove]
+                                 offset -= 1
+
+                         else:  # replacement/insertion
+                             edit_tokens = edit_text.split(" ")
+                             len_diff = len(edit_tokens) - (idx_end - idx_start)
+
+                             formatted_correction["idx_tgt"] = list(
+                                 range(offset + idx_start, offset + idx_end + len_diff))
+                             tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
+                             offset += len_diff
+
+                         corrections.append(formatted_correction)
+
+                 else:  # empty line, indicating end of example
+                     yield idx_ex, {
+                         "src_tokens": src_sent,
+                         "tgt_tokens": tgt_sent,
+                         "corrections": corrections
+                     }
+                     src_sent, tgt_sent, corrections, offset = None, None, [], 0
+                     idx_ex += 1
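
To make the M2 parsing above concrete, here is a small illustrative sketch (not part of the commit): a hypothetical two-line entry in BEA-2019 M2 format and the record that `_generate_examples` would yield for it. `idx_src` indexes tokens in the source sentence, `idx_tgt` indexes tokens in the corrected sentence, and edits of type `noop`, `UNK` or `Um` are skipped entirely.

```python
# Hypothetical M2 entry: a source sentence ("S" line), one annotated edit
# ("A" line: span 1-2, type R:VERB:SVA, replacement "is", annotator 0),
# and the blank line that terminates the example.
m2_entry = (
    "S This are a sentence .\n"
    "A 1 2|||R:VERB:SVA|||is|||REQUIRED|||-NONE-|||0\n"
    "\n"
)

# Record the generator is expected to yield for the entry above:
# token 1 of the source ("are") is replaced by token 1 of the target ("is").
expected_record = {
    "src_tokens": ["This", "are", "a", "sentence", "."],
    "tgt_tokens": ["This", "is", "a", "sentence", "."],
    "corrections": [
        {"idx_src": [1], "idx_tgt": [1], "corr_type": "R:VERB:SVA"}
    ],
}
```

Writing `m2_entry` to a file and pointing `_generate_examples` at it should reproduce `expected_record`; deletions follow the same path except that `idx_tgt` stays empty and the affected target tokens are removed.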