Matej Klemen committed
Commit e4b0c1d
1 Parent(s): 9d29a6c

Add first version of dataset script

Files changed (2):
  1. README.md +26 -0
  2. clc_fce.py +134 -0
README.md CHANGED
@@ -1,3 +1,29 @@
 ---
 license: other
+dataset_info:
+  features:
+  - name: src_tokens
+    sequence: string
+  - name: tgt_tokens
+    sequence: string
+  - name: corrections
+    list:
+    - name: idx_src
+      sequence: int32
+    - name: idx_tgt
+      sequence: int32
+    - name: corr_type
+      dtype: string
+  splits:
+  - name: train
+    num_bytes: 8658209
+    num_examples: 28350
+  - name: validation
+    num_bytes: 668073
+    num_examples: 2191
+  - name: test
+    num_bytes: 823872
+    num_examples: 2695
+  download_size: 2774021
+  dataset_size: 10150154
 ---
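
With this metadata in place, the dataset can be loaded through the standard `datasets` API once the script below is in the repo. A minimal sketch; the Hub repo id `matejklemen/clc_fce` is an assumption here, and recent `datasets` releases additionally require `trust_remote_code=True` for script-based datasets:

    from datasets import load_dataset

    # Repo id assumed for illustration; substitute the dataset's actual Hub path.
    fce = load_dataset("matejklemen/clc_fce", trust_remote_code=True)

    print(fce)  # DatasetDict with train/validation/test splits
    ex = fce["train"][0]
    print(ex["src_tokens"])   # tokenized learner (source) sentence
    print(ex["tgt_tokens"])   # tokenized corrected (target) sentence
    print(ex["corrections"])  # [{"idx_src": [...], "idx_tgt": [...], "corr_type": "..."}, ...]
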
clc_fce.py ADDED
@@ -0,0 +1,134 @@
+import os
+from copy import deepcopy
+
+import datasets
+
+
+_CITATION = """\
+@inproceedings{yannakoudakis-etal-2011-new,
+    title = "A New Dataset and Method for Automatically Grading {ESOL} Texts",
+    author = "Yannakoudakis, Helen and
+      Briscoe, Ted and
+      Medlock, Ben",
+    booktitle = "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
+    month = jun,
+    year = "2011",
+    url = "https://aclanthology.org/P11-1019",
+    pages = "180--189",
+}
+"""
+
+_DESCRIPTION = """\
+The CLC FCE Dataset is a set of 1,244 exam scripts written by candidates sitting the Cambridge ESOL First Certificate
+in English (FCE) examination in 2000 and 2001. The dataset exposes the sentence-level pre-tokenized M2 version, totaling
+33236 sentences.
+"""
+
+_HOMEPAGE = ""
+
+_LICENSE = "Custom, allowed for non-commercial research and educational purposes"
+
+_URLS = {
+    "clc_fce_bea19": "https://www.cl.cam.ac.uk/research/nl/bea2019st/data/fce_v2.1.bea19.tar.gz"
+}
+
+
+class CLCFCE(datasets.GeneratorBasedBuilder):
+    """Cambridge Learner Corpus: First Certificate in English"""
+
+    VERSION = datasets.Version("2.1.0")
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "src_tokens": datasets.Sequence(datasets.Value("string")),
+                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
+                "corrections": [{
+                    "idx_src": datasets.Sequence(datasets.Value("int32")),
+                    "idx_tgt": datasets.Sequence(datasets.Value("int32")),
+                    "corr_type": datasets.Value("string")
+                }]
+            }
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        urls = _URLS["clc_fce_bea19"]
+        data_dir = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"file_path": os.path.join(data_dir, "fce", "m2", "fce.train.gold.bea19.m2")},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"file_path": os.path.join(data_dir, "fce", "m2", "fce.dev.gold.bea19.m2")},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"file_path": os.path.join(data_dir, "fce", "m2", "fce.test.gold.bea19.m2")},
+            ),
+        ]
+
+    def _generate_examples(self, file_path):
+        skip_edits = {"noop", "UNK", "Um"}
+        with open(file_path, "r", encoding="utf-8") as f:
+            idx_ex = 0
+            src_sent, tgt_sent, corrections, offset = None, None, [], 0
+            for idx_line, _line in enumerate(f):
+                line = _line.strip()
+
+                if len(line) > 0:
+                    prefix, remainder = line[0], line[2:]
+                    if prefix == "S":
+                        src_sent = remainder.split(" ")
+                        tgt_sent = deepcopy(src_sent)
+
+                    elif prefix == "A":
+                        annotation_data = remainder.split("|||")
+                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
+                        edit_type, edit_text = annotation_data[1], annotation_data[2]
+                        if edit_type in skip_edits:
+                            continue
+
+                        formatted_correction = {
+                            "idx_src": list(range(idx_start, idx_end)),
+                            "idx_tgt": [],
+                            "corr_type": edit_type
+                        }
+                        annotator_id = int(annotation_data[-1])
+                        assert annotator_id == 0
+
+                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
+                        if removal:
+                            for idx_to_remove in range(idx_start, idx_end):
+                                del tgt_sent[offset + idx_to_remove]
+                                offset -= 1
+
+                        else:  # replacement/insertion
+                            edit_tokens = edit_text.split(" ")
+                            len_diff = len(edit_tokens) - (idx_end - idx_start)
+
+                            formatted_correction["idx_tgt"] = list(
+                                range(offset + idx_start, offset + idx_end + len_diff))
+                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
+                            offset += len_diff
+
+                        corrections.append(formatted_correction)
+
+                else:  # empty line, indicating end of example
+                    yield idx_ex, {
+                        "src_tokens": src_sent,
+                        "tgt_tokens": tgt_sent,
+                        "corrections": corrections
+                    }
+                    src_sent, tgt_sent, corrections, offset = None, None, [], 0
+                    idx_ex += 1
+
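
For context, the script parses the standard M2 annotation format: each example is an `S` line carrying the tokenized source sentence, followed by zero or more `A` lines of the form `start end|||error_type|||correction|||...|||annotator_id`, and terminated by a blank line. The sketch below traces a made-up entry (invented for illustration, not taken from the FCE data) through `_generate_examples`:

    # Hypothetical M2 entry (not from the FCE corpus):
    #
    #   S I likes turtles .
    #   A 1 2|||R:VERB:SVA|||like|||REQUIRED|||-NONE-|||0
    #
    # The generator would yield a record shaped like:
    record = {
        "src_tokens": ["I", "likes", "turtles", "."],
        "tgt_tokens": ["I", "like", "turtles", "."],
        "corrections": [
            {"idx_src": [1], "idx_tgt": [1], "corr_type": "R:VERB:SVA"},
        ],
    }

Note that `idx_src` indexes into `src_tokens` while `idx_tgt` indexes into `tgt_tokens`; because earlier edits can grow or shrink the target sentence, the script keeps a running `offset` and shifts every subsequent edit's target indices by it.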