Matej Klemen commited on
Commit
04bb6b4
1 Parent(s): 8b5c5ce

Add first version of the dataset script

Browse files
Files changed (2) hide show
  1. README.md +41 -2
  2. nucle.py +145 -0
README.md CHANGED
@@ -1,8 +1,47 @@
1
  ---
2
  license: other
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
4
 
5
  **Important**: This is only a script for loading the data, but the data itself is private. The script will only work in case you have access to the data, which you may request for non-commercial purposes [here](https://sterling8.d2.comp.nus.edu.sg/nucle_download/nucle.php).
6
  ```python
7
- data = datasets.load_dataset("matejklemen/nucle", data_dir=<dir-of-private-data>, ignore_verifications=True)"
8
- ```
 
 
 
1
  ---
2
  license: other
3
+ dataset_info:
4
+ - config_name: public
5
+ features:
6
+ - name: src_tokens
7
+ sequence: string
8
+ - name: tgt_tokens
9
+ sequence: string
10
+ - name: corrections
11
+ list:
12
+ - name: idx_src
13
+ sequence: int32
14
+ - name: idx_tgt
15
+ sequence: int32
16
+ - name: corr_type
17
+ dtype: string
18
+ splits:
19
+ - name: train
20
+ download_size: 0
21
+ dataset_size: 0
22
+ - config_name: private
23
+ features:
24
+ - name: src_tokens
25
+ sequence: string
26
+ - name: tgt_tokens
27
+ sequence: string
28
+ - name: corrections
29
+ list:
30
+ - name: idx_src
31
+ sequence: int32
32
+ - name: idx_tgt
33
+ sequence: int32
34
+ - name: corr_type
35
+ dtype: string
36
+ splits:
37
+ - name: train
38
+ download_size: 0
39
+ dataset_size: 0
40
  ---
41
 
42
  **Important**: This is only a script for loading the data, but the data itself is private. The script will only work in case you have access to the data, which you may request for non-commercial purposes [here](https://sterling8.d2.comp.nus.edu.sg/nucle_download/nucle.php).
43
  ```python
44
+ data = datasets.load_dataset("matejklemen/nucle", "private", data_dir=<dir-of-private-data>, ignore_verifications=True)
45
+ ```
46
+ The `ignore_verifications=True` is important as the datasets library initially builds validation statistics that it verifies against,
47
+ and these cannot be correctly computed when the data is not public.
nucle.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import logging
import os
from copy import deepcopy

import datasets


# Official citation for the corpus (BEA 2013 workshop paper).
_CITATION = """\
@inproceedings{dahlmeier-etal-2013-building,
    title = "Building a Large Annotated Corpus of Learner {E}nglish: The {NUS} Corpus of Learner {E}nglish",
    author = "Dahlmeier, Daniel  and
      Ng, Hwee Tou  and
      Wu, Siew Mei",
    booktitle = "Proceedings of the Eighth Workshop on Innovative Use of {NLP} for Building Educational Applications",
    month = jun,
    year = "2013",
    url = "https://aclanthology.org/W13-1703",
    pages = "22--31",
}
"""

_DESCRIPTION = """\
The National University of Singapore Corpus of Learner English (NUCLE) consists of 1,400 essays written by mainly Asian undergraduate students at the National University of Singapore
"""

_HOMEPAGE = "https://www.comp.nus.edu.sg/~nlp/corpora.html"

# The corpus is distributed under a custom (non-standard) license.
_LICENSE = "other"

# Placeholder URL table: the actual data is private and must be supplied
# manually via `data_dir`, so nothing is ever downloaded from here.
_URLS = {
    "dummy_link": "https://example.com/"
}
class NUCLE(datasets.GeneratorBasedBuilder):
    """NUCLE dataset for grammatical error correction.

    Parses a BEA19-formatted M2 annotation file. The corpus itself is
    private: the default "public" config is a stub that yields nothing
    (so automated hub tests pass), while the "private" config loads
    `nucle.train.gold.bea19.m2` from a manually supplied `data_dir`.
    """

    VERSION = datasets.Version("3.3.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="public", version=VERSION,
                               description="Dummy public config so that datasets tests pass"),
        datasets.BuilderConfig(name="private", version=VERSION,
                               description="Actual config used for loading the data"),
    ]

    DEFAULT_CONFIG_NAME = "public"

    def _info(self):
        """Declare dataset features: tokenized source/target plus corrections."""
        features = datasets.Features(
            {
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                # Each correction maps a span of source token indices to the
                # corresponding span of target token indices, with its M2
                # error-type string.
                "corrections": [{
                    "idx_src": datasets.Sequence(datasets.Value("int32")),
                    "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                    "corr_type": datasets.Value("string")
                }]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the path of the M2 file for the single TRAIN split.

        Falls back to a deliberately non-existent dummy path so that
        `_generate_examples` yields zero examples instead of crashing when
        the private data is unavailable.
        """
        file_path = "dummy.m2"  # intentionally non-existent -> zero examples
        if self.config.name == "private":
            data_dir = dl_manager.manual_dir
            if data_dir is not None:
                file_path = os.path.join(data_dir, "nucle.train.gold.bea19.m2")
            else:
                logging.warning("Manual data_dir not provided, so the data will not be loaded")
        else:
            logging.warning("The default config 'public' is intended to enable passing the tests and loading the "
                            "private data separately. If you have access to the data, please use the config 'private' "
                            "and provide the directory to the BEA19-formatted data as `data_dir=...`")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": file_path}
            )
        ]

    def _generate_examples(self, file_path):
        """Yield `(idx, example)` pairs parsed from an M2 file.

        Each example contains the source tokens, the target tokens obtained
        by applying annotator 0's edits to the source, and the list of
        corrections with their source/target token-index spans.
        """
        if not os.path.exists(file_path):
            # "public" config or missing manual data_dir: nothing to yield.
            return

        skip_edits = {"noop", "UNK", "Um"}  # M2 pseudo-edits, not real corrections
        with open(file_path, "r", encoding="utf-8") as f:
            idx_ex = 0
            src_sent, tgt_sent, corrections, offset = None, None, [], 0
            for _line in f:
                line = _line.strip()

                if len(line) > 0:
                    prefix, remainder = line[0], line[2:]
                    if prefix == "S":
                        src_sent = remainder.split(" ")
                        # Target starts as a copy and is patched edit by edit.
                        tgt_sent = deepcopy(src_sent)

                    elif prefix == "A":
                        annotation_data = remainder.split("|||")
                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
                        edit_type, edit_text = annotation_data[1], annotation_data[2]
                        if edit_type in skip_edits:
                            continue

                        formatted_correction = {
                            "idx_src": list(range(idx_start, idx_end)),
                            "idx_tgt": [],
                            "corr_type": edit_type
                        }
                        annotator_id = int(annotation_data[-1])
                        # NUCLE train is single-annotator; a second annotator
                        # would break the `offset` bookkeeping below.
                        assert annotator_id == 0

                        # `offset` tracks the cumulative length difference
                        # between tgt_sent and src_sent so that source-side
                        # indices can be mapped to current target positions.
                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
                        if removal:
                            for idx_to_remove in range(idx_start, idx_end):
                                del tgt_sent[offset + idx_to_remove]
                                offset -= 1

                        else:  # replacement/insertion
                            edit_tokens = edit_text.split(" ")
                            len_diff = len(edit_tokens) - (idx_end - idx_start)

                            formatted_correction["idx_tgt"] = list(
                                range(offset + idx_start, offset + idx_end + len_diff))
                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
                            offset += len_diff

                        corrections.append(formatted_correction)

                else:  # empty line, indicating end of example
                    # Guard against leading/consecutive blank lines, which
                    # previously yielded spurious examples with None sentences.
                    if src_sent is not None:
                        yield idx_ex, {
                            "src_tokens": src_sent,
                            "tgt_tokens": tgt_sent,
                            "corrections": corrections
                        }
                        idx_ex += 1
                    src_sent, tgt_sent, corrections, offset = None, None, [], 0

            # Fix: emit the trailing example when the file does not end with a
            # blank line (the previous version silently dropped it).
            if src_sent is not None:
                yield idx_ex, {
                    "src_tokens": src_sent,
                    "tgt_tokens": tgt_sent,
                    "corrections": corrections
                }