alex1qaz committed on
Commit
03d15df
·
1 Parent(s): 1cf83ed

Upload goodsmemo.py

Browse files
Files changed (1) hide show
  1. goodsmemo.py +227 -0
goodsmemo.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """GoodsMemo: a CoNLL-style language-independent named entity recognition dataset."""
18
+
19
+ import os
20
+
21
+ import datasets
22
+
23
+
24
# Module-level logger namespaced to this dataset script.
logger = datasets.logging.get_logger(__name__)


# BibTeX entry for the CoNLL-2003 shared-task paper; this script reuses the
# CoNLL column format and label sets.
# NOTE(review): the citation refers to CoNLL-2003, not to a GoodsMemo-specific
# publication — confirm this is the intended citation for the dataset card.
_CITATION = """\
@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
author = "Tjong Kim Sang, Erik F. and
De Meulder, Fien",
booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
year = "2003",
url = "https://www.aclweb.org/anthology/W03-0419",
pages = "142--147",
}
"""

# Human-readable description shown on the dataset card.
_DESCRIPTION = """\
goods_memo dataset
"""

# Archive expected to sit next to this script; extracted in _split_generators.
_LOCAL_PATH = "goodsmemo.zip"
# Split file names expected inside the extracted archive.
_TRAINING_FILE = "train.txt"
_DEV_FILE = "valid.txt"
_TEST_FILE = "test.txt"
47
+
48
+
49
class GoodsMemoConfig(datasets.BuilderConfig):
    """BuilderConfig for the GoodsMemo dataset.

    Args:
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
            (e.g. ``name``, ``version``, ``description``).
    """

    def __init__(self, **kwargs):
        # Python 3 zero-argument super() replaces the redundant
        # Python-2-style super(GoodsMemoConfig, self) form.
        super().__init__(**kwargs)
59
+
60
+
61
class GoodsMemo(datasets.GeneratorBasedBuilder):
    """GoodsMemo dataset: CoNLL-2003-style token / POS / chunk / NER annotations."""

    BUILDER_CONFIGS = [
        GoodsMemoConfig(name="googdsmemo", version=datasets.Version("1.0.0"), description="GoodsMemo dataset"),
    ]

    def _info(self):
        """Return the dataset metadata: features, description, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Penn-Treebank-style POS tag inventory, as used by CoNLL-2003.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    # CoNLL-2000-style IOB chunk tag inventory.
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    # Single entity type (ITEM) in IOB encoding.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ITEM",
                                "I-ITEM",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            # NOTE(review): homepage points at the CoNLL-2003 paper, apparently
            # inherited from the template this script was copied from — confirm
            # it is the intended homepage for GoodsMemo.
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Extract the bundled zip and return train/validation/test generators."""
        # The data ships as a local archive next to this script rather than
        # being downloaded from a URL, so only extract() is needed.
        extracted_dir = dl_manager.extract(_LOCAL_PATH)
        data_files = {
            "train": os.path.join(extracted_dir, _TRAINING_FILE),
            "dev": os.path.join(extracted_dir, _DEV_FILE),
            "test": os.path.join(extracted_dir, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs parsed from a CoNLL-formatted file.

        Each data line holds four single-space-separated fields:
        token, POS tag, chunk tag, NER tag.  Blank lines and
        ``-DOCSTART-`` markers end the current sentence.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                # Sentence boundary: -DOCSTART- marker or a blank line.
                # File iteration never yields "" and may produce lines that are
                # only whitespace, so test line.strip() rather than comparing
                # against "" / "\n" (the old check crashed on "  \n" lines).
                if line.startswith("-DOCSTART-") or not line.strip():
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # GoodsMemo fields are single-space separated; strip the
                    # trailing newline once here instead of only on the last
                    # field as before.
                    splits = line.rstrip().split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    ner_tags.append(splits[3])
        # Emit the final sentence when the file does not end with a blank line.
        if tokens:
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "pos_tags": pos_tags,
                "chunk_tags": chunk_tags,
                "ner_tags": ner_tags,
            }