Tom Aarsen committed
Commit af78b07
Parent: c4f8c8b

Initial commit

.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ *.conll filter=lfs diff=lfs merge=lfs -text
BN-Bangla/bn_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db183e1c04922bfd309fc749660b41ca185eef73511b27d8cbe1f182f2513e9c
+ size 290744
BN-Bangla/bn_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90de438cf5df9c26fa652b2c8c23f853e46369e22f50d1b02d820b4b2e71591f
+ size 25692466
BN-Bangla/bn_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1aaf09940df67eb42cdb847d2e28f40d051a826100f368b2bbdcb387a8f78995
+ size 5462822
DE-German/de_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed0b52a7198ed616863ee3a95514d1e7bd8e105ca22fbd12ae5d0abf7f46ccec
+ size 200212
DE-German/de_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f09279fa85aef7fd456a6e3d687497e0341bafc6b7ec3fcd8e2afa86cd565220
+ size 40066102
DE-German/de_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e51b9fe3dbca3eeda65daa7869059b8ae08f4623ec7fa8ecc83f7325a70fdbec
+ size 3823422
EN-English/en_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cf5276e98fdac71bb90df25f26a8146ba4e6e5f502f487843f7637044724701
+ size 206595
EN-English/en_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:657ea5cab4414b4504d5fb9c5a86f1c4cf83771ed4d5fe3e9987ef7d359e7864
+ size 40566208
EN-English/en_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db97d27d2474feec703614c81164503738514d859cac420a7c0b6300d7dfa1b1
+ size 3936860
ES-Spanish/es_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:916fedef248c02d4d09470731842796b23c22b0c689abc684bfc73fa90405457
+ size 211521
ES-Spanish/es_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:544fa7c8684409fa88be01a9b87ea4d4652dc0d5e7122036c045971f0bd8804f
+ size 42558378
ES-Spanish/es_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e149288fa155319dd60e1144284abfe447e7331efabc61dd63ff1190b9dda88
+ size 4091828
FA-Farsi/fa_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d9eb486ea22cc637bc98387581b2eac0f73d90d129709478c9637f193b31cbb
+ size 263642
FA-Farsi/fa_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9836d4f69d446a2c7d54391b9fc32059b05f05905ab0852195c5035501f083f9
+ size 32773538
FA-Farsi/fa_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c6bacad6c20ee962d0a463ec34e6d377c49594d062575f88dffa190ab3a8519
+ size 5005226
HI-Hindi/hi_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b9a36cc1672418c03fbbe4257013412a1efcc037bc8da6b720888778073b246
+ size 298956
HI-Hindi/hi_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b218bedefb3874a873f536cff2fd532ba6e423dff43f9dc5ff6706a855b48c5
+ size 29080644
HI-Hindi/hi_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:811129bf88de98a9618bec49d2776a4979f79e973c9eb0527f7b8beeb85ebc06
+ size 5785571
KO-Korean/ko_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7abc775c9ae7128bae00bdbb3fdb24ce3d227f84ebe5fcd0f68f74b6430bb2b2
+ size 216622
KO-Korean/ko_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4caa199c24a274aa78c3f085e1edb58051b789b81d13357f3015d9eea7ba67f1
+ size 30919495
KO-Korean/ko_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aada26d8cbdcb0a48aee3fa597395a8fb07c629edc468e742bd91218972023b0
+ size 4145053
MIX_Code_mixed/mix_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0311ed2ca8e785d3146b4774c031bebb44380b0cbf3fa47199fd83e0a25cfc44
+ size 101488
MIX_Code_mixed/mix_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d47518b92b472f0ab24db845160e6089d10358183961e626aaf5ade68311f47f
+ size 21393843
MIX_Code_mixed/mix_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d931d241f72f25f039e62085830d8e3a9844c671a00be2f829b4e9581fa6a34c
+ size 307654
MULTI_Multilingual/multi_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3fe6a6b7c9e25ad7deaad8d336f5a6991e85c8f7581f7d6c6b1093872ad6aff
+ size 2625815
MULTI_Multilingual/multi_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f8bda41fbf5c63e4c76c8033e907b41cd2e07080fd16bea11d125ccb6e6a8c2
+ size 96145555
MULTI_Multilingual/multi_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:681f5a42823c57ebaa2e881acc14443b839c60c005d7cf3b032a12a6d43f93b9
+ size 49962124
NL-Dutch/nl_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea05f7a0b0241ff65bf6974069148a0f5fb3a158e4c114bb87f17cb9f9eb77ea
+ size 190703
NL-Dutch/nl_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f33101633c72f72dcf11abd60e82b3172bc52682c4762d7c8fa6b9a2759caa9
+ size 39340773
NL-Dutch/nl_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e6dd86b0a7c35901a1e995b149c7e728a711beecb56df499de22cc35c2a8d19
+ size 3732388
README.md CHANGED
@@ -1,3 +1,482 @@
  ---
  license: cc-by-4.0
+ task_categories:
+ - token-classification
+ language:
+ - bn
+ - de
+ - en
+ - es
+ - fa
+ - hi
+ - ko
+ - nl
+ - ru
+ - tr
+ - zh
+ - multilingual
+ tags:
+ - multiconer
+ - ner
+ - multilingual
+ - named entity recognition
+ size_categories:
+ - 100K<n<1M
+ dataset_info:
+ - config_name: bn
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 5616369
+     num_examples: 15300
+   - name: validation
+     num_bytes: 301806
+     num_examples: 800
+   - name: test
+     num_bytes: 21668288
+     num_examples: 133119
+   download_size: 31446032
+   dataset_size: 27586463
+ - config_name: de
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 4056698
+     num_examples: 15300
+   - name: validation
+     num_bytes: 214572
+     num_examples: 800
+   - name: test
+     num_bytes: 37113304
+     num_examples: 217824
+   download_size: 44089736
+   dataset_size: 41384574
+ - config_name: en
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 4330080
+     num_examples: 15300
+   - name: validation
+     num_bytes: 229689
+     num_examples: 800
+   - name: test
+     num_bytes: 38728401
+     num_examples: 217818
+   download_size: 44709663
+   dataset_size: 43288170
+ - config_name: es
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 4576557
+     num_examples: 15300
+   - name: validation
+     num_bytes: 238872
+     num_examples: 800
+   - name: test
+     num_bytes: 41457435
+     num_examples: 217887
+   download_size: 46861727
+   dataset_size: 46272864
+ - config_name: fa
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 5550551
+     num_examples: 15300
+   - name: validation
+     num_bytes: 294184
+     num_examples: 800
+   - name: test
+     num_bytes: 30301688
+     num_examples: 165702
+   download_size: 38042406
+   dataset_size: 36146423
+ - config_name: hi
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 6189324
+     num_examples: 15300
+   - name: validation
+     num_bytes: 321246
+     num_examples: 800
+   - name: test
+     num_bytes: 25771882
+     num_examples: 141565
+   download_size: 35165171
+   dataset_size: 32282452
+ - config_name: ko
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 4439652
+     num_examples: 15300
+   - name: validation
+     num_bytes: 233963
+     num_examples: 800
+   - name: test
+     num_bytes: 27529239
+     num_examples: 178249
+   download_size: 35281170
+   dataset_size: 32202854
+ - config_name: mix
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 307844
+     num_examples: 1500
+   - name: validation
+     num_bytes: 100909
+     num_examples: 500
+   - name: test
+     num_bytes: 20218549
+     num_examples: 100000
+   download_size: 21802985
+   dataset_size: 20627302
+ - config_name: multi
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 54119956
+     num_examples: 168300
+   - name: validation
+     num_bytes: 2846552
+     num_examples: 8800
+   - name: test
+     num_bytes: 91509480
+     num_examples: 471911
+   download_size: 148733494
+   dataset_size: 148475988
+ - config_name: nl
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 4070487
+     num_examples: 15300
+   - name: validation
+     num_bytes: 209337
+     num_examples: 800
+   - name: test
+     num_bytes: 37128925
+     num_examples: 217337
+   download_size: 43263864
+   dataset_size: 41408749
+ - config_name: ru
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 5313989
+     num_examples: 15300
+   - name: validation
+     num_bytes: 279470
+     num_examples: 800
+   - name: test
+     num_bytes: 47458726
+     num_examples: 217501
+   download_size: 54587257
+   dataset_size: 53052185
+ - config_name: tr
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 4076774
+     num_examples: 15300
+   - name: validation
+     num_bytes: 213017
+     num_examples: 800
+   - name: test
+     num_bytes: 14779846
+     num_examples: 136935
+   download_size: 22825291
+   dataset_size: 19069637
+ - config_name: zh
+   features:
+   - name: id
+     dtype: int32
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-PER
+           '2': I-PER
+           '3': B-LOC
+           '4': I-LOC
+           '5': B-CORP
+           '6': I-CORP
+           '7': B-GRP
+           '8': I-GRP
+           '9': B-PROD
+           '10': I-PROD
+           '11': B-CW
+           '12': I-CW
+   splits:
+   - name: train
+     num_bytes: 5899475
+     num_examples: 15300
+   - name: validation
+     num_bytes: 310396
+     num_examples: 800
+   - name: test
+     num_bytes: 29349271
+     num_examples: 151661
+   download_size: 36101525
+   dataset_size: 35559142
  ---
+
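Note: the dataset_info block above declares thirteen configs (one per language plus multi and mix), each exposing id, tokens, and ner_tags over the same 13 entity labels. For orientation, here is a minimal, hypothetical sketch of loading one config with the datasets library; the repository id is a placeholder, not a path confirmed by this commit.

from datasets import load_dataset

# Hypothetical repo id: substitute the actual Hub path of this dataset repository.
multiconer_en = load_dataset("<namespace>/MultiCoNER", "en")

# ner_tags is a Sequence of ClassLabel; the names match the YAML metadata above.
label_names = multiconer_en["train"].features["ner_tags"].feature.names
print(label_names)

example = multiconer_en["train"][0]
print(example["tokens"])
print([label_names[tag] for tag in example["ner_tags"]])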
RU-Russian/ru_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35315acbd50552d2116d271927fef3904fe27e8dc59ce0831b8e72f1035fa4e9
+ size 256342
RU-Russian/ru_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c1b9a4b2422e249b77d269bbce115e88723787668dc46da06379199c42263d3
+ size 49418863
RU-Russian/ru_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b05007dee79b6c77a82e9a449c3c1062af01465ce0d508b4eecb6e46794d3e94
+ size 4912052
TR-Turkish/tr_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:407267c0795c02536be0bf453d7f00a89324a1376cdf5177d1ce631cc42fe1b8
+ size 197590
TR-Turkish/tr_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15cd5c355b24675d78f4f0cb6a72a5516a631c4c2e370b4f8440bb5b01393a3b
+ size 18813881
TR-Turkish/tr_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11817e33a7c5992bf5fc7d01f3b8cd0779166f8d9227c04becd343f56967a6be
+ size 3813820
ZH-Chinese/zh_dev.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ac16573b5cd113647a311f061efdc2875951059c97442360c808db7fb475cc9
+ size 266488
ZH-Chinese/zh_test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c3e59815e5ce7e0707abb0eee38c098dc70e8b4d93a8a9ce0a53ea7b09a7264
+ size 30742555
ZH-Chinese/zh_train.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a9cb5d22926e78914d3951c83d380f7b07d945729ca977848bd842d5ed01b31
+ size 5092482
multiconer.py ADDED
@@ -0,0 +1,224 @@
+ # coding=utf-8
+ """MultiCoNER: A Large-scale Multilingual dataset for Complex Named Entity Recognition"""
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """\
+ @misc{malmasi2022multiconer,
+     title={MultiCoNER: A Large-scale Multilingual dataset for Complex Named Entity Recognition},
+     author={Shervin Malmasi and Anjie Fang and Besnik Fetahu and Sudipta Kar and Oleg Rokhlenko},
+     year={2022},
+     eprint={2208.14536},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ We present MultiCoNER, a large multilingual dataset for Named Entity Recognition that covers 3 domains (Wiki \
+ sentences, questions, and search queries) across 11 languages, as well as multilingual and code-mixing subsets. \
+ This dataset is designed to represent contemporary challenges in NER, including low-context scenarios (short \
+ and uncased text), syntactically complex entities like movie titles, and long-tail entity distributions. The \
+ 26M token dataset is compiled from public resources using techniques such as heuristic-based sentence sampling, \
+ template extraction and slotting, and machine translation. We applied two NER models on our dataset: a baseline \
+ XLM-RoBERTa model, and a state-of-the-art GEMNET model that leverages gazetteers. The baseline achieves moderate \
+ performance (macro-F1=54%), highlighting the difficulty of our data. GEMNET, which uses gazetteers, improves \
+ significantly (average improvement of macro-F1=+30%). MultiCoNER poses challenges even for large pre-trained \
+ language models, and we believe that it can help further research in building robust NER systems. MultiCoNER \
+ is publicly available at https://registry.opendata.aws/multiconer/ and we hope that this resource will help \
+ advance research in various aspects of NER.
+ """
+
+ subset_to_dir = {
+     "bn": "BN-Bangla",
+     "de": "DE-German",
+     "en": "EN-English",
+     "es": "ES-Spanish",
+     "fa": "FA-Farsi",
+     "hi": "HI-Hindi",
+     "ko": "KO-Korean",
+     "nl": "NL-Dutch",
+     "ru": "RU-Russian",
+     "tr": "TR-Turkish",
+     "zh": "ZH-Chinese",
+     "multi": "MULTI_Multilingual",
+     "mix": "MIX_Code_mixed",
+ }
+
+
+ class MultiCoNERConfig(datasets.BuilderConfig):
+     """BuilderConfig for MultiCoNER"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for MultiCoNER.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(MultiCoNERConfig, self).__init__(**kwargs)
+
+
+ class MultiCoNER(datasets.GeneratorBasedBuilder):
+     """MultiCoNER dataset."""
+
+     BUILDER_CONFIGS = [
+         MultiCoNERConfig(
+             name="bn",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Bangla dataset",
+         ),
+         MultiCoNERConfig(
+             name="de",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER German dataset",
+         ),
+         MultiCoNERConfig(
+             name="en",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER English dataset",
+         ),
+         MultiCoNERConfig(
+             name="es",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Spanish dataset",
+         ),
+         MultiCoNERConfig(
+             name="fa",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Farsi dataset",
+         ),
+         MultiCoNERConfig(
+             name="hi",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Hindi dataset",
+         ),
+         MultiCoNERConfig(
+             name="ko",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Korean dataset",
+         ),
+         MultiCoNERConfig(
+             name="nl",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Dutch dataset",
+         ),
+         MultiCoNERConfig(
+             name="ru",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Russian dataset",
+         ),
+         MultiCoNERConfig(
+             name="tr",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Turkish dataset",
+         ),
+         MultiCoNERConfig(
+             name="zh",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Chinese dataset",
+         ),
+         MultiCoNERConfig(
+             name="multi",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Multilingual dataset",
+         ),
+         MultiCoNERConfig(
+             name="mix",
+             version=datasets.Version("1.0.0"),
+             description="MultiCoNER Mixed dataset",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-PER",
+                                 "I-PER",
+                                 "B-LOC",
+                                 "I-LOC",
+                                 "B-CORP",
+                                 "I-CORP",
+                                 "B-GRP",
+                                 "I-GRP",
+                                 "B-PROD",
+                                 "I-PROD",
+                                 "B-CW",
+                                 "I-CW",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{subset_to_dir[self.config.name].upper()}/{self.config.name}_train.conll",
+             "dev": f"{subset_to_dir[self.config.name].upper()}/{self.config.name}_dev.conll",
+             "test": f"{subset_to_dir[self.config.name].upper()}/{self.config.name}_test.conll",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": downloaded_files["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": downloaded_files["dev"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": downloaded_files["test"]},
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+
+         with open(filepath, "r", encoding="utf8") as f:
+             guid = -1
+             tokens = []
+             ner_tags = []
+
+             for line in f:
+                 if line.strip().startswith("# id"):
+                     guid += 1
+                     tokens = []
+                     ner_tags = []
+                 elif " _ _ " in line:
+                     # Separator is " _ _ "
+                     splits = line.split(" _ _ ")
+                     tokens.append(splits[0].strip())
+                     ner_tags.append(splits[1].strip())
+                 elif len(line.strip()) == 0:
+                     if len(tokens) >= 1 and len(tokens) == len(ner_tags):
+                         yield guid, {
+                             "id": guid,
+                             "tokens": tokens,
+                             "ner_tags": ner_tags,
+                         }
+                     tokens = []
+                     ner_tags = []
+                 else:
+                     continue
+
+             if len(tokens) >= 1 and len(tokens) == len(ner_tags):
+                 yield guid, {
+                     "id": guid,
+                     "tokens": tokens,
+                     "ner_tags": ner_tags,
+                 }
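Note: _generate_examples above assumes one "# id ..." comment line per sentence, one "token _ _ TAG" line per token, and a blank line terminating each sentence. The snippet below is an illustrative sketch of that parsing contract on an invented sentence (the id and tokens are made up, not taken from the actual files).

# Illustrative only: a made-up sentence in the layout the loader expects.
sample = (
    "# id 00000000-0000-0000-0000-000000000000\n"
    "red _ _ B-CW\n"
    "dwarf _ _ I-CW\n"
    "is _ _ O\n"
    "a _ _ O\n"
    "sitcom _ _ O\n"
    "\n"
)

tokens, ner_tags = [], []
for line in sample.splitlines():
    if line.startswith("# id"):
        tokens, ner_tags = [], []
    elif " _ _ " in line:
        token, tag = line.split(" _ _ ")
        tokens.append(token.strip())
        ner_tags.append(tag.strip())
    elif not line.strip() and tokens:
        print(list(zip(tokens, ner_tags)))
        tokens, ner_tags = [], []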