Commit
•
8537242
0
Parent(s):
Update files from the datasets library (from 1.2.0)
Browse filesRelease notes: https://github.com/huggingface/datasets/releases/tag/1.2.0
- .gitattributes +27 -0
- README.md +864 -0
- dataset_infos.json +1 -0
- dummy/mlqa/1.0.0/dummy_data.zip +3 -0
- dummy/nc/1.0.0/dummy_data.zip +3 -0
- dummy/ner/1.0.0/dummy_data.zip +3 -0
- dummy/ntg/1.0.0/dummy_data.zip +3 -0
- dummy/paws-x/1.0.0/dummy_data.zip +3 -0
- dummy/pos/1.0.0/dummy_data.zip +3 -0
- dummy/qadsm/1.0.0/dummy_data.zip +3 -0
- dummy/qam/1.0.0/dummy_data.zip +3 -0
- dummy/qg/1.0.0/dummy_data.zip +3 -0
- dummy/wpr/1.0.0/dummy_data.zip +3 -0
- dummy/xnli/1.0.0/dummy_data.zip +3 -0
- xglue.py +575 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bin.* filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
20 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
+
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,864 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
annotations_creators:
|
3 |
+
mlqa:
|
4 |
+
- crowdsourced
|
5 |
+
nc:
|
6 |
+
- machine-generated
|
7 |
+
ner:
|
8 |
+
- expert-generated
|
9 |
+
- found
|
10 |
+
ntg:
|
11 |
+
- machine-generated
|
12 |
+
paws-x:
|
13 |
+
- expert-generated
|
14 |
+
pos:
|
15 |
+
- expert-generated
|
16 |
+
- found
|
17 |
+
qadsm:
|
18 |
+
- machine-generated
|
19 |
+
qam:
|
20 |
+
- machine-generated
|
21 |
+
qg:
|
22 |
+
- machine-generated
|
23 |
+
wpr:
|
24 |
+
- machine-generated
|
25 |
+
xnli:
|
26 |
+
- machine-generated
|
27 |
+
language_creators:
|
28 |
+
mlqa:
|
29 |
+
- found
|
30 |
+
nc:
|
31 |
+
- found
|
32 |
+
ner:
|
33 |
+
- crowdsourced
|
34 |
+
- expert-generated
|
35 |
+
ntg:
|
36 |
+
- machine-generated
|
37 |
+
paws-x:
|
38 |
+
- expert-generated
|
39 |
+
pos:
|
40 |
+
- crowdsourced
|
41 |
+
- expert-generated
|
42 |
+
qadsm:
|
43 |
+
- found
|
44 |
+
qam:
|
45 |
+
- found
|
46 |
+
qg:
|
47 |
+
- machine-generated
|
48 |
+
wpr:
|
49 |
+
- found
|
50 |
+
xnli:
|
51 |
+
- crowdsourced
|
52 |
+
- expert-generated
|
53 |
+
languages:
|
54 |
+
mlqa:
|
55 |
+
- ar
|
56 |
+
- de
|
57 |
+
- en
|
58 |
+
- es
|
59 |
+
- hi
|
60 |
+
- vi
|
61 |
+
- zh
|
62 |
+
nc:
|
63 |
+
- en
|
64 |
+
- de
|
65 |
+
- es
|
66 |
+
- fr
|
67 |
+
- ru
|
68 |
+
ner:
|
69 |
+
- de
|
70 |
+
- en
|
71 |
+
- es
|
72 |
+
- nl
|
73 |
+
ntg:
|
74 |
+
- en
|
75 |
+
- de
|
76 |
+
- es
|
77 |
+
- fr
|
78 |
+
- ru
|
79 |
+
paws-x:
|
80 |
+
- en
|
81 |
+
- de
|
82 |
+
- es
|
83 |
+
- fr
|
84 |
+
pos:
|
85 |
+
- ar
|
86 |
+
- bg
|
87 |
+
- de
|
88 |
+
- el
|
89 |
+
- en
|
90 |
+
- es
|
91 |
+
- fr
|
92 |
+
- hi
|
93 |
+
- it
|
94 |
+
- nl
|
95 |
+
- pl
|
96 |
+
- ru
|
97 |
+
- th
|
98 |
+
- tr
|
99 |
+
- ur
|
100 |
+
- vi
|
101 |
+
- zh
|
102 |
+
qadsm:
|
103 |
+
- en
|
104 |
+
- de
|
105 |
+
- fr
|
106 |
+
qam:
|
107 |
+
- en
|
108 |
+
- de
|
109 |
+
- fr
|
110 |
+
qg:
|
111 |
+
- en
|
112 |
+
- de
|
113 |
+
- fr
|
114 |
+
- pt
|
115 |
+
- it
|
116 |
+
- zh
|
117 |
+
wpr:
|
118 |
+
- en
|
119 |
+
- de
|
120 |
+
- fr
|
121 |
+
- es
|
122 |
+
- it
|
123 |
+
- pt
|
124 |
+
- zh
|
125 |
+
xnli:
|
126 |
+
- ar
|
127 |
+
- bg
|
128 |
+
- de
|
129 |
+
- el
|
130 |
+
- en
|
131 |
+
- es
|
132 |
+
- fr
|
133 |
+
- hi
|
134 |
+
- ru
|
135 |
+
- sw
|
136 |
+
- th
|
137 |
+
- tr
|
138 |
+
- ur
|
139 |
+
- vi
|
140 |
+
- zh
|
141 |
+
licenses:
|
142 |
+
mlqa:
|
143 |
+
- cc-by-sa-4-0
|
144 |
+
nc:
|
145 |
+
- unknown
|
146 |
+
ner:
|
147 |
+
- unknown
|
148 |
+
ntg:
|
149 |
+
- unknown
|
150 |
+
paws-x:
|
151 |
+
- unknown
|
152 |
+
pos:
|
153 |
+
- other-Licence Universal Dependencies v2-5
|
154 |
+
qadsm:
|
155 |
+
- unknown
|
156 |
+
qam:
|
157 |
+
- unknown
|
158 |
+
qg:
|
159 |
+
- unknown
|
160 |
+
wpr:
|
161 |
+
- unknown
|
162 |
+
xnli:
|
163 |
+
- cc-by-nc-4-0
|
164 |
+
multilinguality:
|
165 |
+
mlqa:
|
166 |
+
- multilingual
|
167 |
+
nc:
|
168 |
+
- multilingual
|
169 |
+
ner:
|
170 |
+
- multilingual
|
171 |
+
ntg:
|
172 |
+
- multilingual
|
173 |
+
paws-x:
|
174 |
+
- multilingual
|
175 |
+
pos:
|
176 |
+
- multilingual
|
177 |
+
qadsm:
|
178 |
+
- multilingual
|
179 |
+
qam:
|
180 |
+
- multilingual
|
181 |
+
qg:
|
182 |
+
- multilingual
|
183 |
+
wpr:
|
184 |
+
- multilingual
|
185 |
+
xnli:
|
186 |
+
- multilingual
|
187 |
+
- translation
|
188 |
+
size_categories:
|
189 |
+
mlqa:
|
190 |
+
- 100K<n<1M
|
191 |
+
nc:
|
192 |
+
- 100K<n<1M
|
193 |
+
ner:
|
194 |
+
- 10K<n<100K
|
195 |
+
ntg:
|
196 |
+
- 100K<n<1M
|
197 |
+
paws-x:
|
198 |
+
- 10K<n<100K
|
199 |
+
pos:
|
200 |
+
- 10K<n<100K
|
201 |
+
qadsm:
|
202 |
+
- 100K<n<1M
|
203 |
+
qam:
|
204 |
+
- 100K<n<1M
|
205 |
+
qg:
|
206 |
+
- 100K<n<1M
|
207 |
+
wpr:
|
208 |
+
- 100K<n<1M
|
209 |
+
xnli:
|
210 |
+
- 100K<n<1M
|
211 |
+
source_datasets:
|
212 |
+
mlqa:
|
213 |
+
- extended|squad
|
214 |
+
nc:
|
215 |
+
- original
|
216 |
+
ner:
|
217 |
+
- extended|conll2003
|
218 |
+
ntg:
|
219 |
+
- original
|
220 |
+
paws-x:
|
221 |
+
- original
|
222 |
+
pos:
|
223 |
+
- original
|
224 |
+
qadsm:
|
225 |
+
- original
|
226 |
+
qam:
|
227 |
+
- original
|
228 |
+
qg:
|
229 |
+
- original
|
230 |
+
wpr:
|
231 |
+
- original
|
232 |
+
xnli:
|
233 |
+
- extended|xnli
|
234 |
+
task_categories:
|
235 |
+
mlqa:
|
236 |
+
- question-answering
|
237 |
+
nc:
|
238 |
+
- text-classification
|
239 |
+
ner:
|
240 |
+
- structure-prediction
|
241 |
+
ntg:
|
242 |
+
- conditional-text-generation
|
243 |
+
paws-x:
|
244 |
+
- text-classification
|
245 |
+
pos:
|
246 |
+
- structure-prediction
|
247 |
+
qadsm:
|
248 |
+
- text-classification
|
249 |
+
qam:
|
250 |
+
- text-classification
|
251 |
+
qg:
|
252 |
+
- conditional-text-generation
|
253 |
+
wpr:
|
254 |
+
- text-classification
|
255 |
+
xnli:
|
256 |
+
- text-classification
|
257 |
+
task_ids:
|
258 |
+
mlqa:
|
259 |
+
- extractive-qa
|
260 |
+
- open-domain-qa
|
261 |
+
nc:
|
262 |
+
- topic-classification
|
263 |
+
ner:
|
264 |
+
- named-entity-recognition
|
265 |
+
ntg:
|
266 |
+
- summarization
|
267 |
+
paws-x:
|
268 |
+
- text-classification-other-paraphrase identification
|
269 |
+
pos:
|
270 |
+
- parsing
|
271 |
+
qadsm:
|
272 |
+
- acceptability-classification
|
273 |
+
qam:
|
274 |
+
- acceptability-classification
|
275 |
+
qg:
|
276 |
+
- conditional-text-generation-other-question-answering
|
277 |
+
wpr:
|
278 |
+
- acceptability-classification
|
279 |
+
xnli:
|
280 |
+
- natural-language-inference
|
281 |
+
---
|
282 |
+
|
283 |
+
# Dataset Card for XGLUE
|
284 |
+
|
285 |
+
## Table of Contents
|
286 |
+
- [Dataset Description](#dataset-description)
|
287 |
+
- [Dataset Summary](#dataset-summary)
|
288 |
+
- [Leaderboards](#leaderboards)
|
289 |
+
- [Dataset Structure](#dataset-structure)
|
290 |
+
- [Data Instances](#data-instances)
|
291 |
+
- [Data Fields](#data-instances)
|
292 |
+
- [Data Splits](#data-instances)
|
293 |
+
- [Dataset Creation](#dataset-creation)
|
294 |
+
- [Curation Rationale](#curation-rationale)
|
295 |
+
- [Source Data](#source-data)
|
296 |
+
- [Annotations](#annotations)
|
297 |
+
- [Personal and Sensitive Information](#personal-and-sensitive-information)
|
298 |
+
- [Considerations for Using the Data](#considerations-for-using-the-data)
|
299 |
+
- [Social Impact of Dataset](#social-impact-of-dataset)
|
300 |
+
- [Discussion of Biases](#discussion-of-biases)
|
301 |
+
- [Other Known Limitations](#other-known-limitations)
|
302 |
+
- [Additional Information](#additional-information)
|
303 |
+
- [Dataset Curators](#dataset-curators)
|
304 |
+
- [Licensing Information](#licensing-information)
|
305 |
+
- [Citation Information](#citation-information)
|
306 |
+
|
307 |
+
## Dataset Description
|
308 |
+
|
309 |
+
- **Homepage:** [XGLUE homepage](https://microsoft.github.io/XGLUE/)
|
310 |
+
- **Paper:** [XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation](https://arxiv.org/abs/1907.09190)
|
311 |
+
|
312 |
+
### Dataset Summary
|
313 |
+
|
314 |
+
XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained models with respect to cross-lingual natural language understanding and generation.
|
315 |
+
|
316 |
+
The training data of each task is in English while the validation and test data is present in multiple different languages.
|
317 |
+
The following table shows which languages are present as validation and test data for each config.
|
318 |
+
|
319 |
+
![Available Languages for Test and Validation Data](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/xglue_langs.png)
|
320 |
+
|
321 |
+
Therefore, for each config, a cross-lingual pre-trained model should be fine-tuned on the English training data, and evaluated on all languages.
|
322 |
+
|
323 |
+
### Leaderboards
|
324 |
+
|
325 |
+
The XGLUE leaderboard can be found on the [homepage](https://microsoft.github.io/XGLUE/) and
|
326 |
+
consists of an XGLUE-Understanding Score (the average of the tasks `ner`, `pos`, `mlqa`, `nc`, `xnli`, `paws-x`, `qadsm`, `wpr`, `qam`) and an XGLUE-Generation Score (the average of the tasks `qg`, `ntg`).
|
327 |
+
|
328 |
+
## Dataset Structure
|
329 |
+
|
330 |
+
### Data Instances
|
331 |
+
|
332 |
+
#### ner
|
333 |
+
|
334 |
+
An example of 'test.nl' looks as follows.
|
335 |
+
|
336 |
+
```
|
337 |
+
{
|
338 |
+
"ner": [
|
339 |
+
"O",
|
340 |
+
"O",
|
341 |
+
"O",
|
342 |
+
"B-LOC",
|
343 |
+
"O",
|
344 |
+
"B-LOC",
|
345 |
+
"O",
|
346 |
+
"B-LOC",
|
347 |
+
"O",
|
348 |
+
"O",
|
349 |
+
"O",
|
350 |
+
"O",
|
351 |
+
"O",
|
352 |
+
"O",
|
353 |
+
"O",
|
354 |
+
"B-PER",
|
355 |
+
"I-PER",
|
356 |
+
"O",
|
357 |
+
"O",
|
358 |
+
"B-LOC",
|
359 |
+
"O",
|
360 |
+
"O"
|
361 |
+
],
|
362 |
+
"words": [
|
363 |
+
"Dat",
|
364 |
+
"is",
|
365 |
+
"in",
|
366 |
+
"Itali\u00eb",
|
367 |
+
",",
|
368 |
+
"Spanje",
|
369 |
+
"of",
|
370 |
+
"Engeland",
|
371 |
+
"misschien",
|
372 |
+
"geen",
|
373 |
+
"probleem",
|
374 |
+
",",
|
375 |
+
"maar",
|
376 |
+
"volgens",
|
377 |
+
"'",
|
378 |
+
"Der",
|
379 |
+
"Kaiser",
|
380 |
+
"'",
|
381 |
+
"in",
|
382 |
+
"Duitsland",
|
383 |
+
"wel",
|
384 |
+
"."
|
385 |
+
]
|
386 |
+
}
|
387 |
+
```
|
388 |
+
|
389 |
+
#### pos
|
390 |
+
|
391 |
+
An example of 'test.fr' looks as follows.
|
392 |
+
|
393 |
+
```
|
394 |
+
{
|
395 |
+
"pos": [
|
396 |
+
"PRON",
|
397 |
+
"VERB",
|
398 |
+
"SCONJ",
|
399 |
+
"ADP",
|
400 |
+
"PRON",
|
401 |
+
"CCONJ",
|
402 |
+
"DET",
|
403 |
+
"NOUN",
|
404 |
+
"ADP",
|
405 |
+
"NOUN",
|
406 |
+
"CCONJ",
|
407 |
+
"NOUN",
|
408 |
+
"ADJ",
|
409 |
+
"PRON",
|
410 |
+
"PRON",
|
411 |
+
"AUX",
|
412 |
+
"ADV",
|
413 |
+
"VERB",
|
414 |
+
"PUNCT",
|
415 |
+
"PRON",
|
416 |
+
"VERB",
|
417 |
+
"VERB",
|
418 |
+
"DET",
|
419 |
+
"ADJ",
|
420 |
+
"NOUN",
|
421 |
+
"ADP",
|
422 |
+
"DET",
|
423 |
+
"NOUN",
|
424 |
+
"PUNCT"
|
425 |
+
],
|
426 |
+
"words": [
|
427 |
+
"Je",
|
428 |
+
"sens",
|
429 |
+
"qu'",
|
430 |
+
"entre",
|
431 |
+
"\u00e7a",
|
432 |
+
"et",
|
433 |
+
"les",
|
434 |
+
"films",
|
435 |
+
"de",
|
436 |
+
"m\u00e9decins",
|
437 |
+
"et",
|
438 |
+
"scientifiques",
|
439 |
+
"fous",
|
440 |
+
"que",
|
441 |
+
"nous",
|
442 |
+
"avons",
|
443 |
+
"d\u00e9j\u00e0",
|
444 |
+
"vus",
|
445 |
+
",",
|
446 |
+
"nous",
|
447 |
+
"pourrions",
|
448 |
+
"emprunter",
|
449 |
+
"un",
|
450 |
+
"autre",
|
451 |
+
"chemin",
|
452 |
+
"pour",
|
453 |
+
"l'",
|
454 |
+
"origine",
|
455 |
+
"."
|
456 |
+
]
|
457 |
+
}
|
458 |
+
```
|
459 |
+
|
460 |
+
#### mlqa
|
461 |
+
|
462 |
+
An example of 'test.hi' looks as follows.
|
463 |
+
|
464 |
+
```
|
465 |
+
{
|
466 |
+
"answers": {
|
467 |
+
"answer_start": [
|
468 |
+
378
|
469 |
+
],
|
470 |
+
"text": [
|
471 |
+
"\u0909\u0924\u094d\u0924\u0930 \u092a\u0942\u0930\u094d\u0935"
|
472 |
+
]
|
473 |
+
},
|
474 |
+
"context": "\u0909\u0938\u0940 \"\u090f\u0930\u093f\u092f\u093e XX \" \u0928\u093e\u092e\u0915\u0930\u0923 \u092a\u094d\u0930\u0923\u093e\u0932\u0940 \u0915\u093e \u092a\u094d\u0930\u092f\u094b\u0917 \u0928\u0947\u0935\u093e\u0926\u093e \u092a\u0930\u0940\u0915\u094d\u0937\u0923 \u0938\u094d\u0925\u0932 \u0915\u0947 \u0905\u0928\u094d\u092f \u092d\u093e\u0917\u094b\u0902 \u0915\u0947 \u0932\u093f\u090f \u0915\u093f\u092f\u093e \u0917\u092f\u093e \u0939\u0948\u0964\u092e\u0942\u0932 \u0930\u0942\u092a \u092e\u0947\u0902 6 \u092c\u091f\u0947 10 \u092e\u0940\u0932 \u0915\u093e \u092f\u0939 \u0906\u092f\u0924\u093e\u0915\u093e\u0930 \u0905\u0921\u094d\u0921\u093e \u0905\u092c \u0924\u0925\u093e\u0915\u0925\u093f\u0924 '\u0917\u094d\u0930\u0942\u092e \u092c\u0949\u0915\u094d\u0938 \" \u0915\u093e \u090f\u0915 \u092d\u093e\u0917 \u0939\u0948, \u091c\u094b \u0915\u093f 23 \u092c\u091f\u0947 25.3 \u092e\u0940\u0932 \u0915\u093e \u090f\u0915 \u092a\u094d\u0930\u0924\u093f\u092c\u0902\u0927\u093f\u0924 \u0939\u0935\u093e\u0908 \u0915\u094d\u0937\u0947\u0924\u094d\u0930 \u0939\u0948\u0964 \u092f\u0939 \u0915\u094d\u0937\u0947\u0924\u094d\u0930 NTS \u0915\u0947 \u0906\u0902\u0924\u0930\u093f\u0915 \u0938\u0921\u093c\u0915 \u092a\u094d\u0930\u092c\u0902\u0927\u0928 \u0938\u0947 \u091c\u0941\u0921\u093c\u093e \u0939\u0948, \u091c\u093f\u0938\u0915\u0940 \u092a\u0915\u094d\u0915\u0940 \u0938\u0921\u093c\u0915\u0947\u0902 \u0926\u0915\u094d\u0937\u093f\u0923 \u092e\u0947\u0902 \u092e\u0930\u0915\u0930\u0940 \u0915\u0940 \u0913\u0930 \u0914\u0930 \u092a\u0936\u094d\u091a\u093f\u092e \u092e\u0947\u0902 \u092f\u0941\u0915\u094d\u0915\u093e \u092b\u094d\u0932\u0948\u091f \u0915\u0940 \u0913\u0930 \u091c\u093e\u0924\u0940 \u0939\u0948\u0902\u0964 \u091d\u0940\u0932 \u0938\u0947 \u0909\u0924\u094d\u0924\u0930 \u092a\u0942\u0930\u094d\u0935 \u0915\u0940 \u0913\u0930 \u092c\u0922\u093c\u0924\u0947 \u0939\u0941\u090f \u0935\u094d\u092f\u093e\u092a\u0915 \u0914\u0930 \u0914\u0930 
\u0938\u0941\u0935\u094d\u092f\u0935\u0938\u094d\u0925\u093f\u0924 \u0917\u094d\u0930\u0942\u092e \u091d\u0940\u0932 \u0915\u0940 \u0938\u0921\u093c\u0915\u0947\u0902 \u090f\u0915 \u0926\u0930\u094d\u0930\u0947 \u0915\u0947 \u091c\u0930\u093f\u092f\u0947 \u092a\u0947\u091a\u0940\u0926\u093e \u092a\u0939\u093e\u0921\u093c\u093f\u092f\u094b\u0902 \u0938\u0947 \u0939\u094b\u0915\u0930 \u0917\u0941\u091c\u0930\u0924\u0940 \u0939\u0948\u0902\u0964 \u092a\u0939\u0932\u0947 \u0938\u0921\u093c\u0915\u0947\u0902 \u0917\u094d\u0930\u0942\u092e \u0918\u093e\u091f\u0940",
|
475 |
+
"question": "\u091d\u0940\u0932 \u0915\u0947 \u0938\u093e\u092a\u0947\u0915\u094d\u0937 \u0917\u094d\u0930\u0942\u092e \u0932\u0947\u0915 \u0930\u094b\u0921 \u0915\u0939\u093e\u0901 \u091c\u093e\u0924\u0940 \u0925\u0940?"
|
476 |
+
}
|
477 |
+
```
|
478 |
+
|
479 |
+
#### nc
|
480 |
+
|
481 |
+
An example of 'test.es' looks as follows.
|
482 |
+
|
483 |
+
```
|
484 |
+
{
|
485 |
+
"news_body": "El bizcocho es seguramente el producto m\u00e1s b\u00e1sico y sencillo de toda la reposter\u00eda : consiste en poco m\u00e1s que mezclar unos cuantos ingredientes, meterlos al horno y esperar a que se hagan. Por obra y gracia del impulsor qu\u00edmico, tambi\u00e9n conocido como \"levadura de tipo Royal\", despu\u00e9s de un rato de calorcito esta combinaci\u00f3n de harina, az\u00facar, huevo, grasa -aceite o mantequilla- y l\u00e1cteo se transforma en uno de los productos m\u00e1s deliciosos que existen para desayunar o merendar . Por muy manazas que seas, es m\u00e1s que probable que tu bizcocho casero supere en calidad a cualquier infamia industrial envasada. Para lograr un bizcocho digno de admiraci\u00f3n s\u00f3lo tienes que respetar unas pocas normas que afectan a los ingredientes, proporciones, mezclado, horneado y desmoldado. Todas las tienes resumidas en unos dos minutos el v\u00eddeo de arriba, en el que adem \u00e1s aprender\u00e1s alg\u00fan truquillo para que tu bizcochaco quede m\u00e1s fino, jugoso, esponjoso y amoroso. M\u00e1s en MSN:",
|
486 |
+
"news_category": "foodanddrink",
|
487 |
+
"news_title": "Cocina para lerdos: las leyes del bizcocho"
|
488 |
+
}
|
489 |
+
```
|
490 |
+
|
491 |
+
#### xnli
|
492 |
+
|
493 |
+
An example of 'validation.th' looks as follows.
|
494 |
+
|
495 |
+
```
|
496 |
+
{
|
497 |
+
"hypothesis": "\u0e40\u0e02\u0e32\u0e42\u0e17\u0e23\u0e2b\u0e32\u0e40\u0e40\u0e21\u0e48\u0e02\u0e2d\u0e07\u0e40\u0e02\u0e32\u0e2d\u0e22\u0e48\u0e32\u0e07\u0e23\u0e27\u0e14\u0e40\u0e23\u0e47\u0e27\u0e2b\u0e25\u0e31\u0e07\u0e08\u0e32\u0e01\u0e17\u0e35\u0e48\u0e23\u0e16\u0e42\u0e23\u0e07\u0e40\u0e23\u0e35\u0e22\u0e19\u0e2a\u0e48\u0e07\u0e40\u0e02\u0e32\u0e40\u0e40\u0e25\u0e49\u0e27",
|
498 |
+
"label": 1,
|
499 |
+
"premise": "\u0e41\u0e25\u0e30\u0e40\u0e02\u0e32\u0e1e\u0e39\u0e14\u0e27\u0e48\u0e32, \u0e21\u0e48\u0e32\u0e21\u0e4a\u0e32 \u0e1c\u0e21\u0e2d\u0e22\u0e39\u0e48\u0e1a\u0e49\u0e32\u0e19"
|
500 |
+
}
|
501 |
+
```
|
502 |
+
|
503 |
+
#### paws-x
|
504 |
+
|
505 |
+
An example of 'test.es' looks as follows.
|
506 |
+
|
507 |
+
```
|
508 |
+
{
|
509 |
+
"label": 1,
|
510 |
+
"sentence1": "La excepci\u00f3n fue entre fines de 2005 y 2009 cuando jug\u00f3 en Suecia con Carlstad United BK, Serbia con FK Borac \u010ca\u010dak y el FC Terek Grozny de Rusia.",
|
511 |
+
"sentence2": "La excepci\u00f3n se dio entre fines del 2005 y 2009, cuando jug\u00f3 con Suecia en el Carlstad United BK, Serbia con el FK Borac \u010ca\u010dak y el FC Terek Grozny de Rusia."
|
512 |
+
}
|
513 |
+
```
|
514 |
+
|
515 |
+
#### qadsm
|
516 |
+
|
517 |
+
An example of 'train' looks as follows.
|
518 |
+
|
519 |
+
```
|
520 |
+
{
|
521 |
+
"ad_description": "Your New England Cruise Awaits! Holland America Line Official Site.",
|
522 |
+
"ad_title": "New England Cruises",
|
523 |
+
"query": "cruise portland maine",
|
524 |
+
"relevance_label": 1
|
525 |
+
}
|
526 |
+
```
|
527 |
+
|
528 |
+
#### wpr
|
529 |
+
|
530 |
+
An example of 'test.zh' looks as follows.
|
531 |
+
|
532 |
+
```
|
533 |
+
{
|
534 |
+
"query": "maxpro\u5b98\u7f51",
|
535 |
+
"relavance_label": 0,
|
536 |
+
"web_page_snippet": "\u5728\u7ebf\u8d2d\u4e70\uff0c\u552e\u540e\u670d\u52a1\u3002vivo\u667a\u80fd\u624b\u673a\u5f53\u5b63\u660e\u661f\u673a\u578b\u6709NEX\uff0cvivo X21\uff0cvivo X20\uff0c\uff0cvivo X23\u7b49\uff0c\u5728vivo\u5b98\u7f51\u8d2d\u4e70\u624b\u673a\u53ef\u4ee5\u4eab\u53d712 \u671f\u514d\u606f\u4ed8\u6b3e\u3002 \u54c1\u724c Funtouch OS \u4f53\u9a8c\u5e97 | ...",
|
537 |
+
"wed_page_title": "vivo\u667a\u80fd\u624b\u673a\u5b98\u65b9\u7f51\u7ad9-AI\u975e\u51e1\u6444\u5f71X23"
|
538 |
+
}
|
539 |
+
```
|
540 |
+
|
541 |
+
#### qam
|
542 |
+
|
543 |
+
An example of 'validation.en' looks as follows.
|
544 |
+
|
545 |
+
```
|
546 |
+
{
|
547 |
+
"annswer": "Erikson has stated that after the last novel of the Malazan Book of the Fallen was finished, he and Esslemont would write a comprehensive guide tentatively named The Encyclopaedia Malazica.",
|
548 |
+
"label": 0,
|
549 |
+
"question": "main character of malazan book of the fallen"
|
550 |
+
}
|
551 |
+
```
|
552 |
+
|
553 |
+
#### qg
|
554 |
+
|
555 |
+
An example of 'test.de' looks as follows.
|
556 |
+
|
557 |
+
```
|
558 |
+
{
|
559 |
+
"answer_passage": "Medien bei WhatsApp automatisch speichern. Tippen Sie oben rechts unter WhatsApp auf die drei Punkte oder auf die Men\u00fc-Taste Ihres Smartphones. Dort wechseln Sie in die \"Einstellungen\" und von hier aus weiter zu den \"Chat-Einstellungen\". Unter dem Punkt \"Medien Auto-Download\" k\u00f6nnen Sie festlegen, wann die WhatsApp-Bilder heruntergeladen werden sollen.",
|
560 |
+
"question": "speichenn von whats app bilder unterbinden"
|
561 |
+
}
|
562 |
+
```
|
563 |
+
|
564 |
+
#### ntg
|
565 |
+
|
566 |
+
An example of 'test.en' looks as follows.
|
567 |
+
|
568 |
+
```
|
569 |
+
{
|
570 |
+
"news_body": "Check out this vintage Willys Pickup! As they say, the devil is in the details, and it's not every day you see such attention paid to every last area of a restoration like with this 1961 Willys Pickup . Already the Pickup has a unique look that shares some styling with the Jeep, plus some original touches you don't get anywhere else. It's a classy way to show up to any event, all thanks to Hollywood Motors . A burgundy paint job contrasts with white lower panels and the roof. Plenty of tasteful chrome details grace the exterior, including the bumpers, headlight bezels, crossmembers on the grille, hood latches, taillight bezels, exhaust finisher, tailgate hinges, etc. Steel wheels painted white and chrome hubs are a tasteful addition. Beautiful oak side steps and bed strips add a touch of craftsmanship to this ride. This truck is of real showroom quality, thanks to the astoundingly detailed restoration work performed on it, making this Willys Pickup a fierce contender for best of show. Under that beautiful hood is a 225 Buick V6 engine mated to a three-speed manual transmission, so you enjoy an ideal level of control. Four wheel drive is functional, making it that much more utilitarian and downright cool. The tires are new, so you can enjoy a lot of life out of them, while the wheels and hubs are in great condition. Just in case, a fifth wheel with a tire and a side mount are included. Just as important, this Pickup runs smoothly, so you can go cruising or even hit the open road if you're interested in participating in some classic rallies. You might associate Willys with the famous Jeep CJ, but the automaker did produce a fair amount of trucks. The Pickup is quite the unique example, thanks to distinct styling that really turns heads, making it a favorite at quite a few shows. 
Source: Hollywood Motors Check These Rides Out Too: Fear No Trails With These Off-Roaders 1965 Pontiac GTO: American Icon For Sale In Canada Low-Mileage 1955 Chevy 3100 Represents Turn In Pickup Market",
|
571 |
+
"news_title": "This 1961 Willys Pickup Will Let You Cruise In Style"
|
572 |
+
}
|
573 |
+
```
|
574 |
+
|
575 |
+
### Data Fields
|
576 |
+
|
577 |
+
#### ner
|
578 |
+
|
579 |
+
In the following each data field in ner is explained. The data fields are the same among all splits.
|
580 |
+
|
581 |
+
- `words`: a list of words composing the sentence.
|
582 |
+
- `ner`: a list of entity classes corresponding to each word respectively.
|
583 |
+
|
584 |
+
|
585 |
+
#### pos
|
586 |
+
|
587 |
+
In the following each data field in pos is explained. The data fields are the same among all splits.
|
588 |
+
|
589 |
+
- `words`: a list of words composing the sentence.
|
590 |
+
- `pos`: a list of "part-of-speech" classes corresponding to each word respectively.
|
591 |
+
|
592 |
+
|
593 |
+
#### mlqa
|
594 |
+
|
595 |
+
In the following each data field in mlqa is explained. The data fields are the same among all splits.
|
596 |
+
|
597 |
+
- `context`: a string, the context containing the answer.
|
598 |
+
- `question`: a string, the question to be answered.
|
599 |
+
- `answers`: a string, the answer to `question`.
|
600 |
+
|
601 |
+
|
602 |
+
#### nc
|
603 |
+
|
604 |
+
In the following each data field in nc is explained. The data fields are the same among all splits.
|
605 |
+
|
606 |
+
- `news_title`: a string, the title of the news report.
|
607 |
+
- `news_body`: a string, the actual news report.
|
608 |
+
- `news_category`: a string, the category of the news report, *e.g.* `foodanddrink`
|
609 |
+
|
610 |
+
|
611 |
+
#### xnli
|
612 |
+
|
613 |
+
In the following each data field in xnli is explained. The data fields are the same among all splits.
|
614 |
+
|
615 |
+
- `premise`: a string, the context/premise, *i.e.* the first sentence for natural language inference.
|
616 |
+
- `hypothesis`: a string, a sentence whereas its relation to `premise` is to be classified, *i.e.* the second sentence for natural language inference.
|
617 |
+
- `label`: a class category (int), the natural language inference relation class between `hypothesis` and `premise`. One of 0: entailment, 1: contradiction, 2: neutral.
|
618 |
+
|
619 |
+
|
620 |
+
#### paws-x
|
621 |
+
|
622 |
+
In the following each data field in paws-x is explained. The data fields are the same among all splits.
|
623 |
+
|
624 |
+
- `sentence1`: a string, a sentence.
|
625 |
+
- `sentence2`: a string, a sentence whereas the sentence is either a paraphrase of `sentence1` or not.
|
626 |
+
- `label`: a class label (int), whether `sentence2` is a paraphrase of `sentence1`. One of 0: different, 1: same.
|
627 |
+
|
628 |
+
|
629 |
+
#### qadsm
|
630 |
+
|
631 |
+
In the following each data field in qadsm is explained. The data fields are the same among all splits.
|
632 |
+
|
633 |
+
- `query`: a string, the search query one would insert into a search engine.
|
634 |
+
- `ad_title`: a string, the title of the advertisement.
|
635 |
+
- `ad_description`: a string, the content of the advertisement, *i.e.* the main body.
|
636 |
+
- `relevance_label`: a class label (int), how relevant the advertisement `ad_title` + `ad_description` is to the search query `query`. One of 0: Bad, 1: Good.
|
637 |
+
|
638 |
+
|
639 |
+
#### wpr
|
640 |
+
|
641 |
+
In the following each data field in wpr is explained. The data fields are the same among all splits.
|
642 |
+
|
643 |
+
- `query`: a string, the search query one would insert into a search engine.
|
644 |
+
- `web_page_title`: a string, the title of a web page.
|
645 |
+
- `web_page_snippet`: a string, the content of a web page, *i.e.* the main body.
|
646 |
+
- `relavance_label`: a class label (int), how relevant the web page `web_page_title` + `web_page_snippet` is to the search query `query`. One of 0: Bad, 1: Fair, 2: Good, 3: Excellent, 4: Perfect.
|
647 |
+
|
648 |
+
|
649 |
+
#### qam
|
650 |
+
|
651 |
+
In the following each data field in qam is explained. The data fields are the same among all splits.
|
652 |
+
|
653 |
+
- `question`: a string, a question.
|
654 |
+
- `answer`: a string, a possible answer to `question`.
|
655 |
+
- `label`: a class label (int), whether the `answer` is relevant to the `question`. One of 0: False, 1: True.
|
656 |
+
|
657 |
+
|
658 |
+
#### qg
|
659 |
+
|
660 |
+
In the following each data field in qg is explained. The data fields are the same among all splits.
|
661 |
+
|
662 |
+
- `answer_passage`: a string, a detailed answer to the `question`.
|
663 |
+
- `question`: a string, a question.
|
664 |
+
|
665 |
+
|
666 |
+
#### ntg
|
667 |
+
|
668 |
+
In the following each data field in ntg is explained. The data fields are the same among all splits.
|
669 |
+
|
670 |
+
- `news_body`: a string, the content of a news article.
|
671 |
+
- `news_title`: a string, the title corresponding to the news article `news_body`.
|
672 |
+
|
673 |
+
|
674 |
+
### Data Splits
|
675 |
+
|
676 |
+
#### ner
|
677 |
+
|
678 |
+
The following table shows the number of data samples/number of rows for each split in ner.
|
679 |
+
|
680 |
+
| |train|validation.en|validation.de|validation.es|validation.nl|test.en|test.de|test.es|test.nl|
|
681 |
+
|---|----:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|
|
682 |
+
|ner|14042| 3252| 2874| 1923| 2895| 3454| 3007| 1523| 5202|
|
683 |
+
|
684 |
+
|
685 |
+
#### pos
|
686 |
+
|
687 |
+
The following table shows the number of data samples/number of rows for each split in pos.
|
688 |
+
|
689 |
+
| |train|validation.en|validation.de|validation.es|validation.nl|validation.bg|validation.el|validation.fr|validation.pl|validation.tr|validation.vi|validation.zh|validation.ur|validation.hi|validation.it|validation.ar|validation.ru|validation.th|test.en|test.de|test.es|test.nl|test.bg|test.el|test.fr|test.pl|test.tr|test.vi|test.zh|test.ur|test.hi|test.it|test.ar|test.ru|test.th|
|
690 |
+
|---|----:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|
|
691 |
+
|pos|25376| 2001| 798| 1399| 717| 1114| 402| 1475| 2214| 987| 799| 499| 551| 1658| 563| 908| 578| 497| 2076| 976| 425| 595| 1115| 455| 415| 2214| 982| 799| 499| 534| 1683| 481| 679| 600| 497|
|
692 |
+
|
693 |
+
|
694 |
+
#### mlqa
|
695 |
+
|
696 |
+
The following table shows the number of data samples/number of rows for each split in mlqa.
|
697 |
+
|
698 |
+
| |train|validation.en|validation.de|validation.ar|validation.es|validation.hi|validation.vi|validation.zh|test.en|test.de|test.ar|test.es|test.hi|test.vi|test.zh|
|
699 |
+
|----|----:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|------:|------:|------:|
|
700 |
+
|mlqa|87599| 1148| 512| 517| 500| 507| 511| 504| 11590| 4517| 5335| 5253| 4918| 5495| 5137|
|
701 |
+
|
702 |
+
|
703 |
+
#### nc
|
704 |
+
|
705 |
+
The following table shows the number of data samples/number of rows for each split in nc.
|
706 |
+
|
707 |
+
| |train |validation.en|validation.de|validation.es|validation.fr|validation.ru|test.en|test.de|test.es|test.fr|test.ru|
|
708 |
+
|---|-----:|------------:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|------:|
|
709 |
+
|nc |100000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000|
|
710 |
+
|
711 |
+
|
712 |
+
#### xnli
|
713 |
+
|
714 |
+
The following table shows the number of data samples/number of rows for each split in xnli.
|
715 |
+
|
716 |
+
| |train |validation.en|validation.ar|validation.bg|validation.de|validation.el|validation.es|validation.fr|validation.hi|validation.ru|validation.sw|validation.th|validation.tr|validation.ur|validation.vi|validation.zh|test.en|test.ar|test.bg|test.de|test.el|test.es|test.fr|test.hi|test.ru|test.sw|test.th|test.tr|test.ur|test.vi|test.zh|
|
717 |
+
|----|-----:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|
|
718 |
+
|xnli|392702| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 2490| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010| 5010|
|
719 |
+
|
720 |
+
|
743 |
+
|
744 |
+
|
745 |
+
#### paws-x
|
746 |
+
|
747 |
+
The following table shows the number of data samples/number of rows for each split in paws-x.
|
748 |
+
|
749 |
+
| |train|validation.en|validation.de|validation.es|validation.fr|test.en|test.de|test.es|test.fr|
|
750 |
+
|------|----:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|
|
751 |
+
|paws-x|49401| 2000| 2000| 2000| 2000| 2000| 2000| 2000| 2000|
|
752 |
+
|
753 |
+
|
754 |
+
#### qadsm
|
755 |
+
|
756 |
+
The following table shows the number of data samples/number of rows for each split in qadsm.
|
757 |
+
|
758 |
+
| |train |validation.en|validation.de|validation.fr|test.en|test.de|test.fr|
|
759 |
+
|-----|-----:|------------:|------------:|------------:|------:|------:|------:|
|
760 |
+
|qadsm|100000| 10000| 10000| 10000| 10000| 10000| 10000|
|
761 |
+
|
762 |
+
|
763 |
+
#### wpr
|
764 |
+
|
765 |
+
The following table shows the number of data samples/number of rows for each split in wpr.
|
766 |
+
|
767 |
+
| |train|validation.en|validation.de|validation.es|validation.fr|validation.it|validation.pt|validation.zh|test.en|test.de|test.es|test.fr|test.it|test.pt|test.zh|
|
768 |
+
|---|----:|------------:|------------:|------------:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|------:|------:|------:|
|
769 |
+
|wpr|99997| 10008| 10004| 10004| 10005| 10003| 10001| 10002| 10004| 9997| 10006| 10020| 10001| 10015| 9999|
|
770 |
+
|
771 |
+
|
772 |
+
#### qam
|
773 |
+
|
774 |
+
The following table shows the number of data samples/number of rows for each split in qam.
|
775 |
+
|
776 |
+
| |train |validation.en|validation.de|validation.fr|test.en|test.de|test.fr|
|
777 |
+
|---|-----:|------------:|------------:|------------:|------:|------:|------:|
|
778 |
+
|qam|100000| 10000| 10000| 10000| 10000| 10000| 10000|
|
779 |
+
|
780 |
+
|
781 |
+
#### qg
|
782 |
+
|
783 |
+
The following table shows the number of data samples/number of rows for each split in qg.
|
784 |
+
|
785 |
+
| |train |validation.en|validation.de|validation.es|validation.fr|validation.it|validation.pt|test.en|test.de|test.es|test.fr|test.it|test.pt|
|
786 |
+
|---|-----:|------------:|------------:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|------:|------:|
|
787 |
+
|qg |100000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000|
|
788 |
+
|
789 |
+
|
790 |
+
#### ntg
|
791 |
+
|
792 |
+
The following table shows the number of data samples/number of rows for each split in ntg.
|
793 |
+
|
794 |
+
| |train |validation.en|validation.de|validation.es|validation.fr|validation.ru|test.en|test.de|test.es|test.fr|test.ru|
|
795 |
+
|---|-----:|------------:|------------:|------------:|------------:|------------:|------:|------:|------:|------:|------:|
|
796 |
+
|ntg|300000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000| 10000|
|
797 |
+
|
798 |
+
## Dataset Creation
|
799 |
+
|
800 |
+
### Curation Rationale
|
801 |
+
|
802 |
+
[More Information Needed]
|
803 |
+
|
804 |
+
### Source Data
|
805 |
+
|
806 |
+
#### Initial Data Collection and Normalization
|
807 |
+
|
808 |
+
[More Information Needed]
|
809 |
+
|
810 |
+
#### Who are the source language producers?
|
811 |
+
|
812 |
+
[More Information Needed]
|
813 |
+
|
814 |
+
### Annotations
|
815 |
+
|
816 |
+
[More Information Needed]
|
817 |
+
|
818 |
+
#### Annotation process
|
819 |
+
|
820 |
+
[More Information Needed]
|
821 |
+
|
822 |
+
#### Who are the annotators?
|
823 |
+
|
824 |
+
[More Information Needed]
|
825 |
+
|
826 |
+
### Personal and Sensitive Information
|
827 |
+
|
828 |
+
[More Information Needed]
|
829 |
+
|
830 |
+
## Considerations for Using the Data
|
831 |
+
|
832 |
+
### Social Impact of Dataset
|
833 |
+
|
834 |
+
[More Information Needed]
|
835 |
+
|
836 |
+
### Discussion of Biases
|
837 |
+
|
838 |
+
[More Information Needed]
|
839 |
+
|
840 |
+
### Other Known Limitations
|
841 |
+
|
842 |
+
[More Information Needed]
|
843 |
+
|
844 |
+
## Additional Information
|
845 |
+
|
846 |
+
### Dataset Curators
|
847 |
+
|
848 |
+
The dataset is maintained mainly by Yaobo Liang, Yeyun Gong, Nan Duan, Ming Gong, Linjun Shou, and Daniel Campos from Microsoft Research.
|
849 |
+
|
850 |
+
### Licensing Information
|
851 |
+
|
852 |
+
The licensing status of the dataset hinges on the legal status of [XGLUE](https://microsoft.github.io/XGLUE/) which is unclear.
|
853 |
+
|
854 |
+
### Citation Information
|
855 |
+
|
856 |
+
```
|
857 |
+
@article{Liang2020XGLUEAN,
|
858 |
+
title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},
|
859 |
+
author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos and Rangan Majumder and Ming Zhou},
|
860 |
+
journal={arXiv},
|
861 |
+
year={2020},
|
862 |
+
volume={abs/2004.01401}
|
863 |
+
}
|
864 |
+
```
|
dataset_infos.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"ner": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "@article{Sang2003IntroductionTT,\n title={Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition},\n author={Erik F. Tjong Kim Sang and Fien De Meulder},\n journal={ArXiv},\n year={2003},\n volume={cs.CL/0306050}\n},\n@article{Sang2002IntroductionTT,\n title={Introduction to the CoNLL-2002 Shared Task: Language-Independent Named Entity Recognition},\n author={Erik F. Tjong Kim Sang},\n journal={ArXiv},\n year={2002},\n volume={cs.CL/0209010}\n}\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "https://www.clips.uantwerpen.be/conll2003/ner/", "license": "", "features": {"words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner": {"feature": {"num_classes": 9, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"], "names_file": 
null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "ner", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3445854, "num_examples": 14042, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 866569, "num_examples": 3252, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 917967, "num_examples": 2874, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 888551, "num_examples": 1923, "dataset_name": "x_glue"}, "validation.nl": {"name": "validation.nl", "num_bytes": 659144, "num_examples": 2895, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 784976, "num_examples": 3454, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 922741, "num_examples": 3007, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 864804, "num_examples": 1523, "dataset_name": "x_glue"}, "test.nl": {"name": "test.nl", "num_bytes": 1196660, "num_examples": 5202, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 10547266, "size_in_bytes": 886453137}, "pos": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation 
(QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "@misc{11234/1-3105,\n title={Universal Dependencies 2.5},\n author={Zeman, Daniel and Nivre, Joakim and Abrams, Mitchell and Aepli, et al.},\n url={http://hdl.handle.net/11234/1-3105},\n note={{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{'U}FAL}), Faculty of Mathematics and Physics, Charles University},\n copyright={Licence Universal Dependencies v2.5},\n year={2019}\n}\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "https://universaldependencies.org/", "license": "", "features": {"words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "pos", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7279459, "num_examples": 25376, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 421410, 
"num_examples": 2001, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 219328, "num_examples": 798, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 620491, "num_examples": 1399, "dataset_name": "x_glue"}, "validation.nl": {"name": "validation.nl", "num_bytes": 198003, "num_examples": 717, "dataset_name": "x_glue"}, "validation.bg": {"name": "validation.bg", "num_bytes": 346802, "num_examples": 1114, "dataset_name": "x_glue"}, "validation.el": {"name": "validation.el", "num_bytes": 229447, "num_examples": 402, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 600964, "num_examples": 1475, "dataset_name": "x_glue"}, "validation.pl": {"name": "validation.pl", "num_bytes": 620694, "num_examples": 2214, "dataset_name": "x_glue"}, "validation.tr": {"name": "validation.tr", "num_bytes": 186196, "num_examples": 987, "dataset_name": "x_glue"}, "validation.vi": {"name": "validation.vi", "num_bytes": 203669, "num_examples": 799, "dataset_name": "x_glue"}, "validation.zh": {"name": "validation.zh", "num_bytes": 212579, "num_examples": 499, "dataset_name": "x_glue"}, "validation.ur": {"name": "validation.ur", "num_bytes": 284016, "num_examples": 551, "dataset_name": "x_glue"}, "validation.hi": {"name": "validation.hi", "num_bytes": 838700, "num_examples": 1658, "dataset_name": "x_glue"}, "validation.it": {"name": "validation.it", "num_bytes": 198608, "num_examples": 563, "dataset_name": "x_glue"}, "validation.ar": {"name": "validation.ar", "num_bytes": 592943, "num_examples": 908, "dataset_name": "x_glue"}, "validation.ru": {"name": "validation.ru", "num_bytes": 261563, "num_examples": 578, "dataset_name": "x_glue"}, "validation.th": {"name": "validation.th", "num_bytes": 272834, "num_examples": 497, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 420613, "num_examples": 2076, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 291759, 
"num_examples": 976, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 200003, "num_examples": 425, "dataset_name": "x_glue"}, "test.nl": {"name": "test.nl", "num_bytes": 193337, "num_examples": 595, "dataset_name": "x_glue"}, "test.bg": {"name": "test.bg", "num_bytes": 339460, "num_examples": 1115, "dataset_name": "x_glue"}, "test.el": {"name": "test.el", "num_bytes": 235137, "num_examples": 455, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 166865, "num_examples": 415, "dataset_name": "x_glue"}, "test.pl": {"name": "test.pl", "num_bytes": 600534, "num_examples": 2214, "dataset_name": "x_glue"}, "test.tr": {"name": "test.tr", "num_bytes": 186519, "num_examples": 982, "dataset_name": "x_glue"}, "test.vi": {"name": "test.vi", "num_bytes": 211408, "num_examples": 799, "dataset_name": "x_glue"}, "test.zh": {"name": "test.zh", "num_bytes": 202055, "num_examples": 499, "dataset_name": "x_glue"}, "test.ur": {"name": "test.ur", "num_bytes": 288189, "num_examples": 534, "dataset_name": "x_glue"}, "test.hi": {"name": "test.hi", "num_bytes": 839659, "num_examples": 1683, "dataset_name": "x_glue"}, "test.it": {"name": "test.it", "num_bytes": 173861, "num_examples": 481, "dataset_name": "x_glue"}, "test.ar": {"name": "test.ar", "num_bytes": 561709, "num_examples": 679, "dataset_name": "x_glue"}, "test.ru": {"name": "test.ru", "num_bytes": 255393, "num_examples": 600, "dataset_name": "x_glue"}, "test.th": {"name": "test.th", "num_bytes": 272834, "num_examples": 497, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 19027041, "size_in_bytes": 894932912}, "mlqa": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with 
respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "@article{Lewis2019MLQAEC,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Patrick Lewis and Barlas Oguz and Ruty Rinott and Sebastian Riedel and Holger Schwenk},\n journal={ArXiv},\n year={2019},\n volume={abs/1910.07475}\n}\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "mlqa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 75307933, "num_examples": 87599, "dataset_name": 
"x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 1255587, "num_examples": 1148, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 454258, "num_examples": 512, "dataset_name": "x_glue"}, "validation.ar": {"name": "validation.ar", "num_bytes": 785493, "num_examples": 517, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 388625, "num_examples": 500, "dataset_name": "x_glue"}, "validation.hi": {"name": "validation.hi", "num_bytes": 1092167, "num_examples": 507, "dataset_name": "x_glue"}, "validation.vi": {"name": "validation.vi", "num_bytes": 692227, "num_examples": 511, "dataset_name": "x_glue"}, "validation.zh": {"name": "validation.zh", "num_bytes": 411213, "num_examples": 504, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 13264513, "num_examples": 11590, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 4070659, "num_examples": 4517, "dataset_name": "x_glue"}, "test.ar": {"name": "test.ar", "num_bytes": 7976090, "num_examples": 5335, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 4044224, "num_examples": 5253, "dataset_name": "x_glue"}, "test.hi": {"name": "test.hi", "num_bytes": 11385051, "num_examples": 4918, "dataset_name": "x_glue"}, "test.vi": {"name": "test.vi", "num_bytes": 7559078, "num_examples": 5495, "dataset_name": "x_glue"}, "test.zh": {"name": "test.zh", "num_bytes": 4092921, "num_examples": 5137, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 132780039, "size_in_bytes": 1008685910}, "nc": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language 
understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "", "license": "", "features": {"news_title": {"dtype": "string", "id": null, "_type": "Value"}, "news_body": {"dtype": "string", "id": null, "_type": "Value"}, "news_category": {"num_classes": 10, "names": ["foodanddrink", "sports", "travel", "finance", "lifestyle", "news", "entertainment", "health", "video", "autos"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "nc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 280615806, "num_examples": 100000, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 33389140, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 26757254, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 31781308, "num_examples": 
10000, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 27154099, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.ru": {"name": "validation.ru", "num_bytes": 46053007, "num_examples": 10000, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 34437987, "num_examples": 10000, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 26632007, "num_examples": 10000, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 31350078, "num_examples": 10000, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 27589545, "num_examples": 10000, "dataset_name": "x_glue"}, "test.ru": {"name": "test.ru", "num_bytes": 46183830, "num_examples": 10000, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 611944061, "size_in_bytes": 1487849932}, "xnli": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "@inproceedings{Conneau2018XNLIEC,\n title={XNLI: Evaluating Cross-lingual Sentence Representations},\n author={Alexis Conneau and Guillaume Lample and Ruty Rinott and Adina Williams and Samuel R. 
Bowman and Holger Schwenk and Veselin Stoyanov},\n booktitle={EMNLP},\n year={2018}\n}\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "https://github.com/facebookresearch/XNLI", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "xnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 74444346, "num_examples": 392702, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 433471, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.ar": {"name": "validation.ar", "num_bytes": 633009, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.bg": {"name": "validation.bg", "num_bytes": 774069, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 494612, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.el": {"name": "validation.el", "num_bytes": 841234, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 478430, "num_examples": 2490, "dataset_name": 
"x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 510112, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.hi": {"name": "validation.hi", "num_bytes": 1023923, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.ru": {"name": "validation.ru", "num_bytes": 786450, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.sw": {"name": "validation.sw", "num_bytes": 429858, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.th": {"name": "validation.th", "num_bytes": 1061168, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.tr": {"name": "validation.tr", "num_bytes": 459316, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.ur": {"name": "validation.ur", "num_bytes": 699960, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.vi": {"name": "validation.vi", "num_bytes": 590688, "num_examples": 2490, "dataset_name": "x_glue"}, "validation.zh": {"name": "validation.zh", "num_bytes": 384859, "num_examples": 2490, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 875142, "num_examples": 5010, "dataset_name": "x_glue"}, "test.ar": {"name": "test.ar", "num_bytes": 1294561, "num_examples": 5010, "dataset_name": "x_glue"}, "test.bg": {"name": "test.bg", "num_bytes": 1573042, "num_examples": 5010, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 996487, "num_examples": 5010, "dataset_name": "x_glue"}, "test.el": {"name": "test.el", "num_bytes": 1704793, "num_examples": 5010, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 969821, "num_examples": 5010, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 1029247, "num_examples": 5010, "dataset_name": "x_glue"}, "test.hi": {"name": "test.hi", "num_bytes": 2073081, "num_examples": 5010, "dataset_name": "x_glue"}, "test.ru": {"name": "test.ru", "num_bytes": 1603474, "num_examples": 5010, "dataset_name": "x_glue"}, "test.sw": {"name": "test.sw", "num_bytes": 871659, 
"num_examples": 5010, "dataset_name": "x_glue"}, "test.th": {"name": "test.th", "num_bytes": 2147023, "num_examples": 5010, "dataset_name": "x_glue"}, "test.tr": {"name": "test.tr", "num_bytes": 934942, "num_examples": 5010, "dataset_name": "x_glue"}, "test.ur": {"name": "test.ur", "num_bytes": 1416246, "num_examples": 5010, "dataset_name": "x_glue"}, "test.vi": {"name": "test.vi", "num_bytes": 1190225, "num_examples": 5010, "dataset_name": "x_glue"}, "test.zh": {"name": "test.zh", "num_bytes": 777937, "num_examples": 5010, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 103503185, "size_in_bytes": 979409056}, "paws-x": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "@article{Yang2019PAWSXAC,\n title={PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification},\n author={Yinfei Yang and Yuan Zhang and Chris Tar and Jason Baldridge},\n journal={ArXiv},\n year={2019},\n volume={abs/1908.11828}\n}\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan 
and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "https://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["different", "same"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "paws-x", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12018349, "num_examples": 49401, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 484287, "num_examples": 2000, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 506009, "num_examples": 2000, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 505888, "num_examples": 2000, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 525031, "num_examples": 2000, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 486734, "num_examples": 2000, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 516214, "num_examples": 2000, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 511111, "num_examples": 2000, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 527101, "num_examples": 2000, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": 
"e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 16080724, "size_in_bytes": 891986595}, "qadsm": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "", "license": "", "features": {"query": {"dtype": "string", "id": null, "_type": "Value"}, "ad_title": {"dtype": "string", "id": null, "_type": "Value"}, "ad_description": {"dtype": "string", "id": null, "_type": "Value"}, "relevance_label": {"num_classes": 2, "names": ["Bad", "Good"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "qadsm", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12528141, "num_examples": 100000, "dataset_name": 
"x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 1248839, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 1566011, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 1651804, "num_examples": 10000, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 1236997, "num_examples": 10000, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 1563985, "num_examples": 10000, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 1594118, "num_examples": 10000, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 21389895, "size_in_bytes": 897295766}, "wpr": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and 
Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "", "license": "", "features": {"query": {"dtype": "string", "id": null, "_type": "Value"}, "web_page_title": {"dtype": "string", "id": null, "_type": "Value"}, "web_page_snippet": {"dtype": "string", "id": null, "_type": "Value"}, "relavance_label": {"num_classes": 5, "names": ["Bad", "Fair", "Good", "Excellent", "Perfect"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "wpr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 33885931, "num_examples": 99997, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 3417760, "num_examples": 10008, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 2929029, "num_examples": 10004, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 2451026, "num_examples": 10004, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 3055899, "num_examples": 10005, "dataset_name": "x_glue"}, "validation.it": {"name": "validation.it", "num_bytes": 2416388, "num_examples": 10003, "dataset_name": "x_glue"}, "validation.pt": {"name": "validation.pt", "num_bytes": 2449797, "num_examples": 10001, "dataset_name": "x_glue"}, "validation.zh": {"name": "validation.zh", "num_bytes": 3118577, "num_examples": 10002, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 3402487, "num_examples": 10004, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 2923577, "num_examples": 9997, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 2422895, "num_examples": 10006, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", 
"num_bytes": 3059392, "num_examples": 10020, "dataset_name": "x_glue"}, "test.it": {"name": "test.it", "num_bytes": 2403736, "num_examples": 10001, "dataset_name": "x_glue"}, "test.pt": {"name": "test.pt", "num_bytes": 2462350, "num_examples": 10015, "dataset_name": "x_glue"}, "test.zh": {"name": "test.zh", "num_bytes": 3141598, "num_examples": 9999, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 73540442, "size_in_bytes": 949446313}, "qam": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": 
null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["False", "True"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "qam", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 28357964, "num_examples": 100000, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 3085501, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 3304031, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 3142833, "num_examples": 10000, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 3082297, "num_examples": 10000, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 3309496, "num_examples": 10000, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 3140213, "num_examples": 10000, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 47422335, "size_in_bytes": 923328206}, "qg": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": 
"\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "", "license": "", "features": {"answer_passage": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "qg", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 27464034, "num_examples": 100000, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 3047040, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 3270877, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 3341775, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 3175615, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.it": {"name": "validation.it", "num_bytes": 3191193, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.pt": {"name": "validation.pt", "num_bytes": 3328434, "num_examples": 10000, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 3043813, "num_examples": 10000, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 3270190, "num_examples": 10000, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", 
"num_bytes": 3353522, "num_examples": 10000, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 3178352, "num_examples": 10000, "dataset_name": "x_glue"}, "test.it": {"name": "test.it", "num_bytes": 3195684, "num_examples": 10000, "dataset_name": "x_glue"}, "test.pt": {"name": "test.pt", "num_bytes": 3340296, "num_examples": 10000, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 66200825, "size_in_bytes": 942106696}, "ntg": {"description": "XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained\nmodels with respect to cross-lingual natural language understanding and generation.\nThe benchmark is composed of the following 11 tasks:\n- NER\n- POS Tagging (POS)\n- News Classification (NC)\n- MLQA\n- XNLI\n- PAWS-X\n- Query-Ad Matching (QADSM)\n- Web Page Ranking (WPR)\n- QA Matching (QAM)\n- Question Generation (QG)\n- News Title Generation (NTG)\n\nFor more information, please take a look at https://microsoft.github.io/XGLUE/.\n", "citation": "\n@article{Liang2020XGLUEAN,\n title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},\n author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi\n and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei\n Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao\n and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos\n and Rangan Majumder and Ming Zhou},\n journal={arXiv},\n year={2020},\n volume={abs/2004.01401}\n}\n", "homepage": "", "license": "", "features": {"news_body": {"dtype": "string", "id": null, "_type": "Value"}, "news_title": {"dtype": "string", 
"id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "x_glue", "config_name": "ntg", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 890709581, "num_examples": 300000, "dataset_name": "x_glue"}, "validation.en": {"name": "validation.en", "num_bytes": 34317076, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.de": {"name": "validation.de", "num_bytes": 27404379, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.es": {"name": "validation.es", "num_bytes": 30896109, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.fr": {"name": "validation.fr", "num_bytes": 27261523, "num_examples": 10000, "dataset_name": "x_glue"}, "validation.ru": {"name": "validation.ru", "num_bytes": 43247386, "num_examples": 10000, "dataset_name": "x_glue"}, "test.en": {"name": "test.en", "num_bytes": 33697284, "num_examples": 10000, "dataset_name": "x_glue"}, "test.de": {"name": "test.de", "num_bytes": 26738202, "num_examples": 10000, "dataset_name": "x_glue"}, "test.es": {"name": "test.es", "num_bytes": 31111489, "num_examples": 10000, "dataset_name": "x_glue"}, "test.fr": {"name": "test.fr", "num_bytes": 26997447, "num_examples": 10000, "dataset_name": "x_glue"}, "test.ru": {"name": "test.ru", "num_bytes": 44050350, "num_examples": 10000, "dataset_name": "x_glue"}}, "download_checksums": {"https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz": {"num_bytes": 875905871, "checksum": "e11016c02d8565d00119833a16679bbbe0fec437f5ad53c2d3f9eef6fa03f65b"}}, "download_size": 875905871, "post_processing_size": null, "dataset_size": 1216430826, "size_in_bytes": 2092336697}}
|
dummy/mlqa/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7c0ec57d1ce7a4196d847749be1af2e6698d4473cdf4a0c2ebc01724e1b769b4
|
3 |
+
size 273195
|
dummy/nc/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:04d8f0a30ab7b4e59b150fface3a780b305220fd36c7a10e04b2d6a45548955a
|
3 |
+
size 273195
|
dummy/ner/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ae730fdb92ade30143cf51dbba4a72fbe77887123aaafa87f13bd1035befee1c
|
3 |
+
size 5673
|
dummy/ntg/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:de98f80f9390e68cfffe50c14367d8e7a83030751303a05e5bc326efab1882b9
|
3 |
+
size 273195
|
dummy/paws-x/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:efc41331a68e3c65b9243ae83a0777d698996b9949015985bf32bededd9f728e
|
3 |
+
size 273195
|
dummy/pos/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5eb9946758681b7a86fcd532a71e0c11199c62c9b82f3472107dbd629ea57ae5
|
3 |
+
size 21565
|
dummy/qadsm/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3d5918a9006e4fa230771a3833a0c3b76e247bc1c8e7f0d9d85746e735d400be
|
3 |
+
size 273195
|
dummy/qam/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4d6d400603ee01814ffbda274f0ec43af87ca87a6528592a779434603624644e
|
3 |
+
size 273195
|
dummy/qg/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:bc2d159843d141c52d30aee92c7d4600c6fb5125339c82b2c7f6ee4a30032678
|
3 |
+
size 273195
|
dummy/wpr/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:455b242cc79465fe45592ead71500c1311bd4a8cdc4eb5c16a8a81aa470459a9
|
3 |
+
size 273195
|
dummy/xnli/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a1a87b1636a77a580deda50097832cce0b8567dee07b6aabfd2e66ca9b2a418e
|
3 |
+
size 273195
|
xglue.py
ADDED
@@ -0,0 +1,575 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
# Lint as: python3
|
17 |
+
"""The General Language Understanding Evaluation (GLUE) benchmark."""
|
18 |
+
|
19 |
+
from __future__ import absolute_import, division, print_function
|
20 |
+
|
21 |
+
import json
|
22 |
+
import os
|
23 |
+
import textwrap
|
24 |
+
|
25 |
+
import datasets
|
26 |
+
|
27 |
+
|
28 |
+
_XGLUE_CITATION = """\
|
29 |
+
@article{Liang2020XGLUEAN,
|
30 |
+
title={XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},
|
31 |
+
author={Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi
|
32 |
+
and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei
|
33 |
+
Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao
|
34 |
+
and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos
|
35 |
+
and Rangan Majumder and Ming Zhou},
|
36 |
+
journal={arXiv},
|
37 |
+
year={2020},
|
38 |
+
volume={abs/2004.01401}
|
39 |
+
}
|
40 |
+
"""
|
41 |
+
|
42 |
+
_XGLUE_DESCRIPTION = """\
|
43 |
+
XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained
|
44 |
+
models with respect to cross-lingual natural language understanding and generation.
|
45 |
+
The benchmark is composed of the following 11 tasks:
|
46 |
+
- NER
|
47 |
+
- POS Tagging (POS)
|
48 |
+
- News Classification (NC)
|
49 |
+
- MLQA
|
50 |
+
- XNLI
|
51 |
+
- PAWS-X
|
52 |
+
- Query-Ad Matching (QADSM)
|
53 |
+
- Web Page Ranking (WPR)
|
54 |
+
- QA Matching (QAM)
|
55 |
+
- Question Generation (QG)
|
56 |
+
- News Title Generation (NTG)
|
57 |
+
|
58 |
+
For more information, please take a look at https://microsoft.github.io/XGLUE/.
|
59 |
+
"""
|
60 |
+
|
61 |
+
_XGLUE_ALL_DATA = "https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz"
|
62 |
+
|
63 |
+
_LANGUAGES = {
|
64 |
+
"ner": ["en", "de", "es", "nl"],
|
65 |
+
"pos": ["en", "de", "es", "nl", "bg", "el", "fr", "pl", "tr", "vi", "zh", "ur", "hi", "it", "ar", "ru", "th"],
|
66 |
+
"mlqa": ["en", "de", "ar", "es", "hi", "vi", "zh"],
|
67 |
+
"nc": ["en", "de", "es", "fr", "ru"],
|
68 |
+
"xnli": ["en", "ar", "bg", "de", "el", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"],
|
69 |
+
"paws-x": ["en", "de", "es", "fr"],
|
70 |
+
"qadsm": ["en", "de", "fr"],
|
71 |
+
"wpr": ["en", "de", "es", "fr", "it", "pt", "zh"],
|
72 |
+
"qam": ["en", "de", "fr"],
|
73 |
+
"qg": ["en", "de", "es", "fr", "it", "pt"],
|
74 |
+
"ntg": ["en", "de", "es", "fr", "ru"],
|
75 |
+
}
|
76 |
+
|
77 |
+
_PATHS = {
|
78 |
+
"mlqa": {
|
79 |
+
"train": os.path.join("squad1.1", "train-v1.1.json"),
|
80 |
+
"dev": os.path.join("MLQA_V1", "dev", "dev-context-{0}-question-{0}.json"),
|
81 |
+
"test": os.path.join("MLQA_V1", "test", "test-context-{0}-question-{0}.json"),
|
82 |
+
},
|
83 |
+
"xnli": {"train": "multinli.train.en.tsv", "dev": "{}.dev", "test": "{}.test"},
|
84 |
+
"paws-x": {
|
85 |
+
"train": os.path.join("en", "train.tsv"),
|
86 |
+
"dev": os.path.join("{}", "dev_2k.tsv"),
|
87 |
+
"test": os.path.join("{}", "test_2k.tsv"),
|
88 |
+
},
|
89 |
+
}
|
90 |
+
for name in ["ner", "pos"]:
|
91 |
+
_PATHS[name] = {"train": "en.train", "dev": "{}.dev", "test": "{}.test"}
|
92 |
+
for name in ["nc", "qadsm", "wpr", "qam"]:
|
93 |
+
_PATHS[name] = {
|
94 |
+
"train": "xglue." + name + ".en.train",
|
95 |
+
"dev": "xglue." + name + ".{}.dev",
|
96 |
+
"test": "xglue." + name + ".{}.test",
|
97 |
+
}
|
98 |
+
for name in ["qg", "ntg"]:
|
99 |
+
_PATHS[name] = {"train": "xglue." + name + ".en", "dev": "xglue." + name + ".{}", "test": "xglue." + name + ".{}"}
|
100 |
+
|
101 |
+
|
102 |
+
class XGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for one XGLUE task."""

    def __init__(self, data_dir, citation, url, **kwargs):
        """Initialize a config for a single XGLUE task.

        Args:
            data_dir: `string`, path of the task's folder inside the
                downloaded .tar archive
            citation: `string`, citation for the data set
            url: `string`, url for information about the data set
            **kwargs: keyword arguments forwarded to super.
        """
        # Every XGLUE task shares the same dataset version.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.url = url
        self.citation = citation
        self.data_dir = data_dir
|
125 |
+
|
126 |
+
|
127 |
+
class XGlue(datasets.GeneratorBasedBuilder):
|
128 |
+
"""The Cross-lingual Pre-training, Understanding and Generation (XGlue) Benchmark."""
|
129 |
+
|
130 |
+
BUILDER_CONFIGS = [
|
131 |
+
XGlueConfig(
|
132 |
+
name="ner",
|
133 |
+
description=textwrap.dedent(
|
134 |
+
"""\
|
135 |
+
The shared task of CoNLL-2003 concerns language-independent named entity recognition.
|
136 |
+
We will concentrate on four types of named entities:
|
137 |
+
persons, locations, organizations and names of miscellaneous entities
|
138 |
+
that do not belong to the previous three groups.
|
139 |
+
"""
|
140 |
+
),
|
141 |
+
data_dir="NER",
|
142 |
+
citation=textwrap.dedent(
|
143 |
+
"""\
|
144 |
+
@article{Sang2003IntroductionTT,
|
145 |
+
title={Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition},
|
146 |
+
author={Erik F. Tjong Kim Sang and Fien De Meulder},
|
147 |
+
journal={ArXiv},
|
148 |
+
year={2003},
|
149 |
+
volume={cs.CL/0306050}
|
150 |
+
},
|
151 |
+
@article{Sang2002IntroductionTT,
|
152 |
+
title={Introduction to the CoNLL-2002 Shared Task: Language-Independent Named Entity Recognition},
|
153 |
+
author={Erik F. Tjong Kim Sang},
|
154 |
+
journal={ArXiv},
|
155 |
+
year={2002},
|
156 |
+
volume={cs.CL/0209010}
|
157 |
+
}"""
|
158 |
+
),
|
159 |
+
url="https://www.clips.uantwerpen.be/conll2003/ner/",
|
160 |
+
),
|
161 |
+
XGlueConfig(
|
162 |
+
name="pos",
|
163 |
+
description=textwrap.dedent(
|
164 |
+
"""\
|
165 |
+
Universal Dependencies (UD) is a project that is developing cross-linguistically consistent treebank
|
166 |
+
annotation for many languages, with the goal of facilitating multilingual parser development, cross-lingual
|
167 |
+
learning, and parsing research from a language typology perspective. The annotation scheme is based on an
|
168 |
+
evolution of (universal) Stanford dependencies (de Marneffe et al., 2006, 2008, 2014), Google universal
|
169 |
+
part-of-speech tags (Petrov et al., 2012), and the Interset interlingua for morphosyntactic tagsets
|
170 |
+
(Zeman, 2008). The general philosophy is to provide a universal inventory of categories and guidelines
|
171 |
+
to facilitate consistent annotation of similar constructions across languages, while
|
172 |
+
allowing language-specific extensions when necessary.
|
173 |
+
"""
|
174 |
+
),
|
175 |
+
data_dir="POS",
|
176 |
+
citation=textwrap.dedent(
|
177 |
+
"""\
|
178 |
+
@misc{11234/1-3105,
|
179 |
+
title={Universal Dependencies 2.5},
|
180 |
+
author={Zeman, Daniel and Nivre, Joakim and Abrams, Mitchell and Aepli, et al.},
|
181 |
+
url={http://hdl.handle.net/11234/1-3105},
|
182 |
+
note={{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{\'U}FAL}), Faculty of Mathematics and Physics, Charles University},
|
183 |
+
copyright={Licence Universal Dependencies v2.5},
|
184 |
+
year={2019}
|
185 |
+
}"""
|
186 |
+
),
|
187 |
+
url="https://universaldependencies.org/",
|
188 |
+
),
|
189 |
+
XGlueConfig(
|
190 |
+
name="mlqa",
|
191 |
+
description=textwrap.dedent(
|
192 |
+
"""\
|
193 |
+
MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering
|
194 |
+
performance. MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages
|
195 |
+
- English, Arabic, German, Spanish, Hindi, Vietnamese and Simplified Chinese.
|
196 |
+
MLQA is highly parallel, with QA instances parallel between 4 different languages on average.
|
197 |
+
"""
|
198 |
+
),
|
199 |
+
data_dir="MLQA",
|
200 |
+
citation=textwrap.dedent(
|
201 |
+
"""\
|
202 |
+
@article{Lewis2019MLQAEC,
|
203 |
+
title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
|
204 |
+
author={Patrick Lewis and Barlas Oguz and Ruty Rinott and Sebastian Riedel and Holger Schwenk},
|
205 |
+
journal={ArXiv},
|
206 |
+
year={2019},
|
207 |
+
volume={abs/1910.07475}
|
208 |
+
}"""
|
209 |
+
),
|
210 |
+
url="https://github.com/facebookresearch/MLQA",
|
211 |
+
),
|
212 |
+
XGlueConfig(
|
213 |
+
name="nc",
|
214 |
+
description=textwrap.dedent(
|
215 |
+
"""\
|
216 |
+
This task aims to predict the category given a news article. It covers
|
217 |
+
5 languages, including English, Spanish, French,
|
218 |
+
German and Russian. Each labeled instance is a
|
219 |
+
3-tuple: <news title, news body, category>. The
|
220 |
+
category number is 10. We crawl this dataset from
|
221 |
+
a commercial news website. Accuracy (ACC) of
|
222 |
+
the multi-class classification is used as the metric.
|
223 |
+
"""
|
224 |
+
),
|
225 |
+
data_dir="NC",
|
226 |
+
citation="",
|
227 |
+
url="",
|
228 |
+
),
|
229 |
+
XGlueConfig(
|
230 |
+
name="xnli",
|
231 |
+
description=textwrap.dedent(
|
232 |
+
"""\
|
233 |
+
XNLI is a subset of a few thousand examples from MNLI which has been translated
|
234 |
+
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
|
235 |
+
to predict textual entailment (does sentence A imply/contradict/neither sentence
|
236 |
+
B) and is a classification task (given two sentences, predict one of three
|
237 |
+
labels).
|
238 |
+
"""
|
239 |
+
),
|
240 |
+
data_dir="XNLI",
|
241 |
+
citation=textwrap.dedent(
|
242 |
+
"""\
|
243 |
+
@inproceedings{Conneau2018XNLIEC,
|
244 |
+
title={XNLI: Evaluating Cross-lingual Sentence Representations},
|
245 |
+
author={Alexis Conneau and Guillaume Lample and Ruty Rinott and Adina Williams and Samuel R. Bowman and Holger Schwenk and Veselin Stoyanov},
|
246 |
+
booktitle={EMNLP},
|
247 |
+
year={2018}
|
248 |
+
}"""
|
249 |
+
),
|
250 |
+
url="https://github.com/facebookresearch/XNLI",
|
251 |
+
),
|
252 |
+
XGlueConfig(
|
253 |
+
name="paws-x",
|
254 |
+
description=textwrap.dedent(
|
255 |
+
"""\
|
256 |
+
PAWS-X contains 23,659 human translated PAWS (Paraphrase Adversaries from Word Scrambling) evaluation pairs and 296,406 machine translated training pairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All translated pairs are sourced from examples in PAWS-Wiki.
|
257 |
+
"""
|
258 |
+
),
|
259 |
+
data_dir="PAWSX",
|
260 |
+
citation=textwrap.dedent(
|
261 |
+
"""\
|
262 |
+
@article{Yang2019PAWSXAC,
|
263 |
+
title={PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification},
|
264 |
+
author={Yinfei Yang and Yuan Zhang and Chris Tar and Jason Baldridge},
|
265 |
+
journal={ArXiv},
|
266 |
+
year={2019},
|
267 |
+
volume={abs/1908.11828}
|
268 |
+
}"""
|
269 |
+
),
|
270 |
+
url="https://github.com/google-research-datasets/paws/tree/master/pawsx",
|
271 |
+
),
|
272 |
+
XGlueConfig(
|
273 |
+
name="qadsm",
|
274 |
+
description=textwrap.dedent(
|
275 |
+
"""\
|
276 |
+
Query-Ad Matching (QADSM) task aims
|
277 |
+
to predict whether an advertisement (ad) is relevant to an input query. It covers 3 languages, including English, French and German. Each labeled instance is a 4-tuple: <query, ad title, ad description, label>. The label indicates whether the
|
278 |
+
ad is relevant to the query (Good), or not (Bad).
|
279 |
+
This dataset was constructed based on a commercial search engine. Accuracy (ACC) of the binary classification should be used as the metric.
|
280 |
+
"""
|
281 |
+
),
|
282 |
+
data_dir="QADSM",
|
283 |
+
citation="",
|
284 |
+
url="",
|
285 |
+
),
|
286 |
+
XGlueConfig(
|
287 |
+
name="wpr",
|
288 |
+
description=textwrap.dedent(
|
289 |
+
"""\
|
290 |
+
Tthe Web Page Ranking (WPR) task aims to
|
291 |
+
predict whether a web page is relevant to an input query. It covers 7 languages, including English, German, French, Spanish, Italian, Portuguese and Chinese. Each labeled instance is a
|
292 |
+
4-tuple: <query, web page title, web page snippet, label>. The relevance label contains 5 ratings: Perfect (4), Excellent (3), Good (2), Fair (1)
|
293 |
+
and Bad (0). The dataset is constructed based on a
|
294 |
+
commercial search engine. Normalize Discounted
|
295 |
+
Cumulative Gain (nDCG) should be used as the metric.
|
296 |
+
"""
|
297 |
+
),
|
298 |
+
data_dir="WPR",
|
299 |
+
citation="",
|
300 |
+
url="",
|
301 |
+
),
|
302 |
+
XGlueConfig(
|
303 |
+
name="qam",
|
304 |
+
description=textwrap.dedent(
|
305 |
+
"""\
|
306 |
+
The QA Matching (QAM) task aims to predict whether a <question, passage> pair is a QA pair.
|
307 |
+
It covers 3 languages, including English, French
|
308 |
+
and German. Each labeled instance is a 3-tuple:
|
309 |
+
<question, passage, label>. The label indicates
|
310 |
+
whether the passage is the answer of the question
|
311 |
+
(1), or not (0). This dataset is constructed based on
|
312 |
+
a commercial search engine. Accuracy (ACC) of
|
313 |
+
the binary classification should be used as the metric.
|
314 |
+
"""
|
315 |
+
),
|
316 |
+
data_dir="QAM",
|
317 |
+
citation="",
|
318 |
+
url="",
|
319 |
+
),
|
320 |
+
XGlueConfig(
|
321 |
+
name="qg",
|
322 |
+
description=textwrap.dedent(
|
323 |
+
"""\
|
324 |
+
The Question Generation (QG) task aims to
|
325 |
+
generate a question for a given passage. <passage, question> pairs were collected from a commercial search engine. It covers 6 languages, including English, French, German, Spanish, Italian and
|
326 |
+
Portuguese. BLEU-4 score should be used as the metric.
|
327 |
+
"""
|
328 |
+
),
|
329 |
+
data_dir="QG",
|
330 |
+
citation="",
|
331 |
+
url="",
|
332 |
+
),
|
333 |
+
XGlueConfig(
|
334 |
+
name="ntg",
|
335 |
+
description=textwrap.dedent(
|
336 |
+
"""\
|
337 |
+
News Title Generation (NTG) task aims
|
338 |
+
to generate a proper title for a given news body.
|
339 |
+
We collect <news body, news title> pairs from a
|
340 |
+
commercial news website. It covers 5 languages,
|
341 |
+
including German, English, French, Spanish and
|
342 |
+
Russian. BLEU-4 score should be used as the metric.
|
343 |
+
"""
|
344 |
+
),
|
345 |
+
data_dir="NTG",
|
346 |
+
citation="",
|
347 |
+
url="",
|
348 |
+
),
|
349 |
+
]
|
350 |
+
|
351 |
+
def _info(self):
    """Return the ``DatasetInfo`` for the active XGLUE config.

    Each XGLUE task exposes its own feature schema; the schema for the
    selected config is looked up from a per-task table and combined with
    the shared benchmark description and the task-specific citation and
    homepage.
    """
    per_task_features = {
        # Token-level tagging tasks: parallel sequences of tokens and tags.
        "ner": {
            "words": datasets.Sequence(datasets.Value("string")),
            "ner": datasets.Sequence(
                datasets.features.ClassLabel(
                    names=[
                        "O",
                        "B-PER",
                        "I-PER",
                        "B-ORG",
                        "I-ORG",
                        "B-LOC",
                        "I-LOC",
                        "B-MISC",
                        "I-MISC",
                    ]
                )
            ),
        },
        "pos": {
            "words": datasets.Sequence(datasets.Value("string")),
            "pos": datasets.Sequence(
                datasets.features.ClassLabel(
                    names=[
                        "ADJ",
                        "ADP",
                        "ADV",
                        "AUX",
                        "CCONJ",
                        "DET",
                        "INTJ",
                        "NOUN",
                        "NUM",
                        "PART",
                        "PRON",
                        "PROPN",
                        "PUNCT",
                        "SCONJ",
                        "SYM",
                        "VERB",
                        "X",
                    ]
                )
            ),
        },
        # SQuAD-style extractive QA.
        "mlqa": {
            "context": datasets.Value("string"),
            "question": datasets.Value("string"),
            "answers": datasets.features.Sequence(
                {"answer_start": datasets.Value("int32"), "text": datasets.Value("string")}
            ),
        },
        "nc": {
            "news_title": datasets.Value("string"),
            "news_body": datasets.Value("string"),
            "news_category": datasets.ClassLabel(
                names=[
                    "foodanddrink",
                    "sports",
                    "travel",
                    "finance",
                    "lifestyle",
                    "news",
                    "entertainment",
                    "health",
                    "video",
                    "autos",
                ]
            ),
        },
        "xnli": {
            "premise": datasets.Value("string"),
            "hypothesis": datasets.Value("string"),
            "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
        },
        "paws-x": {
            "sentence1": datasets.Value("string"),
            "sentence2": datasets.Value("string"),
            "label": datasets.features.ClassLabel(names=["different", "same"]),
        },
        "qadsm": {
            "query": datasets.Value("string"),
            "ad_title": datasets.Value("string"),
            "ad_description": datasets.Value("string"),
            "relevance_label": datasets.features.ClassLabel(names=["Bad", "Good"]),
        },
        "wpr": {
            "query": datasets.Value("string"),
            "web_page_title": datasets.Value("string"),
            "web_page_snippet": datasets.Value("string"),
            # NOTE: the "relavance_label" spelling is historical and part of the
            # public dataset schema; renaming it would break existing users.
            "relavance_label": datasets.features.ClassLabel(
                names=["Bad", "Fair", "Good", "Excellent", "Perfect"]
            ),
        },
        "qam": {
            "question": datasets.Value("string"),
            "answer": datasets.Value("string"),
            "label": datasets.features.ClassLabel(names=["False", "True"]),
        },
        # Generation tasks: key order matters — _generate_examples maps the
        # first key to the source text and the second to the target text.
        "qg": {
            "answer_passage": datasets.Value("string"),
            "question": datasets.Value("string"),
        },
        "ntg": {
            "news_body": datasets.Value("string"),
            "news_title": datasets.Value("string"),
        },
    }
    features = per_task_features[self.config.name]

    return datasets.DatasetInfo(
        description=_XGLUE_DESCRIPTION,
        features=datasets.Features(features),
        homepage=self.config.url,
        citation=self.config.citation + "\n" + _XGLUE_CITATION,
    )
|
475 |
+
|
476 |
+
def _split_generators(self, dl_manager):
    """Download the full XGLUE archive and build the split generators.

    Returns one TRAIN split plus one ``validation.<lang>`` and one
    ``test.<lang>`` split per language covered by the active task.
    """
    archive_root = dl_manager.download_and_extract(_XGLUE_ALL_DATA)
    task_folder = os.path.join(archive_root, "xglue_full_dataset", self.config.data_dir)
    task = self.config.name
    paths = _PATHS[task]
    languages = _LANGUAGES[task]

    # Training data is English-only, so there is a single TRAIN split.
    generators = [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"data_file": os.path.join(task_folder, paths["train"]), "split": "train"},
        )
    ]

    # One validation split per language, then one test split per language
    # (ordering preserved: train, all dev splits, all test splits).
    for lang in languages:
        generators.append(
            datasets.SplitGenerator(
                name=datasets.Split("validation." + lang),
                gen_kwargs={
                    "data_file": os.path.join(task_folder, paths["dev"].format(lang)),
                    "split": "dev",
                },
            )
        )
    for lang in languages:
        generators.append(
            datasets.SplitGenerator(
                name=datasets.Split("test." + lang),
                gen_kwargs={
                    "data_file": os.path.join(task_folder, paths["test"].format(lang)),
                    "split": "test",
                },
            )
        )
    return generators
|
510 |
+
|
511 |
+
def _generate_examples(self, data_file, split=None):
    """Yield ``(key, example)`` pairs from one data file.

    The parsing strategy depends on the active task:

    - ``mlqa``: SQuAD-style JSON (nested data/paragraphs/qas).
    - ``ner``/``pos``: CoNLL-style files with one ``token tag`` pair per
      line and sentences separated by blank lines.
    - ``ntg``/``qg``: parallel plain-text files; ``data_file`` is a prefix
      completed with ``.src.<split>`` / ``.tgt.<split>``.
    - everything else: tab-separated files with one example per line.

    Args:
        data_file: path (or path prefix for ntg/qg) of the split's data.
        split: split name ("train"/"dev"/"test"); only used by ntg/qg to
            build the actual file names.
    """
    keys = list(self._info().features.keys())

    if self.config.name == "mlqa":
        with open(data_file, encoding="utf-8") as f:
            data = json.load(f)
        for examples in data["data"]:
            for example in examples["paragraphs"]:
                context = example["context"]
                for qa in example["qas"]:
                    answers = qa["answers"]
                    yield qa["id"], {
                        "context": context,
                        "question": qa["question"],
                        "answers": {
                            "answer_start": [answer["answer_start"] for answer in answers],
                            "text": [answer["text"] for answer in answers],
                        },
                    }
    elif self.config.name in ["ner", "pos"]:
        words = []
        tags = []
        idx = -1
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                if line.strip() == "":
                    # Blank line ends the current sentence.
                    if len(words) > 0:
                        idx += 1
                        yield idx, {keys[0]: words, keys[1]: tags}
                        words = []
                        tags = []
                else:
                    splits = line.strip().split(" ")
                    words.append(splits[0])
                    tags.append(splits[1])
        # Bug fix: flush the last sentence. The original only yielded on a
        # blank separator line, silently dropping the final sentence when
        # the file does not end with a trailing blank line.
        if len(words) > 0:
            idx += 1
            yield idx, {keys[0]: words, keys[1]: tags}
    elif self.config.name in ["ntg", "qg"]:
        src_path = data_file + ".src." + split
        tgt_path = data_file + ".tgt." + split
        with open(src_path, encoding="utf-8") as src_f, open(tgt_path, encoding="utf-8") as tgt_f:
            # Source and target files are line-aligned.
            for idx, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
                yield idx, {keys[0]: src_line.strip(), keys[1]: tgt_line.strip()}
    else:
        # Raw file labels -> ClassLabel names declared in _info().
        _process_dict = {
            "paws-x": {"0": "different", "1": "same"},
            "xnli": {"contradictory": "contradiction"},
            "qam": {"0": "False", "1": "True"},
            "wpr": {"0": "Bad", "1": "Fair", "2": "Good", "3": "Excellent", "4": "Perfect"},
        }

        def _process(value):
            # Map a raw value to its canonical label; pass through unchanged
            # when the task (or the value) has no mapping.
            if self.config.name in _process_dict and value in _process_dict[self.config.name]:
                return _process_dict[self.config.name][value]
            return value

        is_tsv = data_file.split(".")[-1] == "tsv"
        with open(data_file, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if is_tsv and idx == 0:
                    continue  # skip the header row of .tsv files
                items = line.strip().split("\t")
                # paws-x rows carry a leading id column that is not a feature.
                yield idx, {
                    key: _process(value)
                    for key, value in zip(keys, items[1:] if self.config.name == "paws-x" else items)
                }
|