system HF staff committed on
Commit
f2ccc99
0 Parent(s):

Update files from the datasets library (from 1.3.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.3.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,960 @@
+ ---
+ annotations_creators:
+   common_gen:
+   - crowdsourced
+   - found
+   cs_restaurants:
+   - crowdsourced
+   - found
+   dart:
+   - crowdsourced
+   - found
+   e2e_nlg:
+   - crowdsourced
+   - found
+   mlsum_de:
+   - found
+   mlsum_es:
+   - found
+   schema_guided_dialog:
+   - crowdsourced
+   totto:
+   - crowdsourced
+   - found
+   web_nlg_en:
+   - crowdsourced
+   - found
+   web_nlg_ru:
+   - crowdsourced
+   - found
+   wiki_auto_asset_turk:
+   - crowdsourced
+   - found
+   wiki_lingua_es_en:
+   - found
+   wiki_lingua_ru_en:
+   - found
+   wiki_lingua_tr_en:
+   - found
+   wiki_lingua_vi_en:
+   - found
+   xsum:
+   - found
+ language_creators:
+   common_gen:
+   - found
+   cs_restaurants:
+   - found
+   dart:
+   - found
+   e2e_nlg:
+   - found
+   mlsum_de:
+   - found
+   mlsum_es:
+   - found
+   schema_guided_dialog:
+   - crowdsourced
+   - machine-generated
+   totto:
+   - found
+   web_nlg_en:
+   - found
+   web_nlg_ru:
+   - found
+   wiki_auto_asset_turk:
+   - found
+   wiki_lingua_es_en:
+   - found
+   wiki_lingua_ru_en:
+   - found
+   wiki_lingua_tr_en:
+   - found
+   wiki_lingua_vi_en:
+   - found
+   xsum:
+   - found
+ languages:
+   common_gen:
+   - en
+   cs_restaurants:
+   - cs
+   dart:
+   - en
+   e2e_nlg:
+   - en
+   mlsum_de:
+   - de
+   mlsum_es:
+   - es
+   schema_guided_dialog:
+   - en
+   totto:
+   - en
+   web_nlg_en:
+   - en
+   web_nlg_ru:
+   - ru
+   wiki_auto_asset_turk:
+   - en
+   wiki_lingua_es_en:
+   - en
+   - es
+   wiki_lingua_ru_en:
+   - en
+   - ru
+   wiki_lingua_tr_en:
+   - en
+   - tr
+   wiki_lingua_vi_en:
+   - en
+   - vi
+   xsum:
+   - en
+ licenses:
+ - other-research-only
+ multilinguality:
+   common_gen:
+   - monolingual
+   cs_restaurants:
+   - monolingual
+   dart:
+   - monolingual
+   e2e_nlg:
+   - monolingual
+   mlsum_de:
+   - monolingual
+   mlsum_es:
+   - monolingual
+   schema_guided_dialog:
+   - monolingual
+   totto:
+   - monolingual
+   web_nlg_en:
+   - monolingual
+   web_nlg_ru:
+   - monolingual
+   wiki_auto_asset_turk:
+   - monolingual
+   wiki_lingua_es_en:
+   - multilingual
+   wiki_lingua_ru_en:
+   - multilingual
+   wiki_lingua_tr_en:
+   - multilingual
+   wiki_lingua_vi_en:
+   - multilingual
+   xsum:
+   - monolingual
+ size_categories:
+   common_gen:
+   - 10K<n<100K
+   cs_restaurants:
+   - 1K<n<10K
+   dart:
+   - 10K<n<100K
+   e2e_nlg:
+   - 10K<n<100K
+   mlsum_de:
+   - 100K<n<1M
+   mlsum_es:
+   - 100K<n<1M
+   schema_guided_dialog:
+   - 100K<n<1M
+   totto:
+   - 100K<n<1M
+   web_nlg_en:
+   - 10K<n<100K
+   web_nlg_ru:
+   - 10K<n<100K
+   wiki_auto_asset_turk:
+   - 100K<n<1M
+   wiki_lingua_es_en:
+   - 100K<n<1M
+   wiki_lingua_ru_en:
+   - 10K<n<100K
+   wiki_lingua_tr_en:
+   - 1K<n<10K
+   wiki_lingua_vi_en:
+   - 10K<n<100K
+   xsum:
+   - 10K<n<100K
+ source_datasets:
+   common_gen:
+   - extended|other-vision-datasets
+   - original
+   cs_restaurants:
+   - original
+   dart:
+   - original
+   e2e_nlg:
+   - original
+   mlsum_de:
+   - original
+   mlsum_es:
+   - original
+   schema_guided_dialog:
+   - original
+   totto:
+   - original
+   web_nlg_en:
+   - original
+   web_nlg_ru:
+   - original
+   wiki_auto_asset_turk:
+   - original
+   wiki_lingua_es_en:
+   - original
+   wiki_lingua_ru_en:
+   - original
+   wiki_lingua_tr_en:
+   - original
+   wiki_lingua_vi_en:
+   - original
+   xsum:
+   - original
+ task_categories:
+   common_gen:
+   - conditional-text-generation
+   cs_restaurants:
+   - conditional-text-generation
+   dart:
+   - conditional-text-generation
+   e2e_nlg:
+   - conditional-text-generation
+   mlsum_de:
+   - conditional-text-generation
+   mlsum_es:
+   - conditional-text-generation
+   schema_guided_dialog:
+   - sequence-modeling
+   totto:
+   - conditional-text-generation
+   web_nlg_en:
+   - conditional-text-generation
+   web_nlg_ru:
+   - conditional-text-generation
+   wiki_auto_asset_turk:
+   - conditional-text-generation
+   wiki_lingua_es_en:
+   - conditional-text-generation
+   wiki_lingua_ru_en:
+   - conditional-text-generation
+   wiki_lingua_tr_en:
+   - conditional-text-generation
+   wiki_lingua_vi_en:
+   - conditional-text-generation
+   xsum:
+   - conditional-text-generation
+ task_ids:
+   common_gen:
+   - other-stuctured-to-text
+   cs_restaurants:
+   - other-stuctured-to-text
+   dart:
+   - other-stuctured-to-text
+   e2e_nlg:
+   - other-stuctured-to-text
+   mlsum_de:
+   - summarization
+   mlsum_es:
+   - summarization
+   schema_guided_dialog:
+   - dialogue-modeling
+   totto:
+   - table-to-text
+   web_nlg_en:
+   - other-stuctured-to-text
+   web_nlg_ru:
+   - other-stuctured-to-text
+   wiki_auto_asset_turk:
+   - text-simplification
+   wiki_lingua_es_en:
+   - summarization
+   wiki_lingua_ru_en:
+   - summarization
+   wiki_lingua_tr_en:
+   - summarization
+   wiki_lingua_vi_en:
+   - summarization
+   xsum:
+   - summarization
+ ---
+ 
+ # Dataset Card for "gem"
+ 
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits Sample Size](#data-splits-sample-size)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+ 
+ ## [Dataset Description](#dataset-description)
+ 
+ - **Homepage:** [https://gem-benchmark.github.io/](https://gem-benchmark.github.io/)
+ - **Repository:**
+ - **Paper:**
+ - **Point of Contact:** [Sebastian Gehrmann]([email protected])
+ - **Size of downloaded dataset files:** 2084.23 MB
+ - **Size of the generated dataset:** 3734.73 MB
+ - **Total amount of disk used:** 5818.96 MB
+ 
+ ### [Dataset Summary](#dataset-summary)
+ 
+ GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,
+ both through human annotations and automated Metrics.
+ 
+ GEM aims to:
+ - measure NLG progress across 13 datasets spanning many NLG tasks and languages.
+ - provide an in-depth analysis of data and models presented via data statements and challenge sets.
+ - develop standards for evaluation of generated text using both automated and human metrics.
+ 
+ It is our goal to regularly update GEM and to encourage more inclusive practices in dataset development
+ by extending existing data or developing datasets for additional languages.
+ 
+ You can find more complete information in the dataset cards for each of the subsets:
+ - [CommonGen](https://gem-benchmark.github.io/data_cards/CommonGen)
+ - [Czech Restaurant](https://gem-benchmark.github.io/data_cards/Czech%20Restaurant)
+ - [DART](https://gem-benchmark.github.io/data_cards/DART)
+ - [E2E](https://gem-benchmark.github.io/data_cards/E2E)
+ - [MLSum](https://gem-benchmark.github.io/data_cards/MLSum)
+ - [Schema-Guided Dialog](https://gem-benchmark.github.io/data_cards/Schema-Guided%20DIalog)
+ - [WebNLG](https://gem-benchmark.github.io/data_cards/WebNLG)
+ - [Wiki-Auto](https://gem-benchmark.github.io/data_cards/Wiki-Auto)/[ASSET](https://gem-benchmark.github.io/data_cards/ASSET)/[TURK](https://gem-benchmark.github.io/data_cards/TURK)
+ - [WikiLingua](https://gem-benchmark.github.io/data_cards/WikiLingua)
+ - [XSum](https://gem-benchmark.github.io/data_cards/XSum)
+ 
+ The subsets are organized by task:
+ ```
+ {
+     "summarization": {
+         "mlsum": ["mlsum_de", "mlsum_es"],
+         "wiki_lingua": ["wiki_lingua_es_en", "wiki_lingua_ru_en", "wiki_lingua_tr_en", "wiki_lingua_vi_en"],
+         "xsum": ["xsum"],
+     },
+     "struct2text": {
+         "common_gen": ["common_gen"],
+         "cs_restaurants": ["cs_restaurants"],
+         "dart": ["dart"],
+         "e2e": ["e2e_nlg"],
+         "totto": ["totto"],
+         "web_nlg": ["web_nlg_en", "web_nlg_ru"],
+     },
+     "simplification": {
+         "wiki_auto_asset_turk": ["wiki_auto_asset_turk"],
+     },
+     "dialog": {
+         "schema_guided_dialog": ["schema_guided_dialog"],
+     },
+ }
+ ```
+ 
+ Each training example has a single `target`; each validation and test example instead has a set of `references` with one or more items.
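+ 
+ A subset is loaded by passing its config name to `load_dataset`; a minimal sketch (using `common_gen`, but any config name above works the same way):
+ ```
+ from datasets import load_dataset
+ 
+ # Each GEM subset is selected by its config name.
+ dataset = load_dataset("gem", "common_gen")
+ 
+ # Training examples carry a single `target`; validation and test
+ # examples also carry a list of `references`.
+ print(dataset["train"][0]["target"])
+ print(dataset["validation"][0]["references"])
+ ```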
+ 
+ ### [Supported Tasks](#supported-tasks)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ### [Languages](#languages)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ## [Dataset Structure](#dataset-structure)
+ 
+ We show detailed information for each of the 16 configurations of the dataset.
+ 
+ ### [Data Instances](#data-instances)
+ 
+ #### common_gen
+ 
+ - **Size of downloaded dataset files:** 1.76 MB
+ - **Size of the generated dataset:** 8.80 MB
+ - **Total amount of disk used:** 10.56 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'concept_set_id': 0,
+ 'concepts': ['field', 'look', 'stand'],
+ 'gem_id': 'common_gen-validation-0',
+ 'references': ['The player stood in the field looking at the batter.',
+ 'The coach stands along the field, looking at the goalkeeper.',
+ 'I stood and looked across the field, peacefully.',
+ 'Someone stands, looking around the empty field.'],
+ 'target': 'The player stood in the field looking at the batter.'}
+ ```
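+ 
+ Such instance dicts are obtained by indexing a split directly; a small sketch reusing the `common_gen` config:
+ ```
+ from datasets import load_dataset
+ 
+ ds = load_dataset("gem", "common_gen", split="validation")
+ print(ds[0])              # the dict shown above
+ print(ds[0]["concepts"])  # ['field', 'look', 'stand']
+ ```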
+ 
+ #### cs_restaurants
+ 
+ - **Size of downloaded dataset files:** 1.40 MB
+ - **Size of the generated dataset:** 1.25 MB
+ - **Total amount of disk used:** 2.64 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'dialog_act': '?request(area)',
+ 'dialog_act_delexicalized': '?request(area)',
+ 'gem_id': 'cs_restaurants-validation-0',
+ 'references': ['Jakou lokalitu hledáte ?'],
+ 'target': 'Jakou lokalitu hledáte ?',
+ 'target_delexicalized': 'Jakou lokalitu hledáte ?'}
+ ```
+ 
+ #### dart
+ 
+ - **Size of downloaded dataset files:** 28.01 MB
+ - **Size of the generated dataset:** 26.17 MB
+ - **Total amount of disk used:** 54.18 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'dart_id': 0,
+ 'gem_id': 'dart-validation-0',
+ 'references': ['A school from Mars Hill, North Carolina, joined in 1973.'],
+ 'subtree_was_extended': True,
+ 'target': 'A school from Mars Hill, North Carolina, joined in 1973.',
+ 'target_sources': ['WikiSQL_decl_sents'],
+ 'tripleset': [['Mars Hill College', 'JOINED', '1973'], ['Mars Hill College', 'LOCATION', 'Mars Hill, North Carolina']]}
+ ```
+ 
+ #### e2e_nlg
+ 
+ - **Size of downloaded dataset files:** 13.92 MB
+ - **Size of the generated dataset:** 11.58 MB
+ - **Total amount of disk used:** 25.50 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'gem_id': 'e2e_nlg-validation-0',
+ 'meaning_representation': 'name[Alimentum], area[city centre], familyFriendly[no]',
+ 'references': ['There is a place in the city centre, Alimentum, that is not family-friendly.'],
+ 'target': 'There is a place in the city centre, Alimentum, that is not family-friendly.'}
+ ```
+ 
+ #### mlsum_de
+ 
+ - **Size of downloaded dataset files:** 331.27 MB
+ - **Size of the generated dataset:** 907.00 MB
+ - **Total amount of disk used:** 1238.27 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'date': '00/04/2019',
+ 'gem_id': 'mlsum_de-validation-0',
+ 'references': ['In einer Kleinstadt auf der Insel Usedom war eine junge Frau tot in ihrer Wohnung gefunden worden. Nun stehen zwei Bekannte unter Verdacht.'],
+ 'target': 'In einer Kleinstadt auf der Insel Usedom war eine junge Frau tot in ihrer Wohnung gefunden worden. Nun stehen zwei Bekannte unter Verdacht.',
+ 'text': 'Kerzen und Blumen stehen vor dem Eingang eines Hauses, in dem eine 18-jährige Frau tot aufgefunden wurde. In einer Kleinstadt auf der Insel Usedom war eine junge Frau tot in ...',
+ 'title': 'Tod von 18-Jähriger auf Usedom: Zwei Festnahmen',
+ 'topic': 'panorama',
+ 'url': 'https://www.sueddeutsche.de/panorama/usedom-frau-tot-festnahme-verdaechtige-1.4412256'}
+ ```
+ 
+ #### mlsum_es
+ 
+ - **Size of downloaded dataset files:** 490.29 MB
+ - **Size of the generated dataset:** 1253.63 MB
+ - **Total amount of disk used:** 1743.92 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'date': '05/01/2019',
+ 'gem_id': 'mlsum_es-validation-0',
+ 'references': ['El diseñador que dio carta de naturaleza al estilo genuinamente americano celebra el medio siglo de su marca entre grandes fastos y problemas financieros. Conectar con las nuevas generaciones es el regalo que precisa más que nunca'],
+ 'target': 'El diseñador que dio carta de naturaleza al estilo genuinamente americano celebra el medio siglo de su marca entre grandes fastos y problemas financieros. Conectar con las nuevas generaciones es el regalo que precisa más que nunca',
+ 'text': 'Un oso de peluche marcándose un heelflip de monopatín es todo lo que Ralph Lauren necesitaba esta Navidad. Estampado en un jersey de lana azul marino, supone la guinda que corona ...',
+ 'title': 'Ralph Lauren busca el secreto de la eterna juventud',
+ 'topic': 'elpais estilo',
+ 'url': 'http://elpais.com/elpais/2019/01/04/estilo/1546617396_933318.html'}
+ ```
+ 
+ #### schema_guided_dialog
+ 
+ - **Size of downloaded dataset files:** 8.24 MB
+ - **Size of the generated dataset:** 43.66 MB
+ - **Total amount of disk used:** 51.91 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'dialog_acts': [{'act': 2, 'slot': 'song_name', 'values': ['Carnivore']}, {'act': 2, 'slot': 'playback_device', 'values': ['TV']}],
+ 'dialog_id': '10_00054',
+ 'gem_id': 'schema_guided_dialog-validation-0',
+ 'prompt': 'Yes, I would.',
+ 'references': ['Please confirm the song Carnivore on tv.'],
+ 'target': 'Please confirm the song Carnivore on tv.',
+ 'turn_id': 15}
+ ```
+ 
+ #### totto
+ 
+ - **Size of downloaded dataset files:** 179.03 MB
+ - **Size of the generated dataset:** 722.88 MB
+ - **Total amount of disk used:** 901.91 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'example_id': '7391450717765563190',
+ 'gem_id': 'totto-validation-0',
+ 'highlighted_cells': [[3, 0], [3, 2], [3, 3]],
+ 'overlap_subset': 'True',
+ 'references': ['Daniel Henry Chamberlain was the 76th Governor of South Carolina from 1874.',
+ 'Daniel Henry Chamberlain was the 76th Governor of South Carolina, beginning in 1874.',
+ 'Daniel Henry Chamberlain was the 76th Governor of South Carolina who took office in 1874.'],
+ 'sentence_annotations': [{'final_sentence': 'Daniel Henry Chamberlain was the 76th Governor of South Carolina from 1874.',
+ 'original_sentence': 'Daniel Henry Chamberlain (June 23, 1835 – April 13, 1907) was an American planter, lawyer, author and the 76th Governor of South Carolina '
+ 'from 1874 until 1877.',
+ 'sentence_after_ambiguity': 'Daniel Henry Chamberlain was the 76th Governor of South Carolina from 1874.',
+ 'sentence_after_deletion': 'Daniel Henry Chamberlain was the 76th Governor of South Carolina from 1874.'},
+ ...
+ ],
+ 'table': [[{'column_span': 1, 'is_header': True, 'row_span': 1, 'value': '#'},
+ {'column_span': 2, 'is_header': True, 'row_span': 1, 'value': 'Governor'},
+ {'column_span': 1, 'is_header': True, 'row_span': 1, 'value': 'Took Office'},
+ {'column_span': 1, 'is_header': True, 'row_span': 1, 'value': 'Left Office'}],
+ [{'column_span': 1, 'is_header': True, 'row_span': 1, 'value': '74'},
+ {'column_span': 1, 'is_header': False, 'row_span': 1, 'value': '-'},
+ {'column_span': 1, 'is_header': False, 'row_span': 1, 'value': 'Robert Kingston Scott'},
+ {'column_span': 1, 'is_header': False, 'row_span': 1, 'value': 'July 6, 1868'}],
+ ...
+ ],
+ 'table_page_title': 'List of Governors of South Carolina',
+ 'table_section_text': 'Parties Democratic Republican',
+ 'table_section_title': 'Governors under the Constitution of 1868',
+ 'table_webpage_url': 'http://en.wikipedia.org/wiki/List_of_Governors_of_South_Carolina',
+ 'target': 'Daniel Henry Chamberlain was the 76th Governor of South Carolina from 1874.',
+ 'totto_id': 0}
+ ```
+ 
+ #### web_nlg_en
+ 
+ - **Size of downloaded dataset files:** 12.35 MB
+ - **Size of the generated dataset:** 13.95 MB
+ - **Total amount of disk used:** 26.29 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'category': 'Airport',
+ 'gem_id': 'web_nlg_en-validation-0',
+ 'input': ['Aarhus | leader | Jacob_Bundsgaard'],
+ 'references': ['The leader of Aarhus is Jacob Bundsgaard.'],
+ 'target': 'The leader of Aarhus is Jacob Bundsgaard.',
+ 'webnlg_id': 'dev/Airport/1/Id1'}
+ ```
+ 
+ #### web_nlg_ru
+ 
+ - **Size of downloaded dataset files:** 7.28 MB
+ - **Size of the generated dataset:** 8.02 MB
+ - **Total amount of disk used:** 15.30 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'category': 'Airport',
+ 'gem_id': 'web_nlg_ru-validation-0',
+ 'input': ['Punjab,_Pakistan | leaderTitle | Provincial_Assembly_of_the_Punjab'],
+ 'references': ['Пенджаб, Пакистан, возглавляется Провинциальной ассамблеей Пенджаба.', 'Пенджаб, Пакистан возглавляется Провинциальной ассамблеей Пенджаба.'],
+ 'target': 'Пенджаб, Пакистан, возглавляется Провинциальной ассамблеей Пенджаба.',
+ 'webnlg_id': 'dev/Airport/1/Id1'}
+ ```
+ 
+ #### wiki_auto_asset_turk
+ 
+ - **Size of downloaded dataset files:** 121.37 MB
+ - **Size of the generated dataset:** 145.69 MB
+ - **Total amount of disk used:** 267.07 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'gem_id': 'wiki_auto_asset_turk-validation-0',
+ 'references': ['The Gandalf Awards honor excellent writing in in fantasy literature.'],
+ 'source': 'The Gandalf Awards, honoring achievement in fantasy literature, were conferred by the World Science Fiction Society annually from 1974 to 1981.',
+ 'source_id': '350_691837-1-0-0',
+ 'target': 'The Gandalf Awards honor excellent writing in in fantasy literature.',
+ 'target_id': '350_691837-0-0-0'}
+ ```
+ 
+ #### wiki_lingua_es_en
+ 
+ - **Size of downloaded dataset files:** 161.56 MB
+ - **Size of the generated dataset:** 274.28 MB
+ - **Total amount of disk used:** 435.84 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'gem_id': 'wiki_lingua_es_en-val-0',
+ 'references': ["Practice matted hair prevention from early in your cat's life. Make sure that your cat is grooming itself effectively. Keep a close eye on cats with long hair."],
+ 'source': 'Muchas personas presentan problemas porque no cepillaron el pelaje de sus gatos en una etapa temprana de su vida, ya que no lo consideraban necesario. Sin embargo, a medida que...',
+ 'target': "Practice matted hair prevention from early in your cat's life. Make sure that your cat is grooming itself effectively. Keep a close eye on cats with long hair."}
+ ```
+ 
+ #### wiki_lingua_ru_en
+ 
+ - **Size of downloaded dataset files:** 161.56 MB
+ - **Size of the generated dataset:** 201.43 MB
+ - **Total amount of disk used:** 362.99 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'gem_id': 'wiki_lingua_ru_en-val-0',
+ 'references': ['Get immediate medical care if you notice signs of a complication. Undergo diagnostic tests to check for gallstones and complications. Ask your doctor about your treatment '
+ 'options.'],
+ 'source': 'И хотя, скорее всего, вам не о чем волноваться, следует незамедлительно обратиться к врачу, если вы подозреваете, что у вас возникло осложнение желчекаменной болезни. Это ...',
+ 'target': 'Get immediate medical care if you notice signs of a complication. Undergo diagnostic tests to check for gallstones and complications. Ask your doctor about your treatment '
+ 'options.'}
+ ```
+ 
+ #### wiki_lingua_tr_en
+ 
+ - **Size of downloaded dataset files:** 161.56 MB
+ - **Size of the generated dataset:** 9.87 MB
+ - **Total amount of disk used:** 171.42 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'gem_id': 'wiki_lingua_tr_en-val-0',
+ 'references': ['Open Instagram. Go to the video you want to download. Tap ⋮. Tap Copy Link. Open Google Chrome. Tap the address bar. Go to the SaveFromWeb site. Tap the "Paste Instagram Video" text box. Tap and hold the text box. Tap PASTE. Tap Download. Download the video. Find the video on your Android.'],
+ 'source': 'Instagram uygulamasının çok renkli kamera şeklindeki simgesine dokun. Daha önce giriş yaptıysan Instagram haber kaynağı açılır. Giriş yapmadıysan istendiğinde e-posta adresini ...',
+ 'target': 'Open Instagram. Go to the video you want to download. Tap ⋮. Tap Copy Link. Open Google Chrome. Tap the address bar. Go to the SaveFromWeb site. Tap the "Paste Instagram Video" text box. Tap and hold the text box. Tap PASTE. Tap Download. Download the video. Find the video on your Android.'}
+ ```
+ 
+ #### wiki_lingua_vi_en
+ 
+ - **Size of downloaded dataset files:** 161.56 MB
+ - **Size of the generated dataset:** 39.12 MB
+ - **Total amount of disk used:** 200.68 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'gem_id': 'wiki_lingua_vi_en-val-0',
+ 'references': ['Select the right time of year for planting the tree. You will usually want to plant your tree when it is dormant, or not flowering, during cooler or colder times of year.'],
+ 'source': 'Bạn muốn cung cấp cho cây cơ hội tốt nhất để phát triển và sinh tồn. Trồng cây đúng thời điểm trong năm chính là yếu tố then chốt. Thời điểm sẽ thay đổi phụ thuộc vào loài cây ...',
+ 'target': 'Select the right time of year for planting the tree. You will usually want to plant your tree when it is dormant, or not flowering, during cooler or colder times of year.'}
+ ```
+ 
+ #### xsum
+ 
+ - **Size of downloaded dataset files:** 243.08 MB
+ - **Size of the generated dataset:** 67.40 MB
+ - **Total amount of disk used:** 310.48 MB
+ 
+ An example of `validation` looks as follows.
+ ```
+ {'document': 'Burberry reported pre-tax profits of £166m for the year to March. A year ago it made a loss of £16.1m, hit by charges at its Spanish operations.\n'
+ 'In the past year it has opened 21 new stores and closed nine. It plans to open 20-30 stores this year worldwide.\n'
+ 'The group has also focused on promoting the Burberry brand online...',
+ 'gem_id': 'xsum-validation-0',
+ 'references': ['Luxury fashion designer Burberry has returned to profit after opening new stores and spending more on online marketing'],
+ 'target': 'Luxury fashion designer Burberry has returned to profit after opening new stores and spending more on online marketing',
+ 'xsum_id': '10162122'}
+ ```
+ 
+ ### [Data Fields](#data-fields)
+ 
+ The data fields are the same among all splits.
+ 
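+ A quick way to verify this is to compare the `features` of each split; a sketch using the `common_gen` config:
+ ```
+ from datasets import load_dataset
+ 
+ dataset = load_dataset("gem", "common_gen")
+ # The feature schema is shared by train, validation and test.
+ assert all(dataset[s].features == dataset["train"].features for s in dataset)
+ print(dataset["train"].features)
+ ```
+ 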
+ #### common_gen
+ - `gem_id`: a `string` feature.
+ - `concept_set_id`: an `int32` feature.
+ - `concepts`: a `list` of `string` features.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### cs_restaurants
+ - `gem_id`: a `string` feature.
+ - `dialog_act`: a `string` feature.
+ - `dialog_act_delexicalized`: a `string` feature.
+ - `target_delexicalized`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### dart
+ - `gem_id`: a `string` feature.
+ - `dart_id`: an `int32` feature.
+ - `tripleset`: a `list` of `string` features.
+ - `subtree_was_extended`: a `bool` feature.
+ - `target_sources`: a `list` of `string` features.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### e2e_nlg
+ - `gem_id`: a `string` feature.
+ - `meaning_representation`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### mlsum_de
+ - `gem_id`: a `string` feature.
+ - `text`: a `string` feature.
+ - `topic`: a `string` feature.
+ - `url`: a `string` feature.
+ - `title`: a `string` feature.
+ - `date`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### mlsum_es
+ - `gem_id`: a `string` feature.
+ - `text`: a `string` feature.
+ - `topic`: a `string` feature.
+ - `url`: a `string` feature.
+ - `title`: a `string` feature.
+ - `date`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### schema_guided_dialog
+ - `gem_id`: a `string` feature.
+ - `act`: a classification label, with possible values including `AFFIRM` (0), `AFFIRM_INTENT` (1), `CONFIRM` (2), `GOODBYE` (3), `INFORM` (4).
+ - `slot`: a `string` feature.
+ - `values`: a `list` of `string` features.
+ - `dialog_id`: a `string` feature.
+ - `turn_id`: an `int32` feature.
+ - `prompt`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
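+ The integer `act` values can be mapped back to their label names through the `ClassLabel` feature; a sketch, assuming the list-of-dict feature layout used for `dialog_acts`:
+ ```
+ from datasets import load_dataset
+ 
+ ds = load_dataset("gem", "schema_guided_dialog", split="validation")
+ # `dialog_acts` is declared as a list of dicts whose `act` entry is a
+ # ClassLabel; int2str maps a stored integer back to its label name.
+ act_feature = ds.features["dialog_acts"][0]["act"]
+ print(act_feature.int2str(2))  # "CONFIRM"
+ ```
+ 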
+ #### totto
+ - `gem_id`: a `string` feature.
+ - `totto_id`: an `int32` feature.
+ - `table_page_title`: a `string` feature.
+ - `table_webpage_url`: a `string` feature.
+ - `table_section_title`: a `string` feature.
+ - `table_section_text`: a `string` feature.
+ - `column_span`: an `int32` feature.
+ - `is_header`: a `bool` feature.
+ - `row_span`: an `int32` feature.
+ - `value`: a `string` feature.
+ - `highlighted_cells`: a `list` of `int32` features.
+ - `example_id`: a `string` feature.
+ - `original_sentence`: a `string` feature.
+ - `sentence_after_deletion`: a `string` feature.
+ - `sentence_after_ambiguity`: a `string` feature.
+ - `final_sentence`: a `string` feature.
+ - `overlap_subset`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### web_nlg_en
+ - `gem_id`: a `string` feature.
+ - `input`: a `list` of `string` features.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ - `category`: a `string` feature.
+ - `webnlg_id`: a `string` feature.
+ 
+ #### web_nlg_ru
+ - `gem_id`: a `string` feature.
+ - `input`: a `list` of `string` features.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ - `category`: a `string` feature.
+ - `webnlg_id`: a `string` feature.
+ 
+ #### wiki_auto_asset_turk
+ - `gem_id`: a `string` feature.
+ - `source_id`: a `string` feature.
+ - `target_id`: a `string` feature.
+ - `source`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### wiki_lingua_es_en
+ - `gem_id`: a `string` feature.
+ - `source`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### wiki_lingua_ru_en
+ - `gem_id`: a `string` feature.
+ - `source`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### wiki_lingua_tr_en
+ - `gem_id`: a `string` feature.
+ - `source`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### wiki_lingua_vi_en
+ - `gem_id`: a `string` feature.
+ - `source`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ #### xsum
+ - `gem_id`: a `string` feature.
+ - `xsum_id`: a `string` feature.
+ - `document`: a `string` feature.
+ - `target`: a `string` feature.
+ - `references`: a `list` of `string` features.
+ 
+ ### [Data Splits Sample Size](#data-splits-sample-size)
+ 
+ #### common_gen
+ 
+ | |train|validation|test|
+ |----------|----:|---------:|---:|
+ |common_gen|67389| 993|1497|
+ 
+ #### cs_restaurants
+ 
+ | |train|validation|test|
+ |--------------|----:|---------:|---:|
+ |cs_restaurants| 3569| 781| 842|
+ 
+ #### dart
+ 
+ | |train|validation|test|
+ |----|----:|---------:|---:|
+ |dart|62659| 2768|6959|
+ 
+ #### e2e_nlg
+ 
+ | |train|validation|test|
+ |-------|----:|---------:|---:|
+ |e2e_nlg|33525| 4299|4693|
+ 
+ #### mlsum_de
+ 
+ | |train |validation|test |
+ |--------|-----:|---------:|----:|
+ |mlsum_de|220748| 11392|10695|
+ 
+ #### mlsum_es
+ 
+ | |train |validation|test |
+ |--------|-----:|---------:|----:|
+ |mlsum_es|259886| 9977|13365|
+ 
+ #### schema_guided_dialog
+ 
+ | |train |validation|test |
+ |--------------------|-----:|---------:|----:|
+ |schema_guided_dialog|164982| 10000|10000|
+ 
+ #### totto
+ 
+ | |train |validation|test|
+ |-----|-----:|---------:|---:|
+ |totto|121153| 7700|7700|
+ 
+ #### web_nlg_en
+ 
+ | |train|validation|test|
+ |----------|----:|---------:|---:|
+ |web_nlg_en|35426| 1667|1779|
+ 
+ #### web_nlg_ru
+ 
+ | |train|validation|test|
+ |----------|----:|---------:|---:|
+ |web_nlg_ru|14630| 790|1102|
+ 
+ #### wiki_auto_asset_turk
+ 
+ | |train |validation|test_asset|test_turk|
+ |--------------------|-----:|---------:|---------:|--------:|
+ |wiki_auto_asset_turk|373801| 73249| 359| 359|
+ 
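+ Note that this config ships two test splits, `test_asset` and `test_turk`, instead of a single `test`; a sketch of accessing them:
+ ```
+ from datasets import load_dataset
+ 
+ ds = load_dataset("gem", "wiki_auto_asset_turk")
+ # Two simplification test sets replace the usual single `test` split.
+ print(len(ds["test_asset"]), len(ds["test_turk"]))  # 359 359
+ print(ds["test_asset"][0]["source"])
+ ```
+ 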
+ #### wiki_lingua_es_en
+ 
+ | |train|validation|test |
+ |-----------------|----:|---------:|----:|
+ |wiki_lingua_es_en|79515| 8835|19797|
+ 
+ #### wiki_lingua_ru_en
+ 
+ | |train|validation|test|
+ |-----------------|----:|---------:|---:|
+ |wiki_lingua_ru_en|36898| 4100|9094|
+ 
+ #### wiki_lingua_tr_en
+ 
+ | |train|validation|test|
+ |-----------------|----:|---------:|---:|
+ |wiki_lingua_tr_en| 3193| 355| 808|
+ 
+ #### wiki_lingua_vi_en
+ 
+ | |train|validation|test|
+ |-----------------|----:|---------:|---:|
+ |wiki_lingua_vi_en| 9206| 1023|2167|
+ 
+ #### xsum
+ 
+ | |train|validation|test|
+ |----|----:|---------:|---:|
+ |xsum|23206| 1117|1166|
+ 
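+ These counts can be reproduced by loading a config and measuring each split; a sketch using `xsum` (the exact numbers assume this version of the data):
+ ```
+ from datasets import load_dataset
+ 
+ dataset = load_dataset("gem", "xsum")
+ print({split: len(dataset[split]) for split in dataset})
+ # {'train': 23206, 'validation': 1117, 'test': 1166}
+ ```
+ 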
+ ## [Dataset Creation](#dataset-creation)
+ 
+ ### [Curation Rationale](#curation-rationale)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ### [Source Data](#source-data)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ### [Annotations](#annotations)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ### [Personal and Sensitive Information](#personal-and-sensitive-information)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ## [Considerations for Using the Data](#considerations-for-using-the-data)
+ 
+ ### [Social Impact of Dataset](#social-impact-of-dataset)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ### [Discussion of Biases](#discussion-of-biases)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ### [Other Known Limitations](#other-known-limitations)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ## [Additional Information](#additional-information)
+ 
+ ### [Dataset Curators](#dataset-curators)
+ 
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
+ 
+ ### [Licensing Information](#licensing-information)
+ 
+ CC-BY-SA-4.0
+ 
+ ### [Citation Information](#citation-information)
+ 
+ ```
+ @InProceedings{acl:gem,
+   title = {The GEM Benchmark: Natural Language Generation, its Evaluation and Metrics},
+   author = {Sebastian Gehrmann et al.},
+   year = {2021}
+ }
+ ```
+ 
+ ### [Contributions](#contributions)
+ 
+ Thanks to [@yjernite](https://github.com/yjernite) for adding this dataset.
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"mlsum_de": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "topic": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "mlsum_de", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 852755869, "num_examples": 220748, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 49392647, "num_examples": 11392, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 48909345, "num_examples": 10695, "dataset_name": "gem"}}, "download_checksums": {"https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_train.zip": {"num_bytes": 311059697, "checksum": "88e788437bae48af6b3d18a554af4b2794cc6143a137df3f56daa91a37e3ea7e"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_val.zip": {"num_bytes": 17771216, "checksum": "732620c32e1d3f393ee3193f57f1217d8549499eb4906e144252aaab39aa910b"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_test.zip": {"num_bytes": 17741147, "checksum": "447e3b1839ab94d5700cc2aedc0b52521404865b2589656acc90a654ed0de4ff"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids.json": {"num_bytes": 789135, "checksum": "4d34d9712997fcf4ef8cdd7e396d69e529b8bdbecef9e9ff1f0000f9b222a299"}}, "download_size": 347361195, "post_processing_size": null, "dataset_size": 951057861, "size_in_bytes": 1298419056}, "mlsum_es": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", 
"homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "topic": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "mlsum_es", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1193361133, "num_examples": 259886, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 50540104, "num_examples": 9977, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 70626686, "num_examples": 13365, "dataset_name": "gem"}}, "download_checksums": {"https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_train.zip": {"num_bytes": 466443036, "checksum": "a01f4b4b873aa6cdeae15952a22ede2146734d0b60e7297470a35956507c863a"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_val.zip": {"num_bytes": 19483214, "checksum": "e38fce9950008ec4b48963692891c4c94d51a1e307286fb596e093aeb1230c92"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_test.zip": {"num_bytes": 27386169, "checksum": "177cfcf358bc4aa9bce2753b8e9de4f6eb41d2c30b1a99ef29d64e70537a1c0d"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids.json": {"num_bytes": 789135, "checksum": "4d34d9712997fcf4ef8cdd7e396d69e529b8bdbecef9e9ff1f0000f9b222a299"}}, "download_size": 514101554, "post_processing_size": null, "dataset_size": 1314527923, "size_in_bytes": 1828629477}, "wiki_lingua_es_en": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "wiki_lingua_es_en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 212575461, "num_examples": 79515, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 25574054, "num_examples": 8835, "dataset_name": "gem"}, "test": {"name": "test", 
"num_bytes": 49454121, "num_examples": 19797, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip": {"num_bytes": 169406387, "checksum": "be0f11b80c496874f8e395a1f5eb4474bbc1cb2ddf2bcf74928475b033020d03"}}, "download_size": 169406387, "post_processing_size": null, "dataset_size": 287603636, "size_in_bytes": 457010023}, "wiki_lingua_ru_en": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "wiki_lingua_ru_en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 158203277, "num_examples": 36898, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 18480479, "num_examples": 4100, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 34529939, "num_examples": 9094, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip": {"num_bytes": 169406387, "checksum": "be0f11b80c496874f8e395a1f5eb4474bbc1cb2ddf2bcf74928475b033020d03"}}, "download_size": 169406387, "post_processing_size": null, "dataset_size": 211213695, "size_in_bytes": 380620082}, "wiki_lingua_tr_en": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, 
"supervised_keys": null, "builder_name": "gem", "config_name": "wiki_lingua_tr_en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7569617, "num_examples": 3193, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 929803, "num_examples": 355, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 1846128, "num_examples": 808, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip": {"num_bytes": 169406387, "checksum": "be0f11b80c496874f8e395a1f5eb4474bbc1cb2ddf2bcf74928475b033020d03"}}, "download_size": 169406387, "post_processing_size": null, "dataset_size": 10345548, "size_in_bytes": 179751935}, "wiki_lingua_vi_en": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "wiki_lingua_vi_en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 31250858, "num_examples": 9206, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 3582938, "num_examples": 1023, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 6188286, "num_examples": 2167, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip": {"num_bytes": 169406387, "checksum": "be0f11b80c496874f8e395a1f5eb4474bbc1cb2ddf2bcf74928475b033020d03"}}, "download_size": 169406387, "post_processing_size": null, "dataset_size": 41022082, "size_in_bytes": 210428469}, "xsum": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, 
Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "xsum_id": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "xsum", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 65846114, "num_examples": 23206, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 2244604, "num_examples": 1117, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 2578627, "num_examples": 1166, "dataset_name": "gem"}}, "download_checksums": {"http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {"num_bytes": 254582292, "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json": {"num_bytes": 305905, "checksum": "a3c03b5367fd2c21a44f30ce3605f1f1cd9eb3a0383f0120a31e2225a13d72ed"}}, "download_size": 254888197, "post_processing_size": null, "dataset_size": 70669345, "size_in_bytes": 325557542}, "common_gen": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "concept_set_id": {"dtype": "int32", "id": null, "_type": "Value"}, "concepts": [{"dtype": "string", "id": null, "_type": "Value"}], "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "common_gen", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8734894, "num_examples": 67389, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 377181, "num_examples": 993, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 118348, "num_examples": 1497, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip": {"num_bytes": 1845699, "checksum": "a3f19ca607da4e874fc5f2dd1f53c13a6788a497f883d74cc3f9a1fcda44c594"}}, "download_size": 1845699, "post_processing_size": null, "dataset_size": 9230423, "size_in_bytes": 11076122}, "cs_restaurants": {"description": "GEM is a benchmark environment 
for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "dialog_act": {"dtype": "string", "id": null, "_type": "Value"}, "dialog_act_delexicalized": {"dtype": "string", "id": null, "_type": "Value"}, "target_delexicalized": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "cs_restaurants", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 770750, "num_examples": 3569, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 262555, "num_examples": 781, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 273068, "num_examples": 842, "dataset_name": "gem"}}, "download_checksums": {"https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/train.json": {"num_bytes": 953853, "checksum": "4dc46649dd44d4fb0c32ac56211ba1c5409b366129102a62b28a3a67cec4a2e7"}, "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/devel.json": {"num_bytes": 247118, "checksum": "433cbcf069fbf1254b2be33d0ec799c55b46d06cc1d84ae19db758301fbe3adf"}, "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/test.json": {"num_bytes": 262048, "checksum": "0af728246699009f9d3702386c7a2b4db0318697ffb5333f088b393eb33d03a2"}}, "download_size": 1463019, "post_processing_size": null, "dataset_size": 1306373, "size_in_bytes": 2769392}, "dart": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "dart_id": {"dtype": "int32", "id": null, "_type": "Value"}, "tripleset": [[{"dtype": "string", "id": null, "_type": "Value"}]], "subtree_was_extended": {"dtype": "bool", 
"id": null, "_type": "Value"}, "target_sources": [{"dtype": "string", "id": null, "_type": "Value"}], "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "dart", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 21805512, "num_examples": 62659, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 1868728, "num_examples": 2768, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 3767444, "num_examples": 6959, "dataset_name": "gem"}}, "download_checksums": {"https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json": {"num_bytes": 22001131, "checksum": "0671b56f4b090ccf1c0187364d45c6f1214421d6f1081a21800596860f314e70"}, "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-dev.json": {"num_bytes": 2370637, "checksum": "5038f3543b6d59b94ac4e3f69d15a0b01d8578913f862142e7c560200dd6e434"}, "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-test.json": {"num_bytes": 5001020, "checksum": "c772553b482dd5fc7b8ad90d68889062a2603e28d4449ee1f162006819e0565e"}}, "download_size": 29372788, "post_processing_size": null, "dataset_size": 27441684, "size_in_bytes": 56814472}, "e2e_nlg": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "meaning_representation": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "e2e_nlg", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8369049, "num_examples": 33525, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 1741130, "num_examples": 4299, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 2036248, "num_examples": 4693, "dataset_name": "gem"}}, "download_checksums": {"https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv": {"num_bytes": 11100744, "checksum": "12a4f59ec85ddd2586244aaf166f65d1b8cd468b6227e6620108baf118d5b325"}, "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/devel-fixed.no-ol.csv": {"num_bytes": 1581285, "checksum": "bb88df2565826a463f96e93a5ab69a8c6460de54f2e68179eb94f0019f430d4d"}, 
"https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/test-fixed.csv": {"num_bytes": 1915378, "checksum": "99b43c2769a09d62fc5d37dcffaa59d4092bcffdc611f226258681df61269b17"}}, "download_size": 14597407, "post_processing_size": null, "dataset_size": 12146427, "size_in_bytes": 26743834}, "totto": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "totto_id": {"dtype": "int32", "id": null, "_type": "Value"}, "table_page_title": {"dtype": "string", "id": null, "_type": "Value"}, "table_webpage_url": {"dtype": "string", "id": null, "_type": "Value"}, "table_section_title": {"dtype": "string", "id": null, "_type": "Value"}, "table_section_text": {"dtype": "string", "id": null, "_type": "Value"}, "table": [[{"column_span": {"dtype": "int32", "id": null, "_type": "Value"}, "is_header": {"dtype": "bool", "id": null, "_type": "Value"}, "row_span": {"dtype": "int32", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}]], "highlighted_cells": [[{"dtype": "int32", "id": null, "_type": "Value"}]], "example_id": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_annotations": [{"original_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_after_deletion": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_after_ambiguity": {"dtype": "string", "id": null, "_type": "Value"}, "final_sentence": {"dtype": "string", "id": null, "_type": "Value"}}], "overlap_subset": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "totto", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 667100769, "num_examples": 121153, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 50143072, "num_examples": 7700, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 40751580, "num_examples": 7700, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/totto/totto_data.zip": {"num_bytes": 187724372, "checksum": "0aab72597057394514fd9659745fd2b318d1a64bf0b2ca1b2c339abe0692fdf2"}}, "download_size": 187724372, "post_processing_size": null, "dataset_size": 757995421, "size_in_bytes": 945719793}, "web_nlg_en": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 
13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "input": [{"dtype": "string", "id": null, "_type": "Value"}], "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}], "category": {"dtype": "string", "id": null, "_type": "Value"}, "webnlg_id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "web_nlg_en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12157633, "num_examples": 35426, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 1105091, "num_examples": 1667, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 1362011, "num_examples": 1779, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_train.json": {"num_bytes": 10135450, "checksum": "959646a986465c436362dfc44bb4966d5a2d39f2725b39fe32701981daf666d0"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_val.json": {"num_bytes": 1273018, "checksum": "8214bf87ff0369e505ba5c11cdbbaa1127f7908ad77a75a2f1d1a76730c3a954"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_test.json": {"num_bytes": 1537460, "checksum": "68a4a919a9b805e17959a52f7d5c14a6083bba1459645b4189824fca468e362d"}}, "download_size": 12945928, "post_processing_size": null, "dataset_size": 14624735, "size_in_bytes": 27570663}, "web_nlg_ru": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "input": [{"dtype": "string", "id": null, "_type": "Value"}], "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}], "category": {"dtype": "string", "id": null, "_type": "Value"}, "webnlg_id": {"dtype": "string", "id": null, "_type": "Value"}}, 
"post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "web_nlg_ru", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6518731, "num_examples": 14630, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 773194, "num_examples": 790, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 1119940, "num_examples": 1102, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_train.json": {"num_bytes": 5724246, "checksum": "bfaa20bd792a34fda25cff766fbabaf12c56c60b898865a2f976cfaad9c04d2e"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_val.json": {"num_bytes": 783342, "checksum": "ac2e74d8618196ccf44be695dbdf4960e1f15dc9a39ebd754a808e793327aafd"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_test.json": {"num_bytes": 1123674, "checksum": "24f4282eb6aa8dc424b6b676e1531a730b508e999b2c55d52215e72e4c7ec524"}}, "download_size": 7631262, "post_processing_size": null, "dataset_size": 8411865, "size_in_bytes": 16043127}, "wiki_auto_asset_turk": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "source_id": {"dtype": "string", "id": null, "_type": "Value"}, "target_id": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "wiki_auto_asset_turk", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 121568050, "num_examples": 373801, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 30379300, "num_examples": 73249, "dataset_name": "gem"}, "test_asset": {"name": "test_asset", "num_bytes": 416007, "num_examples": 359, "dataset_name": "gem"}, "test_turk": {"name": "test_turk", "num_bytes": 405247, "num_examples": 359, "dataset_name": "gem"}}, "download_checksums": {"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/train.tsv": {"num_bytes": 106346588, "checksum": "82fa388de3ded6d303b95fcd11ba70e0b6158d2df1cbf24913bb54503bd32e95"}, "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/dev.tsv": {"num_bytes": 20232621, "checksum": "c56a9d2a739f9da83f90c54e266e1d60dd036cb80c463f118cb55613232e2e41"}, 
"https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.0": {"num_bytes": 35457, "checksum": "66f36029d0c732eb92886021faefe531c6cfd0a32bdbe7ae4aa97fd45bd1b046"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.1": {"num_bytes": 34096, "checksum": "d323ceb364abbe84c79b14b028aa1ff563cd94955fbab19049612548dbb0f83f"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.2": {"num_bytes": 34348, "checksum": "786b55f8425ce4a993e98be5e2bea9ef87bf536b96dc13f7a57c4733fdb63e06"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.3": {"num_bytes": 37292, "checksum": "e211c9e2ede1dfe315097132dbe4feda76b309bdc636a5394cb5d2664ba5bf52"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.4": {"num_bytes": 35887, "checksum": "37be9cf0592c0f68d87848dc9c442fe62f344518c1993896c00788bf943b755d"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.5": {"num_bytes": 35351, "checksum": "8485210573a3bd76116de8e978b227677c6c207111a4938729397c4e603dfa46"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.6": {"num_bytes": 35846, "checksum": "f0cb3ab823d23203ea044f81bd7e67cc823db0632095e43b78a54a9891a0b0a8"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.7": {"num_bytes": 34560, "checksum": "35cbb8b9964252a1470607634f19ad946c6bc2951b3e500eedd826baf12bd3c8"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.8": {"num_bytes": 35830, "checksum": "047b6419590b88f93b435d3177bba1883dc9c0dc178676e48470b408236446f4"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.9": {"num_bytes": 35313, "checksum": "3f5745e4f2743563b88ea4284ec35fa4ddb68d62de80b63ffb87751b998fe6b8"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.0": {"num_bytes": 42071, "checksum": "1dd953869c842f35de4b97e521e30ce383319dd880d1e03b4471794d8d44c810"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.1": {"num_bytes": 41998, "checksum": "e0c5151e5d7f8206f0084982cc41e79ea8c235e897b01b6847d368dac2c58eb3"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.2": {"num_bytes": 42322, "checksum": "7b7abbc1a8aeca4cb629dff1b4f54fa6788e1275fc88d3bb8a1588270935b62d"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.3": {"num_bytes": 41923, "checksum": "b2fc918a0d3a6dac0f22375758ff81579067860620429de6e4efaf3321f50b16"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.4": {"num_bytes": 42165, "checksum": "a02359147dc651e71d27f09a1a941fb667f57d3b4e86241945d0a0eb9b969c42"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.5": {"num_bytes": 42372, "checksum": "c376aceedd417c1b49eadc69987e05b51d04046a1628671e298dcb97827ff747"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.6": {"num_bytes": 42441, "checksum": "5b0ff64f32ccc6818e7167c0a62a7b621052c58faec9a57a18195b870b0f5a73"}, "https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.7": {"num_bytes": 42191, "checksum": "0f557e657c0dc37263b4a25dfa56778fed54e5306761c7279751e92768d09eef"}}, 
"download_size": 127270672, "post_processing_size": null, "dataset_size": 152768604, "size_in_bytes": 280039276}, "schema_guided_dialog": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthors={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "dialog_acts": [{"act": {"num_classes": 18, "names": ["AFFIRM", "AFFIRM_INTENT", "CONFIRM", "GOODBYE", "INFORM", "INFORM_COUNT", "INFORM_INTENT", "NEGATE", "NEGATE_INTENT", "NOTIFY_FAILURE", "NOTIFY_SUCCESS", "OFFER", "OFFER_INTENT", "REQUEST", "REQUEST_ALTS", "REQ_MORE", "SELECT", "THANK_YOU"], "names_file": null, "id": null, "_type": "ClassLabel"}, "slot": {"dtype": "string", "id": null, "_type": "Value"}, "values": [{"dtype": "string", "id": null, "_type": "Value"}]}], "dialog_id": {"dtype": "string", "id": null, "_type": "Value"}, "turn_id": {"dtype": "int32", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "builder_name": "gem", "config_name": "schema_guided_dialog", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 39690792, "num_examples": 164982, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 3037236, "num_examples": 10000, "dataset_name": "gem"}, "test": {"name": "test", "num_bytes": 3055370, "num_examples": 10000, "dataset_name": "gem"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd.json.zip": {"num_bytes": 8645279, "checksum": "043e6e1aa43c922d364643405afb7e96184271108d51a353c0bcef6b99bdd82b"}}, "download_size": 8645279, "post_processing_size": null, "dataset_size": 45783398, "size_in_bytes": 54428677}}
dummy/common_gen/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59e6e129316e63c1a54d6ef7bd9a0c540c62ff4c1a36df33327a9e3facd3b4e3
+ size 2333
dummy/cs_restaurants/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25cab7c6d4e34d5fb08d476ce278d5e01ee6211c1b99629795bef0156e6aa785
+ size 1841
dummy/dart/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cb45f27226107fb40f87082ba336a81daa2b1c80c0ac56e58a756e6ac985e99
+ size 2115
dummy/e2e_nlg/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02c353e0079daa9fc1b1d2e63f4f94a389ab340ad15f78e874c226dc355836ae
+ size 1338
dummy/mlsum_de/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f697a1e9790e5d549f666ccde174a53b9fd07c8ec133007b844b646431ee057
+ size 17313
dummy/mlsum_es/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c687ab8eae7138c11884087cf4dbb28b42b8eed9be16ca70b3b9e383eca86be3
+ size 23054
dummy/schema_guided_dialog/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73573be9eb634941d2daa888cfcf504cc3bbabab7a8e0d1712a55e7037b230b0
+ size 1899
dummy/totto/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a730949a9fa8a9d5affcd9ec6069470a531903856f97f73971d5a3ef2f8a8801
+ size 24427
dummy/web_nlg_en/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11e43d5dc953eae0070317b95ad533a46b8f2dc0c5751d33234d29b1e832bc75
+ size 2623
dummy/web_nlg_ru/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:428efef997ade4b3c7f9b110a681d2a24abe57f40c4f342826f57f85f8fb9ca7
+ size 3822
dummy/wiki_auto_asset_turk/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80352624751ac6f5a3cb44439470ec3ffec0a901e9eafe83bcf14c61372dbfa0
+ size 10318
dummy/wiki_lingua_es_en/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f4888f92cf604094d003cf43efd422186dd3d706e633f08b1e63b0c11964b54
+ size 17768
dummy/wiki_lingua_ru_en/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:568dcfeeabc0d5cee3ef8b89a466bc7a635ba7c86bc1c1466d75c41622f72ee8
+ size 21539
dummy/wiki_lingua_tr_en/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b2b3a9e8191a3d17ac4f1eb5ac83f6d6fcc0ba89a5092831710910157d6c177
+ size 18336
dummy/wiki_lingua_vi_en/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a41c1262cdd62d6d3457f6e4ccac79302bd484cbc96f9db5ac5078df14ab1f6
+ size 21530
dummy/xsum/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5f62f61f9fdb8eed99b3368c890cfc148e950665e53957f575d4c2b65d9fc48
+ size 2919
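Each `dummy_data.zip` entry above is stored as a Git LFS pointer, not the archive itself: the three `+` lines record the LFS spec version, the SHA-256 digest of the real blob, and its size in bytes. A hedged sketch for verifying a fetched blob against its pointer (both paths hypothetical; the real archives come down via `git lfs pull`):

import hashlib

def check_lfs_pointer(pointer_path, blob_path):
    # Parse the three-line pointer: "version ...", "oid sha256:<hex>", "size <bytes>".
    fields = {}
    with open(pointer_path, encoding="utf-8") as f:
        for line in f:
            key, value = line.strip().split(" ", 1)
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    with open(blob_path, "rb") as f:
        blob = f.read()
    return hashlib.sha256(blob).hexdigest() == expected_oid and len(blob) == int(fields["size"])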
gem.py ADDED
@@ -0,0 +1,808 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """GEM: Generation Evaluation Metrics supporting datasets"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ authors={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,
+ both through human annotations and automated Metrics.
+
+ GEM aims to:
+ - measure NLG progress across 13 datasets spanning many NLG tasks and languages.
+ - provide an in-depth analysis of data and models presented via data statements and challenge sets.
+ - develop standards for evaluation of generated text using both automated and human metrics.
+
+ It is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development
+ by extending existing data or developing datasets for additional languages.
+ """
+
+ _HOMEPAGE = "https://gem-benchmark.github.io/"
+
+ _LICENSE = "CC-BY-SA-4.0"
+
+ _TASKS = {
+     "summarization": {
+         "mlsum": ["mlsum_de", "mlsum_es"],
+         "wiki_lingua": ["wiki_lingua_es_en", "wiki_lingua_ru_en", "wiki_lingua_tr_en", "wiki_lingua_vi_en"],
+         "xsum": ["xsum"],
+     },
+     "struct2text": {
+         "common_gen": ["common_gen"],
+         "cs_restaurants": ["cs_restaurants"],
+         "dart": ["dart"],
+         "e2e": ["e2e_nlg"],
+         "totto": ["totto"],
+         "web_nlg": ["web_nlg_en", "web_nlg_ru"],
+     },
+     "simplification": {
+         "wiki_auto_asset_turk": ["wiki_auto_asset_turk"],
+     },
+     "dialog": {
+         "schema_guided_dialog": ["schema_guided_dialog"],
+     },
+ }
+
+ _URLs = {
+     "common_gen": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip",
+     },
+     "cs_restaurants": {
+         "train": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/train.json",
+         "validation": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/devel.json",
+         "test": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/test.json",
+     },
+     "dart": {
+         "train": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json",
+         "validation": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-dev.json",
+         "test": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-test.json",
+     },
+     "e2e_nlg": {
+         "train": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv",
+         "validation": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/devel-fixed.no-ol.csv",
+         "test": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/test-fixed.csv",
+     },
+     "mlsum_de": {
+         "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_train.zip",
+         "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_val.zip",
+         "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_test.zip",
+         "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids.json",
+     },
+     "mlsum_es": {
+         "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_train.zip",
+         "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_val.zip",
+         "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_test.zip",
+         "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids.json",
+     },
+     "schema_guided_dialog": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd.json.zip",
+     },
+     "totto": {
+         "data": "https://storage.googleapis.com/totto/totto_data.zip",
+     },
+     "web_nlg_en": {
+         "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_train.json",
+         "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_val.json",
+         "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_test.json",
+     },
+     "web_nlg_ru": {
+         "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_train.json",
+         "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_val.json",
+         "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_test.json",
+     },
+     "wiki_auto_asset_turk": {
+         "train": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/train.tsv",
+         "validation": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/dev.tsv",
+     },
+     "wiki_lingua_es_en": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "wiki_lingua_ru_en": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "wiki_lingua_tr_en": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "wiki_lingua_vi_en": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "xsum": {
+         "data": "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz",
+         "splits": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json",
+     },
+ }
+
+ # Add Turk and Asset files
+ for i in range(10):
+     _URLs["wiki_auto_asset_turk"][
+         f"test_asset_{i}"
+     ] = f"https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.{i}"
+
+ for i in range(8):
+     _URLs["wiki_auto_asset_turk"][
+         f"test_turk_{i}"
+     ] = f"https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.{i}"
+
+ _SGD_ACTS = [
+     "AFFIRM",
+     "AFFIRM_INTENT",
+     "CONFIRM",
+     "GOODBYE",
+     "INFORM",
+     "INFORM_COUNT",
+     "INFORM_INTENT",
+     "NEGATE",
+     "NEGATE_INTENT",
+     "NOTIFY_FAILURE",
+     "NOTIFY_SUCCESS",
+     "OFFER",
+     "OFFER_INTENT",
+     "REQUEST",
+     "REQUEST_ALTS",
+     "REQ_MORE",
+     "SELECT",
+     "THANK_YOU",
+ ]
+
+ _XSUM_REMOVE_LINES = set(
+     [
+         "Share this with\n",
+         "Email\n",
+         "Facebook\n",
+         "Messenger\n",
+         "Twitter\n",
+         "Pinterest\n",
+         "WhatsApp\n",
+         "Linkedin\n",
+         "LinkedIn\n",
+         "Copy this link\n",
+         "These are external links and will open in a new window\n",
+     ]
+ )
+
+
+ class Gem(datasets.GeneratorBasedBuilder):
+     """GEM: datasets supporting the Generation Evaluation Metrics 2021 shared task."""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name=conf,
+             version=datasets.Version("1.0.0"),
+             description=f"GEM benchmark: {task} task, {conf} subset",
+         )
+         for task, dset_confs in _TASKS.items()
+         for conf_list in dset_confs.values()
+         for conf in conf_list
+     ]
+
+     DEFAULT_CONFIG_NAME = "common_gen"  # First alphabetical
+
+     def _info(self):
+         if self.config.name == "common_gen":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "concept_set_id": datasets.Value("int32"),
+                     "concepts": [datasets.Value("string")],
+                     "target": datasets.Value("string"),  # single target for train
+                     "references": [datasets.Value("string")],  # multiple references for validation
+                 }
+             )
+         elif self.config.name == "cs_restaurants":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "dialog_act": datasets.Value("string"),
+                     "dialog_act_delexicalized": datasets.Value("string"),
+                     "target_delexicalized": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.name == "dart":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "dart_id": datasets.Value("int32"),
+                     "tripleset": [[datasets.Value("string")]],  # list of triples
+                     "subtree_was_extended": datasets.Value("bool"),
+                     "target_sources": [datasets.Value("string")],
+                     "target": datasets.Value("string"),  # single target for train
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.name == "e2e_nlg":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "meaning_representation": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.name.startswith("mlsum"):
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "topic": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "date": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.name == "schema_guided_dialog":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "dialog_acts": [
+                         {
+                             "act": datasets.ClassLabel(names=_SGD_ACTS),
+                             "slot": datasets.Value("string"),
+                             "values": [datasets.Value("string")],
+                         }
+                     ],
+                     "dialog_id": datasets.Value("string"),
+                     "turn_id": datasets.Value("int32"),
+                     "prompt": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.name == "totto":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "totto_id": datasets.Value("int32"),
+                     "table_page_title": datasets.Value("string"),
+                     "table_webpage_url": datasets.Value("string"),
+                     "table_section_title": datasets.Value("string"),
+                     "table_section_text": datasets.Value("string"),
+                     "table": [
+                         [
+                             {
+                                 "column_span": datasets.Value("int32"),
+                                 "is_header": datasets.Value("bool"),
+                                 "row_span": datasets.Value("int32"),
+                                 "value": datasets.Value("string"),
+                             }
+                         ]
+                     ],
+                     "highlighted_cells": [[datasets.Value("int32")]],
+                     "example_id": datasets.Value("string"),
+                     "sentence_annotations": [
+                         {
+                             "original_sentence": datasets.Value("string"),
+                             "sentence_after_deletion": datasets.Value("string"),
+                             "sentence_after_ambiguity": datasets.Value("string"),
+                             "final_sentence": datasets.Value("string"),
+                         }
+                     ],
+                     "overlap_subset": datasets.Value("string"),
+                     "target": datasets.Value("string"),  # single target for train
+                     "references": [datasets.Value("string")],
+                 },
+             )
+         elif self.config.name.startswith("web_nlg"):
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "input": [datasets.Value("string")],
+                     "target": datasets.Value("string"),  # single target for train
+                     "references": [datasets.Value("string")],
+                     "category": datasets.Value("string"),
+                     "webnlg_id": datasets.Value("string"),
+                 }
+             )
+         elif self.config.name == "wiki_auto_asset_turk":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "source_id": datasets.Value("string"),
+                     "target_id": datasets.Value("string"),
+                     "source": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.name.startswith("wiki_lingua"):
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "source": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.name == "xsum":
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "xsum_id": datasets.Value("string"),
+                     "document": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
+         if self.config.name == "common_gen":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["data"], "commongen.train.jsonl"),
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["data"], "commongen.dev.jsonl"),
+                         "split": "validation",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["data"], "commongen.test_noref.jsonl"),
+                         "split": "test",
+                     },
+                 ),
+             ]
+         elif self.config.name == "cs_restaurants":
+             return [
+                 datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
+                 for spl in ["train", "validation", "test"]
+             ]
+         elif self.config.name == "dart":
+             return [
+                 datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
+                 for spl in ["train", "validation", "test"]
+             ]
+         elif self.config.name == "e2e_nlg":
+             return [
+                 datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
+                 for spl in ["train", "validation", "test"]
+             ]
+         elif self.config.name.startswith("mlsum"):
+             lang = self.config.name.split("_")[1]
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["train"], lang + "_train.jsonl"),
+                         "split": "train",
+                         "lang": lang,
+                         "filepaths": dl_dir["bad_ids"],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["validation"], lang + "_val.jsonl"),
+                         "split": "validation",
+                         "lang": lang,
+                         "filepaths": dl_dir["bad_ids"],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["test"], lang + "_test.jsonl"),
+                         "split": "test",
+                         "lang": lang,
+                         "filepaths": dl_dir["bad_ids"],
+                     },
+                 ),
+             ]
+         elif self.config.name == "schema_guided_dialog":
+             return [
+                 datasets.SplitGenerator(
+                     name=spl, gen_kwargs={"filepath": os.path.join(dl_dir["data"], "gem_sgd.json"), "split": spl}
+                 )
+                 for spl in ["train", "validation", "test"]
+             ]
+         elif self.config.name == "totto":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["data"], "totto_data/totto_train_data.jsonl"),
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["data"], "totto_data/totto_dev_data.jsonl"),
+                         "split": "validation",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": os.path.join(dl_dir["data"], "totto_data/unlabeled_totto_test_data.jsonl"),
+                         "split": "test",
+                     },
+                 ),
+             ]
+         elif self.config.name.startswith("web_nlg"):
+             return [
+                 datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
+                 for spl in ["train", "validation", "test"]
+             ]
+         elif self.config.name == "wiki_auto_asset_turk":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": dl_dir["train"],
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": dl_dir["validation"],
+                         "split": "validation",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name="test_asset",
+                     gen_kwargs={
+                         "filepath": "",
+                         "split": "test",
+                         "filepaths": [dl_dir[f"test_asset_{i}"] for i in range(10)],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name="test_turk",
+                     gen_kwargs={
+                         "filepath": "",
+                         "split": "test",
+                         "filepaths": [dl_dir[f"test_turk_{i}"] for i in range(8)],
+                     },
+                 ),
+             ]
+         elif self.config.name.startswith("wiki_lingua"):
+             lang = self.config.name.split("_")[-2]
+             base_dir = os.path.join(dl_dir["data"], "GEM_data_crosslingual", f"{lang}_en")
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "val",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "test",
+                     },
+                 ),
+             ]
+         elif self.config.name == "xsum":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": dl_dir["splits"],
+                         "split": "train",
+                         "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": dl_dir["splits"],
+                         "split": "validation",
+                         "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": dl_dir["splits"],
+                         "split": "test",
+                         "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
+                     },
+                 ),
+             ]
+
+     def _generate_examples(self, filepath, split, filepaths=None, lang=None):
+         """Yields examples."""
+         if self.config.name == "common_gen":
+             with open(filepath, encoding="utf-8") as f:
+                 id_ = -1
+                 i = -1
+                 for row in f:
+                     row = row.replace(", }", "}")  # Fix possible JSON format error
+                     data = json.loads(row)
+                     concepts = [word for word in data["concept_set"].split("#")]
+                     if split == "train":
+                         i += 1
+                         for scene in data["scene"]:
+                             id_ += 1
+                             yield id_, {
+                                 "gem_id": f"{self.config.name}-{split}-{id_}",
+                                 "concept_set_id": i,
+                                 "concepts": concepts,
+                                 "target": scene,
+                                 "references": [],
+                             }
+                     else:
+                         id_ += 1
+                         yield id_, {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "concept_set_id": id_,
+                             "concepts": concepts,
+                             "target": "" if split == "test" else data["scene"][0],
+                             "references": [] if split == "test" else data["scene"],
+                         }
+         elif self.config.name == "cs_restaurants":
+             with open(filepath, encoding="utf8") as f:
+                 data = json.load(f)
+                 for id_, instance in enumerate(data):
+                     yield id_, {
+                         "gem_id": f"{self.config.name}-{split}-{id_}",
+                         "dialog_act": instance["da"],
+                         "dialog_act_delexicalized": instance["delex_da"],
+                         "target": instance["text"],
+                         "target_delexicalized": instance["delex_text"],
+                         "references": [] if split == "train" else [instance["text"]],
+                     }
+         elif self.config.name == "dart":
+             with open(filepath, encoding="utf-8") as f:
+                 data = json.loads(f.read())
+                 id_ = -1
+                 i = -1
+                 for example in data:
+                     if split == "train":
+                         i += 1
+                         for annotation in example["annotations"]:
+                             id_ += 1
+                             yield id_, {
+                                 "gem_id": f"{self.config.name}-{split}-{id_}",
+                                 "dart_id": i,
+                                 "tripleset": example["tripleset"],
+                                 "subtree_was_extended": example.get("subtree_was_extended", None),  # some are missing
+                                 "target_sources": [annotation["source"] for annotation in example["annotations"]],
+                                 "target": annotation["text"],
+                                 "references": [],
+                             }
+                     else:
+                         id_ += 1
+                         yield id_, {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "dart_id": id_,
+                             "tripleset": example["tripleset"],
+                             "subtree_was_extended": example.get("subtree_was_extended", None),  # some are missing
+                             "target_sources": [annotation["source"] for annotation in example["annotations"]],
+                             "target": example["annotations"][0]["text"] if len(example["annotations"]) > 0 else "",
+                             "references": [annotation["text"] for annotation in example["annotations"]],
+                         }
+         elif self.config.name == "e2e_nlg":
+             with open(filepath, encoding="utf-8") as f:
+                 reader = csv.DictReader(f)
+                 for id_, example in enumerate(reader):
+                     yield id_, {
+                         "gem_id": f"{self.config.name}-{split}-{id_}",
+                         "meaning_representation": example["mr"],
+                         "target": example["ref"],
+                         "references": [] if split == "train" else [example["ref"]],
+                     }
+         elif self.config.name.startswith("mlsum"):
+             bad_ids_dct = json.load(open(filepaths, encoding="utf-8"))
+             bad_ids = dict((bad_url, True) for _, bad_url in bad_ids_dct[f"{lang}-{split}"])
+             with open(filepath, encoding="utf-8") as f:
+                 id_ = -1
+                 for line in f:
+                     data = json.loads(line)
+                     if data["url"] in bad_ids:  # TODO : check | i or i-1?
+                         continue
+                     else:
+                         id_ += 1
+                         yield id_, {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "text": data["text"],
+                             "target": data["summary"],
+                             "references": [] if split == "train" else [data["summary"]],
+                             "topic": data["topic"],
+                             "url": data["url"],
+                             "title": data["title"],
+                             "date": data["date"],
+                         }
+         elif self.config.name == "schema_guided_dialog":
+             examples = json.load(open(filepath, encoding="utf-8"))[split]
+             for id_, example in enumerate(examples):
+                 yield id_, {
+                     "gem_id": f"{self.config.name}-{split}-{id_}",
+                     "dialog_acts": [
+                         {
+                             "act": act_id,
+                             "slot": slot,
+                             "values": values,
+                         }
+                         for act_id, slot, values in example["da"]
+                     ],
+                     "dialog_id": example["dialog_id"],
+                     "turn_id": example["turn_ix"],
+                     "prompt": example["prompt"],
+                     "target": example["target"],
+                     "references": [] if split == "train" else [example["target"]],
+                 }
+         elif self.config.name == "totto":
+             with open(filepath, "r", encoding="utf-8") as json_file:
+                 json_list = list(json_file)
+             id_ = -1
+             i = -1
+             for json_str in json_list:
+                 result = json.loads(json_str)
+                 if split == "train":
+                     i += 1
+                     for sentence in result["sentence_annotations"]:
+                         id_ += 1
+                         response = {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "totto_id": i,
+                             "table_page_title": result["table_page_title"],
+                             "table_webpage_url": result["table_webpage_url"],
+                             "table_section_title": result["table_section_title"],
+                             "table_section_text": result["table_section_text"],
+                             "table": result["table"],
+                             "highlighted_cells": result["highlighted_cells"],
+                             "example_id": str(result["example_id"]),
+                             "overlap_subset": "none",
+                             "sentence_annotations": [sentence],
+                             "references": [],
+                             "target": sentence["final_sentence"],
+                         }
+                         yield id_, response
+                 else:
+                     id_ += 1
+                     response = {
+                         "gem_id": f"{self.config.name}-{split}-{id_}",
+                         "totto_id": id_,
+                         "table_page_title": result["table_page_title"],
+                         "table_webpage_url": result["table_webpage_url"],
+                         "table_section_title": result["table_section_title"],
+                         "table_section_text": result["table_section_text"],
+                         "table": result["table"],
+                         "highlighted_cells": result["highlighted_cells"],
+                         "example_id": str(result["example_id"]),
+                         "overlap_subset": str(result["overlap_subset"]),
+                     }
+                     response["sentence_annotations"] = [] if split == "test" else result["sentence_annotations"]
+                     response["references"] = [
+                         sentence["final_sentence"] for sentence in response["sentence_annotations"]
+                     ]
+                     response["target"] = response["references"][0] if len(response["references"]) > 0 else ""
+                     yield id_, response
+         elif self.config.name.startswith("web_nlg"):
+             with open(filepath, encoding="utf-8") as f:
+                 examples = json.load(f)
+                 id_ = -1
+                 for example in examples["values"]:
+                     if split == "train":
+                         for target in example["target"]:
+                             id_ += 1
+                             yield id_, {
+                                 "gem_id": f"{self.config.name}-{split}-{id_}",
+                                 "input": example["input"],
+                                 "target": target,
+                                 "references": [] if split == "train" else example["target"],
+                                 "category": example["category"],
+                                 "webnlg_id": example["webnlg-id"],
+                             }
+                     else:
+                         id_ += 1
+                         yield id_, {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "input": example["input"],
+                             "target": example["target"][0] if len(example["target"]) > 0 else "",
+                             "references": example["target"],
+                             "category": example["category"],
+                             "webnlg_id": example["webnlg-id"],
+                         }
+         elif self.config.name == "wiki_auto_asset_turk":
+             if split in ["train", "validation"]:
+                 keys = [
+                     "target_id",
+                     "source_id",
+                     "target",
+                     "source",
+                 ]
+                 with open(filepath, encoding="utf-8") as f:
+                     for id_, line in enumerate(f):
+                         values = line.strip().split("\t")
+                         assert len(values) == 5, f"Not enough fields in ---- {line} --- {values}"
+                         example = dict([(k, val) for k, val in zip(keys, values[1:])])
+                         example["gem_id"] = f"{self.config.name}-{split}-{id_}"
+                         example["references"] = [] if split == "train" else [example["target"]]
+                         yield id_, example
+             elif split.startswith("test"):
+                 files = [open(f_name, encoding="utf-8") for f_name in filepaths]
+                 for id_, lines in enumerate(zip(*files)):
+                     yield id_, {
+                         "gem_id": f"{self.config.name}-{split}-{id_}",
+                         "source_id": "",
+                         "target_id": "",
+                         "target": lines[1].strip(),
+                         "source": lines[0].strip(),
+                         "references": [line.strip() for line in lines[1:]],
+                     }
+         elif self.config.name.startswith("wiki_lingua"):
+             with open(os.path.join(filepath, f"{split}.src"), encoding="utf-8") as f_in:
+                 with open(os.path.join(filepath, f"{split}.tgt"), encoding="utf-8") as f_out:
+                     for id_, (src, tgt) in enumerate(zip(f_in, f_out)):
+                         yield id_, {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "source": src.strip(),
+                             "target": tgt.strip(),
+                             "references": [] if split == "train" else [tgt.strip()],
+                         }
+         elif self.config.name == "xsum":
+             with open(filepath, "r", encoding="utf-8") as f:
+                 split_ids = json.load(f)
+             for id_, i in enumerate(split_ids[split]):
+                 with open(os.path.join(filepaths, i + ".summary"), "r", encoding="utf-8") as f:
+                     text = "".join([line for line in f.readlines() if line not in _XSUM_REMOVE_LINES and line.strip()])
+                     segs = text.split("[SN]")
+                     yield id_, {
+                         "gem_id": f"{self.config.name}-{split}-{id_}",
+                         "xsum_id": i,
+                         "document": segs[8].strip(),
+                         "target": segs[6].strip(),
+                         "references": [] if split == "train" else [segs[6].strip()],
+                     }
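Once this script is on the Hub, any config declared in `BUILDER_CONFIGS` can be loaded by name through `datasets.load_dataset`. A minimal usage sketch (config choice arbitrary; fields are as defined in `_info` above):

from datasets import load_dataset

# "common_gen" is the default config; any config name from _TASKS works here.
dataset = load_dataset("gem", "common_gen")

# gem_id follows the f"{config_name}-{split}-{id_}" pattern from _generate_examples.
print(dataset["train"][0]["gem_id"])  # e.g. "common_gen-train-0"
print(dataset["validation"].features)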