hiroshi-matsuda-rit committed on
Commit 36cafbf · 1 Parent(s): a044140

Upload filtered_mc4.py

Files changed (1)
  1. filtered_mc4.py +352 -0
filtered_mc4.py ADDED
@@ -0,0 +1,352 @@
import gzip
import json
import re
from re import Pattern

import datasets


logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
The mC4 dataset to which arbitrary filters can be applied.

The original description is below:
===
A colossal, cleaned version of Common Crawl's web crawl corpus.
Based on Common Crawl dataset: "https://commoncrawl.org".
This is the processed version of Google's mC4 dataset by AllenAI.
===
"""

_CITATION = """
@article{2019t5,
    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {arXiv e-prints},
    year = {2019},
    archivePrefix = {arXiv},
    eprint = {1910.10683},
}
"""

_URL = "https://github.com/allenai/allennlp/discussions/5056"

# Shard URL template, pinned to a fixed revision of the allenai/c4 repository.
_DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-{language}{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"

_LANGUAGES = [
    "af",
    "am",
    "ar",
    "az",
    "be",
    "bg",
    "bg-Latn",
    "bn",
    "ca",
    "ceb",
    "co",
    "cs",
    "cy",
    "da",
    "de",
    "el",
    "el-Latn",
    "en",
    "eo",
    "es",
    "et",
    "eu",
    "fa",
    "fi",
    "fil",
    "fr",
    "fy",
    "ga",
    "gd",
    "gl",
    "gu",
    "ha",
    "haw",
    "hi",
    "hi-Latn",
    "hmn",
    "ht",
    "hu",
    "hy",
    "id",
    "ig",
    "is",
    "it",
    "iw",
    "ja",
    "ja-Latn",
    "jv",
    "ka",
    "kk",
    "km",
    "kn",
    "ko",
    "ku",
    "ky",
    "la",
    "lb",
    "lo",
    "lt",
    "lv",
    "mg",
    "mi",
    "mk",
    "ml",
    "mn",
    "mr",
    "ms",
    "mt",
    "my",
    "ne",
    "nl",
    "no",
    "ny",
    "pa",
    "pl",
    "ps",
    "pt",
    "ro",
    "ru",
    "ru-Latn",
    "sd",
    "si",
    "sk",
    "sl",
    "sm",
    "sn",
    "so",
    "sq",
    "sr",
    "st",
    "su",
    "sv",
    "sw",
    "ta",
    "te",
    "tg",
    "th",
    "tr",
    "uk",
    "und",
    "ur",
    "uz",
    "vi",
    "xh",
    "yi",
    "yo",
    "zh",
    "zh-Latn",
    "zu",
]

_N_SHARDS_PER_SPLIT = {
    "af": {"train": 64, "validation": 1},
    "am": {"train": 16, "validation": 1},
    "ar": {"train": 1024, "validation": 4},
    "az": {"train": 256, "validation": 1},
    "be": {"train": 128, "validation": 1},
    "bg": {"train": 1024, "validation": 1},
    "bg-Latn": {"train": 4, "validation": 1},
    "bn": {"train": 512, "validation": 1},
    "ca": {"train": 512, "validation": 1},
    "ceb": {"train": 8, "validation": 1},
    "co": {"train": 8, "validation": 1},
    "cs": {"train": 1024, "validation": 2},
    "cy": {"train": 256, "validation": 1},
    "da": {"train": 1024, "validation": 1},
    "de": {"train": 2048, "validation": 16},
    "el": {"train": 1024, "validation": 2},
    "el-Latn": {"train": 16, "validation": 1},
    "en": {"train": 11264, "validation": 128},
    "eo": {"train": 32, "validation": 1},
    "es": {"train": 2048, "validation": 16},
    "et": {"train": 256, "validation": 1},
    "eu": {"train": 64, "validation": 1},
    "fa": {"train": 1024, "validation": 2},
    "fi": {"train": 1024, "validation": 1},
    "fil": {"train": 64, "validation": 1},
    "fr": {"train": 2048, "validation": 16},
    "fy": {"train": 16, "validation": 1},
    "ga": {"train": 16, "validation": 1},
    "gd": {"train": 16, "validation": 1},
    "gl": {"train": 128, "validation": 1},
    "gu": {"train": 64, "validation": 1},
    "ha": {"train": 8, "validation": 1},
    "haw": {"train": 2, "validation": 1},
    "hi": {"train": 1024, "validation": 2},
    "hi-Latn": {"train": 16, "validation": 1},
    "hmn": {"train": 8, "validation": 1},
    "ht": {"train": 8, "validation": 1},
    "hu": {"train": 1024, "validation": 2},
    "hy": {"train": 128, "validation": 1},
    "id": {"train": 1024, "validation": 4},
    "ig": {"train": 4, "validation": 1},
    "is": {"train": 128, "validation": 1},
    "it": {"train": 1024, "validation": 8},
    "iw": {"train": 1024, "validation": 1},
    "ja": {"train": 1024, "validation": 8},
    "ja-Latn": {"train": 8, "validation": 1},
    "jv": {"train": 8, "validation": 1},
    "ka": {"train": 256, "validation": 1},
    "kk": {"train": 256, "validation": 1},
    "km": {"train": 64, "validation": 1},
    "kn": {"train": 64, "validation": 1},
    "ko": {"train": 1024, "validation": 1},
    "ku": {"train": 16, "validation": 1},
    "ky": {"train": 64, "validation": 1},
    "la": {"train": 64, "validation": 1},
    "lb": {"train": 32, "validation": 1},
    "lo": {"train": 8, "validation": 1},
    "lt": {"train": 512, "validation": 1},
    "lv": {"train": 256, "validation": 1},
    "mg": {"train": 8, "validation": 1},
    "mi": {"train": 4, "validation": 1},
    "mk": {"train": 128, "validation": 1},
    "ml": {"train": 128, "validation": 1},
    "mn": {"train": 128, "validation": 1},
    "mr": {"train": 1024, "validation": 1},
    "ms": {"train": 512, "validation": 1},
    "mt": {"train": 128, "validation": 1},
    "my": {"train": 64, "validation": 1},
    "ne": {"train": 256, "validation": 1},
    "nl": {"train": 1024, "validation": 4},
    "no": {"train": 1024, "validation": 1},
    "ny": {"train": 4, "validation": 1},
    "pa": {"train": 32, "validation": 1},
    "pl": {"train": 1024, "validation": 4},
    "ps": {"train": 16, "validation": 1},
    "pt": {"train": 1024, "validation": 4},
    "ro": {"train": 1024, "validation": 2},
    "ru": {"train": 4096, "validation": 32},
    "ru-Latn": {"train": 32, "validation": 1},
    "sd": {"train": 64, "validation": 1},
    "si": {"train": 64, "validation": 1},
    "sk": {"train": 512, "validation": 1},
    "sl": {"train": 256, "validation": 1},
    "sm": {"train": 4, "validation": 1},
    "sn": {"train": 8, "validation": 1},
    "so": {"train": 64, "validation": 1},
    "sq": {"train": 128, "validation": 1},
    "sr": {"train": 256, "validation": 1},
    "st": {"train": 2, "validation": 1},
    "su": {"train": 4, "validation": 1},
    "sv": {"train": 1024, "validation": 2},
    "sw": {"train": 32, "validation": 1},
    "ta": {"train": 256, "validation": 1},
    "te": {"train": 128, "validation": 1},
    "tg": {"train": 64, "validation": 1},
    "th": {"train": 1024, "validation": 1},
    "tr": {"train": 1024, "validation": 4},
    "uk": {"train": 1024, "validation": 2},
    "und": {"train": 3072, "validation": 32},
    "ur": {"train": 128, "validation": 1},
    "uz": {"train": 32, "validation": 1},
    "vi": {"train": 1024, "validation": 4},
    "xh": {"train": 2, "validation": 1},
    "yi": {"train": 16, "validation": 1},
    "yo": {"train": 2, "validation": 1},
    "zh": {"train": 1024, "validation": 2},
    "zh-Latn": {"train": 8, "validation": 1},
    "zu": {"train": 8, "validation": 1},
}

# An example is rejected once the reject pattern matches more than this many times.
MC4_MAX_REJECT_PATTERN_OCCURENCE = 3
MC4_FILTER_TARGET_FIELD = "text"


class FilteredMc4Config(datasets.BuilderConfig):
    """BuilderConfig for mC4."""

    def __init__(
        self,
        *args,
        languages,
        filter_target_field: str = MC4_FILTER_TARGET_FIELD,
        reject_pattern: Pattern = re.compile(r"(?!)"),  # `(?!)` never matches, so nothing is rejected by default
        max_reject_pattern_occurence: int = MC4_MAX_REJECT_PATTERN_OCCURENCE,
        **kwargs,
    ):
        """BuilderConfig for mC4.
        Args:
            languages (:obj:`List[str]`): list of languages to load
            filter_target_field (:obj:`str`): name of the example field the reject pattern is applied to
            reject_pattern (:obj:`Pattern`): compiled regex used to reject examples
            max_reject_pattern_occurence (:obj:`int`): maximum number of matches allowed before an example is rejected
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(languages),
            **kwargs,
        )
        self.languages = languages
        self.filter_target_field = filter_target_field
        self.reject_pattern = reject_pattern
        self.max_reject_pattern_occurence = max_reject_pattern_occurence

    def filter(self, example: dict) -> bool:
        # `count` is zero-based, so reaching `max_reject_pattern_occurence` means the
        # pattern has matched more than the allowed number of times: reject the example.
        for count, _ in enumerate(self.reject_pattern.finditer(example[self.filter_target_field])):
            if count == self.max_reject_pattern_occurence:
                return False
        return True


class FilteredMc4(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [FilteredMc4Config(languages=[lang]) for lang in _LANGUAGES]
    BUILDER_CONFIG_CLASS = FilteredMc4Config

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Build the full list of shard URLs for every requested language,
        # then let the download manager fetch them.
        data_urls = {}
        for split in ["train", "validation"]:
            data_urls[split] = [
                _DATA_URL.format(
                    language=lang,
                    split_suffix="-validation" if split == "validation" else "",
                    index=index,
                    n_shards=_N_SHARDS_PER_SPLIT[lang][split],
                )
                for lang in self.config.languages
                for index in range(_N_SHARDS_PER_SPLIT[lang][split])
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        # Drop examples whose target field matches the reject pattern too often.
                        if self.config.filter(example):
                            yield id_, example
                            id_ += 1
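
Because the reject pattern, its target field, and the occurrence limit all live on the builder config, the filter can be chosen at load time. Below is a minimal sketch of such a call, assuming the script has been saved locally as filtered_mc4.py; the pattern and occurrence limit shown are purely illustrative, and recent versions of datasets may additionally require trust_remote_code=True for script-based datasets.

import re

import datasets

# Load the Japanese portion of mC4, keeping only documents whose "text" field
# contains at most two matches of the illustrative reject pattern. The extra
# keyword arguments are forwarded by load_dataset to FilteredMc4Config.
dataset = datasets.load_dataset(
    "filtered_mc4.py",  # hypothetical local path to this script
    languages=["ja"],
    reject_pattern=re.compile(r"(?:Cookie|JavaScript)"),  # illustrative pattern
    max_reject_pattern_occurence=2,
    split="train",
)

With the default never-matching reject_pattern `(?!)`, the script simply yields the unfiltered mC4 stream for the requested languages.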