# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleaned and split version of the English Wikipedia."""
import json
import gzip
import textwrap
import datasets
import random
from itertools import zip_longest
logger = datasets.logging.get_logger(__name__)
_CITATION = """
"""
_DESCRIPTION = """\
"""
_HOMEPAGE = ""
_LICENSE = ""
_DATA_URL = "https://huggingface.co./datasets/pdelobelle/enwiki-yearly-cleaned/resolve/main/enwiki-yearly-cleaned/{split}/enwiki_{index}_{split}.jsonl.gz"
_CONFIG_NAMES = ["tiny", "small", "medium", "large", "full"]
_CONFIGS = dict(
    tiny={"train": 2, "validation": 1, "estimate": "0.1GB"},
    small={"train": 100, "validation": 2, "estimate": "4GB"},
    medium={"train": 750, "validation": 2, "estimate": "30GB"},
    large={"train": 1500, "validation": 3, "estimate": "59GB"},
    full={"train": 3497, "validation": 4, "estimate": "137GB"},
)
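# With the counts above, each config's train split downloads shards
# enwiki_1_train.jsonl.gz .. enwiki_<train>_train.jsonl.gz (indices start at 1
# unless a config sets an explicit "start"), and likewise for validation. For
# example, the "tiny" config resolves its first train shard to:
# https://huggingface.co./datasets/pdelobelle/enwiki-yearly-cleaned/resolve/main/enwiki-yearly-cleaned/train/enwiki_1_train.jsonl.gz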
class Wikipedia(datasets.GeneratorBasedBuilder):
    """Cleaned and split version of the English Wikipedia."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                A {name} version of the English Wikipedia.
                Estimated size of compressed files: {_CONFIGS[name]['estimate']}
                """
            ),
        )
        for name in _CONFIG_NAMES
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "year": datasets.Value("string"),
                    "tlsh": datasets.Value("string"),
                    "title": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        data_urls = {}
        config = _CONFIGS[self.config.name]
        for split in ["train", "validation"]:
            start_file = config.get("start", 1) if split == "train" else 1
            num_files = config.get(split)
            data_urls[split] = []
            for index in range(start_file, start_file + num_files):
                data_urls[split].append(
                    _DATA_URL.format(
                        split=split,
                        index=index,
                    )
                )

        # Shuffle data in streaming mode, so restarts will not always start with the same data
        if dl_manager.is_streaming:
            random.shuffle(data_urls["train"])

        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": validation_downloaded_files},
            ),
        ]
    @staticmethod
    def grouper(iterable, n, fillvalue=None):
        """Collect data into fixed-length chunks or blocks."""
        # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
        args = [iter(iterable)] * n
        return zip_longest(*args, fillvalue=fillvalue)
    @staticmethod
    def gzip_open(filepath):
        # Open a gzipped JSON Lines file for text reading; returns None if no path is given.
        if filepath:
            return gzip.open(open(filepath, "rb"), "rt", encoding="utf-8")
    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        # Read the shards two at a time and interleave their lines.
        for files in self.grouper(filepaths, 2, None):
            logger.info(f"Generating examples from {files}")
            gzip_iters = [self.gzip_open(file) for file in files if file is not None]
            for lines in zip(*gzip_iters):
                for line in lines:
                    example = json.loads(line)
                    yield id_, example
                    id_ += 1
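

# A minimal usage sketch (assumption: the script is loaded via the repository id
# that appears in _DATA_URL), kept under a __main__ guard so it never runs when
# the `datasets` library imports this builder. Streaming the "tiny" config avoids
# downloading the full archives.
if __name__ == "__main__":
    streamed = datasets.load_dataset(
        "pdelobelle/enwiki-yearly-cleaned",
        "tiny",
        split="train",
        streaming=True,
    )
    for example in streamed.take(3):
        print(example["id"], example["year"], example["title"])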