# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here.""" | |
import csv | |
import json | |
import os | |
import datasets | |
import bz2 | |
# TODO: Replace this template placeholder with the proper BibTeX citation.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""
_DESCRIPTION = """\ | |
Test adding a dataset with challenge set to GEM benchmark . | |
""" | |
_HOMEPAGE = "" | |
_LICENSE = "" | |

# The HuggingFace dataset library doesn't host the datasets but only points
# to the original files. This can be an arbitrary nested dict/list of URLs
# (see the `_split_generators` method below).
_URLs = {
    "validation": "validation.jsonl",
    "test": "test.jsonl",
    "validation.full": "validation.jsonl",
    "test.full": "test.jsonl",
    # NB: a "train" entry is added dynamically (to a copy of this dict)
    # inside the `_split_generators` method, because its file name depends
    # on the selected language and quality.
}
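# For example, for a (hypothetical) configuration "en.70", `_split_generators`
# would extend a copy of this dict to:
#
#     {
#         "validation": "validation.jsonl",
#         "test": "test.jsonl",
#         "validation.full": "validation.jsonl",
#         "test.full": "test.jsonl",
#         "train": "train_en.70.jsonl.bz2",
#     }
#
# The file names are relative, so presumably they are resolved against the
# location of this dataset script.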

_VERSION = datasets.Version("1.0.0", "")


class OpusparcusConfig(datasets.BuilderConfig):
    """BuilderConfig for Opusparcus."""

    def __init__(self, lang=None, quality=100, **kwargs):
        """BuilderConfig for Opusparcus.

        Args:
            lang: string, the language code of the subcorpus to load
                (one of the codes in LANGS below).
            quality: int, the minimum estimated quality (as a percentage)
                of the training sentence pairs to include (one of the
                values in QUALITIES below).
            **kwargs: keyword arguments forwarded to super.
        """
        super(OpusparcusConfig, self).__init__(
            name="{0}.{1}".format(lang, quality),
            description="Opusparcus dataset for {0}".format(lang),
            **kwargs,
        )
        self.lang = lang
        self.quality = quality


LANGS = ["de", "en", "fi", "fr", "ru", "sv"]

QUALITIES = [100, 95, 90, 85, 80, 75, 70, 65, 60]
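
# Each (lang, quality) pair defines one builder configuration, named
# "<lang>.<quality>", e.g. "en.95" or "fr.70" -- 6 x 9 = 54 configurations
# in total.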


class Opusparcus(datasets.GeneratorBasedBuilder):
    """Opusparcus paraphrase corpus for six European languages, loadable at
    multiple training set quality levels."""

    # This is a dataset with multiple configurations, one per combination
    # of language and training set quality. We use a custom builder
    # configuration class, inheriting from datasets.BuilderConfig, to store
    # the configurable attributes (see OpusparcusConfig above).
    BUILDER_CONFIG_CLASS = OpusparcusConfig

    # The user loads one of the configurations in the following list by its
    # name; see the usage sketch below the list.
    BUILDER_CONFIGS = [
        OpusparcusConfig(lang=lang, quality=quality, version=_VERSION)
        for lang in LANGS
        for quality in QUALITIES
    ]
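    # For example (illustrative; the actual dataset id on the Hub may differ):
    #
    #     data = datasets.load_dataset("GEM/opusparcus", "de.100")
    #     data = datasets.load_dataset("GEM/opusparcus", "en.95")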

    # There is no default configuration; the user always needs to specify one:
    # DEFAULT_CONFIG_NAME = None

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which
        # contains information and typings for the dataset.
        features = datasets.Features(
            {
                "lang": datasets.Value("string"),
                "sent1": datasets.Value("string"),
                "sent2": datasets.Value("string"),
                "annot_score": datasets.Value("float"),
                "gem_id": datasets.Value("string"),
            }
        )
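        # An illustrative example record matching these features (all values
        # made up):
        #
        #     {
        #         "lang": "en",
        #         "sent1": "It was a lot of fun.",
        #         "sent2": "We had a good time.",
        #         "annot_score": 4.0,
        #         "gem_id": "gem-opusparcus-validation-42",
        #     }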
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # The paraphrase pair (sent1, sent2) forms a natural
            # (input, target) tuple; it is used if as_supervised=True in
            # builder.as_dataset:
            supervised_keys=("sent1", "sent2"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method is tasked with downloading/extracting the data and
        # defining the splits depending on the configuration. Several
        # configurations are possible (listed in BUILDER_CONFIGS), and the
        # configuration selected by the user is in self.config.name, which
        # consists of two fields separated by a period, containing the
        # values of self.config.lang and self.config.quality.
        if self.config.lang is None:
            # This is an error: without a language there is nothing to do here.
            return []

        # Select which file of the training data contains the matching data.
        # Work on a copy of the module-level dict, so that repeated loads
        # with different configurations do not see each other's "train" entry:
        urls = dict(_URLs)
        if self.config.quality < 70:
            # We need to retrieve the largest training set file,
            # containing the full training set for the desired language.
            urls["train"] = "train_{0}.60.jsonl.bz2".format(self.config.lang)
        elif self.config.quality <= 95:
            # We can do with a smaller version of the training set
            # for the desired language.
            urls["train"] = "train_{0}.70.jsonl.bz2".format(self.config.lang)
        # Otherwise, if the desired quality is above 95, we do not download
        # any training data, because there is no matching data.

        # The validation and test sets are so small that we do not perform
        # any filtering or optimization at this stage.

        # dl_manager is a datasets.download.DownloadManager, which downloads
        # and extracts the URLs. (It can accept any nested list/dict and will
        # give back the same structure with the URLs replaced by paths to
        # local files. By default, archives are extracted and a path to a
        # cached folder where they were extracted is returned instead of the
        # archive.)
        data_dir = dl_manager.download_and_extract(urls)
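        # `data_dir` now has the same keys as the URL dict, mapping each
        # split to a local file path, roughly like this (paths illustrative):
        #
        #     {
        #         "validation": "/path/to/cache/validation.jsonl",
        #         "test": "/path/to/cache/test.jsonl",
        #         "validation.full": "/path/to/cache/validation.jsonl",
        #         "test.full": "/path/to/cache/test.jsonl",
        #         "train": "/path/to/cache/train_en.70.jsonl.bz2",
        #     }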

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name="test.full",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test.full"],
                    "split": "test.full",
                },
            ),
            datasets.SplitGenerator(
                name="validation.full",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation.full"],
                    "split": "validation.full",
                },
            ),
        ]

        # If the desired quality value is 100, no subset of the training set
        # is good enough, and we only produce validation and test sets, in
        # order to save space and time:
        if self.config.quality <= 95:
            # In this case there is matching training data, so we produce
            # a train split.
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "lang": self.config.lang,
                        "quality": self.config.quality,
                        "filepath": data_dir["train"],
                        "split": "train",
                    },
                )
            )
        return splits

    def _generate_examples(self, lang, quality, filepath, split):
        """Yields examples as (key, example) tuples.

        The method parameters are unpacked from the `gen_kwargs` given in
        `_split_generators`.
        """
        # This method handles the input defined in _split_generators to
        # yield (key, example) tuples from the dataset. The `key` is here
        # for legacy reasons (tfds) and is not important in itself.
        if split == datasets.Split.TRAIN:
            # Training sets are in compressed bz2 files. They contain a
            # field "quality" that is missing from the validation and test
            # sets. We also know that such a file only contains the desired
            # language, because for the training sets the languages are in
            # separate files, and only the desired language has been
            # downloaded.
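            # An illustrative line from a training file (all field values
            # made up):
            #
            #     {"lang": "en", "sent1": "...", "sent2": "...",
            #      "quality": 78.5, "gem_id": "..."}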
            with bz2.open(filepath, "rt", encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["quality"] < quality:
                        # The rest of this file contains data of too low
                        # quality, because the data is sorted best first.
                        break
                    yield id_, {
                        "lang": data["lang"],
                        "sent1": data["sent1"],
                        "sent2": data["sent2"],
                        "annot_score": 0.0,  # 0.0 means that there is no annotation
                        "gem_id": data["gem_id"],
                    }
        else:
            # The validation and test sets are in jsonl files. They contain
            # the fields "lang" and "annot_score" that we filter on. If we
            # ask for the full sets, we keep all data entries, including the
            # sentence pairs that were not considered paraphrases by the
            # annotators:
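            # An illustrative line from validation.jsonl or test.jsonl (all
            # field values made up):
            #
            #     {"lang": "fi", "sent1": "...", "sent2": "...",
            #      "annot_score": 3.5, "gem_id": "..."}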
            keep_all = (split == "validation.full" or split == "test.full")
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["lang"] == lang:  # only keep the desired language
                        if keep_all or data["annot_score"] >= 3.0:
                            # For the full sets, keep everything; for the
                            # standard test and validation sets, keep only
                            # the paraphrases (annot_score >= 3.0 means "good
                            # or mostly good example of paraphrases"):
                            yield id_, {
                                "lang": data["lang"],
                                "sent1": data["sent1"],
                                "sent2": data["sent2"],
                                "annot_score": data["annot_score"],
                                "gem_id": data["gem_id"],
                            }