# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loading script for manually annotated NLI datasets (SNLI/WNLI in Polish and English)."""

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Data for NLI task annotated manually
"""

_BASE_URL = "https://huggingface.co./datasets/clarin-knext/nli_datasets/resolve/main/data/"

# Available configurations, named "<dataset>_<language>".
_DATASET_NAME = [
    "snli_pl",
    "snli_en",
    "wnli_pl",
    "wnli_en",
]

# Map each config name to its JSON-lines data file,
# e.g. "snli_pl" -> f"{_BASE_URL}snli_nli_pl.jsonl".
_URLS = {
    f"{dataset}_{lang}": f"{_BASE_URL}{dataset}_nli_{lang}.jsonl"
    for dataset, lang in (dataset_name.split("_") for dataset_name in _DATASET_NAME)
}


class NLIDatasetBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for one NLI dataset/language combination.

    Args:
        data_url: URL of the JSON-lines file holding the examples.
        name: Config name; must be one of ``_DATASET_NAME``.

    Raises:
        ValueError: If ``name`` is not a known configuration.
    """

    def __init__(
        self,
        data_url: str,
        name: str,
        **kwargs,
    ):
        # Validate up front so an invalid config never gets half-constructed.
        if name not in _DATASET_NAME:
            raise ValueError(
                f"Config name `{name}` is not available. Enter one of: {_DATASET_NAME}"
            )
        # super().__init__ already stores `name`; no need to reassign it here.
        super().__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            **kwargs,
        )
        self.data_url = data_url


class NLIDataset(datasets.GeneratorBasedBuilder):
    """Builder exposing each NLI dataset variant as a single TRAIN split."""

    BUILDER_CONFIGS = [
        NLIDatasetBuilderConfig(
            name=dataset,
            data_url=_URLS[dataset],
            description=f"Dataset {dataset} with NLI annotation.",
        )
        for dataset in _DATASET_NAME
    ]
    DEFAULT_CONFIG_NAME = "wnli_en"

    def _info(self) -> datasets.DatasetInfo:
        """Return the feature schema; SNLI variants carry an extra `origin_label`."""
        features = {
            "id": datasets.Value("int32"),
            "sentence_1": datasets.Value("string"),
            "sentence_2": datasets.Value("string"),
            "label": datasets.Value("string"),
        }
        if self.config.name.startswith("snli"):
            features["origin_label"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the config's data file and expose it as the TRAIN split."""
        filepath = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                },
            ),
        ]

    def _generate_examples(self, filepath: str):
        """Yield (key, example) pairs from a JSON-lines file, keyed by line index."""
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                yield key, json.loads(line)