import json

import datasets

_DESCRIPTION = """\
This dataset labels whether a GitHub repository description is relevant to
Japanese natural language processing (NLP). The labels are "Relevant (1)"
and "Not Relevant (0)".
"""

_HOMEPAGE = "https://github.com/taishi-i/awesome-japanese-nlp-resources"

_CITATION = ""

_LICENSE = "other"


class AwesomeJapaneseNlpClassificationDataset(datasets.GeneratorBasedBuilder):
    """awesome-japanese-nlp-classification-dataset."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="awesome-japanese-nlp-classification-dataset",
            version=VERSION,
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        # Binary classification: "0" = not relevant, "1" = relevant.
        features = datasets.Features(
            {
                "label": datasets.features.ClassLabel(names=["0", "1"]),
                "text": datasets.Value("string"),
                "url": datasets.Value("string"),
                "created_at": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The raw JSON split files are hosted in the dataset repository on
        # the Hugging Face Hub.
        data_url = "https://huggingface.co./datasets/taishi-i/awesome-japanese-nlp-classification-dataset/raw/main"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": dl_manager.download_and_extract(
                        f"{data_url}/train.json"
                    ),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dl_manager.download_and_extract(
                        f"{data_url}/val.json"
                    ),
                    "split": "val",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": dl_manager.download_and_extract(
                        f"{data_url}/test.json"
                    ),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Generates examples from a downloaded JSON split file."""
        with open(filepath, "r", encoding="utf-8") as file:
            data = json.load(file)
            for id_, row in enumerate(data):
                yield id_, {
                    "label": row["label"],
                    "text": row["text"],
                    "url": row["url"],
                    "created_at": row["created_at"],
                }
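
# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the original loader): a minimal way
# to exercise this builder end to end. Loading by Hub ID assumes the dataset
# is published as "taishi-i/awesome-japanese-nlp-classification-dataset" (the
# repository referenced in _split_generators); recent versions of `datasets`
# may additionally require trust_remote_code=True for script-based datasets.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "taishi-i/awesome-japanese-nlp-classification-dataset"
    )
    # Expect three splits: train, validation, and test.
    print(dataset)
    # Each example is a dict with "label", "text", "url", and "created_at".
    print(dataset["train"][0])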