import datasets

logger = datasets.logging.get_logger(__name__)

_HOMEPAGE = "https://www.google.com"
_URL = "https://huggingface.co./datasets/chintagunta85/pv_dataset/raw/main/"
_TRAINING_FILE = "pv_train.tsv"
_DEV_FILE = "pv_val.tsv"
_TEST_FILE = "pv_test.tsv"


class PVDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for PVDataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for PVDataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(PVDatasetConfig, self).__init__(**kwargs)


class PVDataset(datasets.GeneratorBasedBuilder):
    """PVDataset corpus: token-level NER annotations for genes, chemicals, diseases, and species."""

    BUILDER_CONFIGS = [
        PVDatasetConfig(
            name="PVDatasetCorpus",
            version=datasets.Version("1.0.0"),
            description="PVDataset",
        ),
    ]

    def _info(self):
        # IOB2 label set covering the four entity types present in the corpus.
        custom_names = [
            "O",
            "B-GENE",
            "I-GENE",
            "B-CHEMICAL",
            "I-CHEMICAL",
            "B-DISEASE",
            "I-DISEASE",
            "B-SPECIES",
            "I-SPECIES",
        ]
        return datasets.DatasetInfo(
            description="abhi",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=custom_names)
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation="cite me",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}
            ),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        # Map the raw tag spellings used in the TSV files to the canonical
        # label names declared in _info(); anything else passes through as-is.
        tag_map = {
            "B": "B-SPECIES",
            "I": "I-SPECIES",
            "B-Chemical": "B-CHEMICAL",
            "I-Chemical": "I-CHEMICAL",
            "B-Disease": "B-DISEASE",
            "I-Disease": "I-DISEASE",
        }
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    # A blank line marks a sentence boundary.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Each non-blank line is "<token>\t<tag>".
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    tag = splits[1].rstrip()
                    ner_tags.append(tag_map.get(tag, tag))
            # Yield the last sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
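

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself: it assumes
    # an installed `datasets` release that still supports script-based loading
    # (pre-3.0); depending on the version, trust_remote_code=True may also be
    # required. It simply loads this script and inspects the declared labels.
    from datasets import load_dataset

    pv = load_dataset(__file__)  # load this loading script from its local path
    print(pv)
    print(pv["train"].features["ner_tags"].feature.names)
    print(pv["train"][0]["tokens"])
    print(pv["train"][0]["ner_tags"])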