s2orc_small / generate_split.py
leminda-ai's picture
generation file
2fd4750
raw
history blame contribute delete
740 Bytes
# --- Setup: load 1% of the s2orc corpus and build a spaCy pipeline that
# only performs language detection. ---
import spacy
import spacy_fastlang  # noqa: F401 -- registers the "language_detector" factory

from datasets import load_dataset

# num_proc=20: parallelize download/preparation across 20 worker processes.
dataset = load_dataset(
    "allenai/s2orc",
    split="train[:1%]",
    num_proc=20,
)

nlp = spacy.load("en_core_web_sm")
# Disable every built-in component -- only language detection is needed,
# which keeps per-document processing cheap.
nlp.disable_pipes(nlp.pipe_names)
nlp.add_pipe("language_detector")
def has_abstract(example: dict) -> bool:
    """Return True if *example* carries a usable English abstract.

    An example qualifies when its "paperAbstract" field is present and not
    None, contains more than 5 whitespace-separated words, and the spaCy
    language detector classifies it as English ('en') with a confidence
    score of at least 0.8.
    """
    # .get() replaces the original `in example.keys()` + indexing pair:
    # one lookup instead of three, same semantics (missing key -> None).
    abstract = example.get("paperAbstract")
    if abstract is None or len(abstract.split()) <= 5:
        return False
    doc = nlp(abstract)
    return doc._.language == "en" and doc._.language_score >= 0.8
import os

# Keep only the examples with a usable English abstract.
dataset_sub = dataset.filter(has_abstract)

# SECURITY: never hardcode API tokens in source (the original had an inline
# placeholder token). Read the Hugging Face token from the environment
# instead; with no HF_TOKEN set, push_to_hub falls back to the cached
# `huggingface-cli login` credentials.
dataset_sub.push_to_hub(
    "leminda-ai/s2orc_small",
    split="train",
    token=os.environ.get("HF_TOKEN"),
)