# cogtext/cogtext.py — HuggingFace Hub dataset loading script.
# (The following non-Python file-viewer residue was commented out so the module
#  parses: author "morteza", commit message "Update cogtext.py", commit 348c734,
#  "raw / history / blame" links, file size "4.22 kB".)
"""CogText Dataset"""
import datasets
import pandas as pd
# BibTeX entry for the paper this dataset accompanies.
_CITATION = """\
@misc{cogtext2022,
author = {Morteza Ansarinia and
Paul Schrater and
Pedro Cardoso-Leite},
title = {Linking Theories and Methods in Cognitive Sciences via Joint Embedding of the Scientific Literature: The Example of Cognitive Control},
year = {2022},
url = {https://arxiv.org/abs/2203.11016}
}
"""
# Short human-readable summary surfaced in the dataset card / DatasetInfo.
_DESCRIPTION = """\
CogText dataset contains a collection of PubMed abstracts, along with their GPT-3 embeddings and topic embeddings.
"""
_HOMEPAGE = "https://github.com/morteza/cogtext"
_LICENSE = "CC-BY-4.0"
# Data file(s) passed to dl_manager.download(); a relative path — presumably
# resolved against the dataset repository root (TODO confirm against Hub docs).
_URLS = [
"pubmed/abstracts.csv.gz"
]
class CogText(datasets.GeneratorBasedBuilder):
    """A collection of PubMed abstracts, along with their GPT-3 embeddings and topic embeddings."""

    VERSION = datasets.Version("1.0.0")

    DEFAULT_CONFIG_NAME = "abstracts"  # It's not mandatory to have a default configuration.

    def _info(self):
        """Return the DatasetInfo (feature schema, citation, license) for this dataset."""
        features = datasets.Features(
            {
                "pmid": datasets.Value("int32"),
                "doi": datasets.Value("string"),
                "year": datasets.Value("int32"),
                "journal_title": datasets.Value("string"),
                "journal_iso_abbreviation": datasets.Value("string"),
                "title": datasets.Value("string"),
                "abstract": datasets.Value("string"),
                "category": datasets.Value("string"),
                "subcategory": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # (input, target) pairing for supervised use: abstract -> subcategory.
            supervised_keys=("abstract", "subcategory"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the gzipped abstracts CSV and declare the single "abstracts" split.

        `_URLS` holds exactly one path, so the single-element unpacking below
        will raise if that ever changes — a deliberate guard.
        """
        [abstracts_path] = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split("abstracts"),
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": abstracts_path,
                    "split": "abstracts",
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples read from the gzipped CSV at `filepath`.

        The key is the DataFrame row index, which is unique per example as the
        `datasets` library requires. `split` is unused (single-split dataset)
        but kept because it arrives via `gen_kwargs`.
        """
        example_df = pd.read_csv(filepath, compression="gzip")
        for key, row in example_df.iterrows():
            # Field order mirrors the Features declaration in _info().
            yield key, {
                "pmid": row["pmid"],
                "doi": row["doi"],
                "year": row["year"],
                "journal_title": row["journal_title"],
                "journal_iso_abbreviation": row["journal_iso_abbreviation"],
                "title": row["title"],
                "abstract": row["abstract"],
                "category": row["category"],
                "subcategory": row["subcategory"],
            }