# PatClass2011 / clefip2011.py
# Uploaded by amylonidis (commit c4a4c76, 5.05 kB).
import os
import pandas as pd
import datasets
class CLEFIP2011Config(datasets.BuilderConfig):
    """BuilderConfig for the CLEF-IP 2011 dataset.

    Args:
        dataset_type: Which sub-dataset this config selects
            (currently only ``"bibliographic"`` is used).
        **kwargs: Forwarded to :class:`datasets.BuilderConfig`
            (name, version, description, ...).
    """

    def __init__(self, dataset_type=None, **kwargs):
        # Python 3 zero-argument super() replaces the dated
        # super(CLEFIP2011Config, self) form.
        super().__init__(**kwargs)
        self.dataset_type = dataset_type
class CLEFIP2011(datasets.GeneratorBasedBuilder):
    """Dataset loader for the CLEF-IP 2011 bibliographic collection.

    Downloads a tar.gz archive from the Hugging Face Hub, reads the
    contained headerless CSV, and yields one example per patent record.
    """

    # Column order of the headerless CSV. Kept as a single class-level
    # constant so the feature schema in _info() and the CSV parsing in
    # _generate_examples() cannot drift apart.
    _COLUMN_NAMES = [
        "ucid",
        "country",
        "doc_number",
        "kind",
        "lang",
        "corrected_lang",
        "date",
        "family_id",
        "date_produced",
        "status",
        "ecla_list",
        "applicant_name_list",
        "inventor_name_list",
        "title_de_text",
        "title_fr_text",
        "title_en_text",
        "abstract_de_exist",
        "abstract_fr_exist",
        "abstract_en_exist",
        "description_de_exist",
        "description_fr_exist",
        "description_en_exist",
        "claims_de_exist",
        "claims_fr_exist",
        "claims_en_exist",
    ]

    # Columns stored as existence flags in the CSV, exposed as booleans.
    _BOOLEAN_COLUMNS = [
        "abstract_de_exist",
        "abstract_fr_exist",
        "abstract_en_exist",
        "description_de_exist",
        "description_fr_exist",
        "description_en_exist",
        "claims_de_exist",
        "claims_fr_exist",
        "claims_en_exist",
    ]

    BUILDER_CONFIGS = [
        CLEFIP2011Config(
            name="bibliographic",
            version=datasets.Version("1.0.0"),
            description="CLEF-IP 2011 Bibliographic Data",
            dataset_type="bibliographic",
        ),
    ]

    def _info(self):
        """Return the dataset metadata (feature schema).

        Raises:
            ValueError: if the config's ``dataset_type`` is not
                recognised. (Previously an unknown type fell through
                and crashed with a NameError on the unbound
                ``features`` variable.)
        """
        if self.config.dataset_type != "bibliographic":
            raise ValueError(
                f"Unsupported dataset_type: {self.config.dataset_type!r}"
            )
        # Derive the schema from the column constants: every column is a
        # string except the existence flags, which are booleans. Order is
        # preserved because the boolean columns come last in _COLUMN_NAMES.
        features = datasets.Features(
            {
                **{
                    col: datasets.Value("string")
                    for col in self._COLUMN_NAMES
                    if col not in self._BOOLEAN_COLUMNS
                },
                **{col: datasets.Value("bool") for col in self._BOOLEAN_COLUMNS},
            }
        )
        return datasets.DatasetInfo(
            description="CLEF-IP 2011 Bibliographic dataset.",
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and define the single TRAIN split."""
        archive_path = dl_manager.download_and_extract(
            "https://huggingface.co./datasets/amylonidis/PatClass2011/resolve/main/clefip2011_bibliographic_clean.tar.gz"
        )
        bibliographic_file = os.path.join(archive_path, "clefip2011_bibliographic.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": [bibliographic_file],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepaths, split):
        """Yield ``(key, example)`` pairs from each CSV file.

        Args:
            filepaths: Paths to headerless CSV files whose columns
                follow ``_COLUMN_NAMES``.
            split: Split name (unused; kept for gen_kwargs compatibility).
        """
        # Running counter keeps keys unique even if filepaths ever holds
        # more than one file; for a single file it equals the row index,
        # matching the previous behaviour.
        key = 0
        for filepath in filepaths:
            # Assign the column names at parse time instead of mutating
            # df.columns afterwards.
            df = pd.read_csv(filepath, header=None, names=self._COLUMN_NAMES)
            # Normalise YYYYMMDD values to ISO "YYYY-MM-DD" strings.
            for date_col in ("date", "date_produced"):
                df[date_col] = pd.to_datetime(
                    df[date_col], format="%Y%m%d"
                ).astype(str)
            # NOTE(review): astype(bool) assumes numeric 0/1 flags; if the
            # CSV ever stored the strings "True"/"False", every non-empty
            # string would map to True — confirm against the source data.
            for col in self._BOOLEAN_COLUMNS:
                df[col] = df[col].astype(bool)
            for _, row in df.iterrows():
                yield key, row.to_dict()
                key += 1