Tasks: Text Classification
Modalities: Text
Sub-tasks: entity-linking-classification
Languages: English
Size: < 1K
License: (not specified)
# I am trying to understand the following code. Do not use this for any purpose, as I do not support it.
# Use the original source from https://huggingface.co./datasets/DFKI-SLT/science_ie/raw/main/science_ie.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semeval2018Task7 is a dataset that describes the first task on semantic relation extraction and classification in scientific paper abstracts""" | |
import glob | |
import datasets | |
import xml.dom.minidom | |
import xml.etree.ElementTree as ET | |
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@inproceedings{gabor-etal-2018-semeval,
    title = "{S}em{E}val-2018 Task 7: Semantic Relation Extraction and Classification in Scientific Papers",
    author = {G{\\'a}bor, Kata and
      Buscaldi, Davide and
      Schumann, Anne-Kathrin and
      QasemiZadeh, Behrang and
      Zargayouna, Ha{\\"\\i}fa and
      Charnois, Thierry},
    booktitle = "Proceedings of the 12th International Workshop on Semantic Evaluation",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/S18-1111",
    doi = "10.18653/v1/S18-1111",
    pages = "679--688",
    abstract = "This paper describes the first task on semantic relation extraction and classification in
    scientific paper abstracts at SemEval 2018. The challenge focuses on domain-specific semantic relations
    and includes three different subtasks. The subtasks were designed so as to compare and quantify the
    effect of different pre-processing steps on the relation classification results. We expect the task to
    be relevant for a broad range of researchers working on extracting specialized knowledge from domain
    corpora, for example but not limited to scientific or bio-medical information extraction. The task
    attracted a total of 32 participants, with 158 submissions across different scenarios.",
}
"""
# You can copy an official description
_DESCRIPTION = """\
This paper describes the first task on semantic relation extraction and classification in scientific paper
abstracts at SemEval 2018. The challenge focuses on domain-specific semantic relations and includes three
different subtasks. The subtasks were designed so as to compare and quantify the effect of different
pre-processing steps on the relation classification results. We expect the task to be relevant for a broad
range of researchers working on extracting specialized knowledge from domain corpora, for example but not
limited to scientific or bio-medical information extraction. The task attracted a total of 32 participants,
with 158 submissions across different scenarios.
"""

# Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/gkata/SemEval2018Task7/tree/testing"

# Add the licence for the dataset here if you can find it
_LICENSE = ""

# Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "Subtask_1_1": {
        "train": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.text.xml",
        },
        "test": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.test.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.test.text.xml",
        },
    },
    "Subtask_1_2": {
        "train": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.text.xml",
        },
        "test": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.test.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.test.text.xml",
        },
    },
}
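
# Note on file formats (inferred from the parsing code below; the example IDs
# are hypothetical): each line of a relations file encodes one relation, e.g.
#   USAGE(H01-1001.9,H01-1001.10)
#   MODEL-FEATURE(H01-1001.2,H01-1001.3,REVERSE)
# and each text file is an XML document whose children carry <title> and
# <abstract> elements with inline <entity id="..."> spans.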
# Helper kept from the original script (not used by the builder below):
# yields the text directly inside `root`, skipping the text of nested elements.
def all_text_nodes(root):
    if root.text is not None:
        yield root.text
    for child in root:
        if child.tail is not None:
            yield child.tail
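
# For example (hypothetical element): given <a>x<b>y</b>z</a>, all_text_nodes
# yields "x" and then "z"; the nested element's own text "y" is skipped.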
def reading_entity_data(ET_data_to_convert):
    """Serialize a <title> or <abstract> element, strip its outer tags, and
    recover the character offsets of every inline <entity id="..."> span."""
    parsed_data = ET.tostring(ET_data_to_convert, encoding="utf-8").decode("utf-8")
    # Drop the enclosing container tags so offsets are relative to the raw text.
    for tag in ("<abstract>", "</abstract>", "<title>", "</title>"):
        parsed_data = parsed_data.replace(tag, "")
    parsed_data = parsed_data.replace("\n\n\n", "")

    parsing_tag = False
    final_string = ""   # text with all entity tags removed
    tag_string = ""     # contents of the tag currently being read
    current_tag_id = ""  # id of the entity span currently open
    current_tag_starting_pos = 0
    entity_mapping_list = []

    # Scan character by character: text outside tags accumulates in
    # final_string, and each <entity id="..."> ... </entity> pair becomes a
    # {id, char_start, char_end} record over that accumulated text.
    for i in parsed_data:
        if i == "<":
            parsing_tag = True
            # A '<' while an entity is open is its closing tag: record the span.
            if current_tag_id != "":
                entity_mapping_list.append({
                    "id": current_tag_id,
                    "char_start": current_tag_starting_pos,
                    "char_end": len(final_string),
                })
                current_tag_id = ""
            tag_string = ""
        elif i == ">":
            parsing_tag = False
            # An opening tag with an id="..." attribute starts an entity span.
            tag_string_split = tag_string.split('"')
            if len(tag_string_split) > 1:
                current_tag_id = tag_string_split[1]
                current_tag_starting_pos = len(final_string)
        else:
            if not parsing_tag:
                final_string = final_string + i
            else:
                tag_string = tag_string + i
    return {"text_data": final_string, "entities": entity_mapping_list}
class Semeval2018Task7(datasets.GeneratorBasedBuilder):
    """
    Semeval2018Task7 is a dataset for semantic relation extraction and classification in scientific paper abstracts.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Subtask_1_1", version=VERSION,
                               description="Relation classification on clean data"),
        datasets.BuilderConfig(name="Subtask_1_2", version=VERSION,
                               description="Relation classification on noisy data"),
    ]
    DEFAULT_CONFIG_NAME = "Subtask_1_1"
    def _info(self):
        class_labels = ["", "USAGE", "RESULT", "MODEL-FEATURE", "PART_WHOLE", "TOPIC", "COMPARE"]
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "abstract": datasets.Value("string"),
                "entities": [
                    {
                        "id": datasets.Value("string"),
                        "char_start": datasets.Value("int32"),
                        "char_end": datasets.Value("int32"),
                    }
                ],
                "relation": [
                    {
                        "label": datasets.ClassLabel(names=class_labels),
                        "arg1": datasets.Value("string"),
                        "arg2": datasets.Value("string"),
                        "reverse": datasets.Value("bool"),
                    }
                ],
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types;
            # both configurations share the same feature schema.
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
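
    # Shape of one generated example under the features above (all values are
    # hypothetical and for illustration only):
    #   {"id": "H01-1001",
    #    "title": "...", "abstract": "...",
    #    "entities": [{"id": "H01-1001.1", "char_start": 0, "char_end": 9}],
    #    "relation": [{"label": "USAGE", "arg1": "H01-1001.1",
    #                  "arg2": "H01-1001.2", "reverse": False}]}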
    def _split_generators(self, dl_manager):
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download URLs.
        # It can accept any type or nested list/dict and will give back the same
        # structure with the URLs replaced by paths to local files.
        urls = _URLS[self.config.name]
        downloaded_files = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "relation_filepath": downloaded_files["train"]["relations"],
                    "text_filepath": downloaded_files["train"]["text"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "relation_filepath": downloaded_files["test"]["relations"],
                    "text_filepath": downloaded_files["test"]["text"],
                },
            ),
        ]
    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, relation_filepath, text_filepath):
        # This method yields (key, example) tuples from the dataset. The `key` is for
        # legacy reasons (tfds) and is not important in itself, but must be unique per example.
        with open(relation_filepath, encoding="utf-8") as f:
            relations = []
            text_id_to_relations_map = {}
            for row in f:
                # Each line looks like LABEL(arg1,arg2) or LABEL(arg1,arg2,REVERSE).
                row_split = row.strip("\n").split("(")
                use_case = row_split[0]
                second_half = row_split[1].strip(")")
                second_half_splits = second_half.split(",")
                size = len(second_half_splits)
                relation = {
                    "label": use_case,
                    "arg1": second_half_splits[0],
                    "arg2": second_half_splits[1],
                    # A third component ("REVERSE") marks a reversed relation.
                    "reverse": size == 3,
                }
                relations.append(relation)
                # Group relations by document id: the part of arg1 before the first dot.
                arg_id = second_half_splits[0].split(".")[0]
                if arg_id not in text_id_to_relations_map:
                    text_id_to_relations_map[arg_id] = [relation]
                else:
                    text_id_to_relations_map[arg_id].append(relation)
#print("result", text_id_to_relations_map) | |
#for arg_id, values in text_id_to_relations_map.items(): | |
#print(f"ID: {arg_id}") | |
# for value in values: | |
# (value) | |
        doc2 = ET.parse(text_filepath)
        root = doc2.getroot()
        for child in root:
            # Skip documents that are missing a title or an abstract.
            if child.find("title") is None:
                continue
            if child.find("abstract") is None:
                continue
            # The element's attributes hold the document id.
            text_id = child.attrib
            child_abstract = child.find("abstract")
            child_title = child.find("title")
            abstract_text_and_entities = reading_entity_data(child_abstract)
            title_text_and_entities = reading_entity_data(child_title)
            text_relations = []
            if text_id["id"] in text_id_to_relations_map:
                text_relations = text_id_to_relations_map[text_id["id"]]
            yield text_id["id"], {
                "id": text_id["id"],
                "title": title_text_and_entities["text_data"],
                "abstract": abstract_text_and_entities["text_data"],
                "entities": abstract_text_and_entities["entities"] + title_text_and_entities["entities"],
                "relation": text_relations,
            }
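
# A minimal usage sketch (not part of the original script; the local filename
# "semeval2018task7.py" and the config name are illustrative, and recent
# `datasets` versions may additionally require trust_remote_code=True):
if __name__ == "__main__":
    dataset = datasets.load_dataset("semeval2018task7.py", name="Subtask_1_1")
    print(dataset["train"][0])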