# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The European Clinical Case Corpus (E3C) project aims at collecting and \
annotating a large corpus of clinical documents in five European languages (Spanish, \
Basque, English, French and Italian), which will be freely distributed. Annotations \
include temporal information, to allow temporal reasoning on chronologies, and \
information about clinical entities based on medical taxonomies, to be used for semantic reasoning.
"""
import json
import os
import xml.etree.ElementTree as et
from typing import Dict, Iterator, List, Tuple

import datasets

from .bigbiohub import BigBioConfig, Tasks

_LOCAL = True

_CITATION = """\
@report{Magnini2021,
    author = {Bernardo Magnini and BegoƱa Altuna and Alberto Lavelli and Manuela Speranza
              and Roberto Zanoli and Fondazione Bruno Kessler},
    keywords = {Clinical data, clinical entities, corpus, multilingual, temporal information},
    title = {The E3C Project: European Clinical Case Corpus El proyecto E3C: European Clinical Case Corpus},
    url = {https://uts.nlm.nih.gov/uts/umls/home},
    year = {2021},
}
"""

_DATASETNAME = "e3c"

_DESCRIPTION = """\
The European Clinical Case Corpus (E3C) project aims at collecting and \
annotating a large corpus of clinical documents in five European languages (Spanish, \
Basque, English, French and Italian), which will be freely distributed. Annotations \
include temporal information, to allow temporal reasoning on chronologies, and \
information about clinical entities based on medical taxonomies, to be used for semantic reasoning.
"""

_HOMEPAGE = "https://github.com/hltfbk/E3C-Corpus"

_LICENSE = ""

_URLS = {
    _DATASETNAME: "https://github.com/hltfbk/E3C-Corpus/archive/refs/tags/v2.0.0.zip",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]

_SOURCE_VERSION = "2.0.0"
_BIGBIO_VERSION = "1.0.0"


class E3cDataset(datasets.GeneratorBasedBuilder):
    """The European Clinical Case Corpus (E3C) is a multilingual corpus of clinical documents.

    The corpus is annotated with clinical entities and temporal information, and is
    available in five languages: Spanish, Basque, English, French and Italian.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
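        # Added note (not in the original script): the source schema stays close
        # to the UIMA/WebAnno export of the corpus. Each document carries its
        # sentence passages with character offsets, entity spans (events,
        # clinical entities, body parts, actors, RML, TIMEX3), and relation
        # records holding the raw E3C attributes.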
        # You can arbitrarily nest lists and dictionaries.
        # For iterables, use lists over tuples or `datasets.Sequence`.
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "document_id": datasets.Value("int32"),
                "text": datasets.Value("string"),
                "passages": [
                    {
                        "id": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "offsets": [datasets.Value("int32")],
                    }
                ],
                "entities": [
                    {
                        "id": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "offsets": [datasets.Value("int32")],
                        "semantic_type_id": datasets.Value("string"),
                        "role": datasets.Value("string"),
                    }
                ],
                "relations": [
                    {
                        "id": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "contextualAspect": datasets.Value("string"),
                        "contextualModality": datasets.Value("string"),
                        "degree": datasets.Value("string"),
                        "docTimeRel": datasets.Value("string"),
                        "eventType": datasets.Value("string"),
                        "permanence": datasets.Value("string"),
                        "polarity": datasets.Value("string"),
                        "functionInDocument": datasets.Value("string"),
                        "timex3Class": datasets.Value("string"),
                        "value": datasets.Value("string"),
                        "concept_1": datasets.Value("string"),
                        "concept_2": datasets.Value("string"),
                    }
                ],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        paths = {
            "en.layer1": "data_annotation/English/layer1",
            "en.layer2": "data_annotation/English/layer2",
            "en.layer2.validation": "data_validation/English/layer2",
            "en.layer3": "data_collection/English/layer3",
            "es.layer1": "data_annotation/Spanish/layer1",
            "es.layer2": "data_annotation/Spanish/layer2",
            "es.layer2.validation": "data_validation/Spanish/layer2",
            "es.layer3": "data_collection/Spanish/layer3",
            "eu.layer1": "data_annotation/Basque/layer1",
            "eu.layer2": "data_annotation/Basque/layer2",
            "eu.layer2.validation": "data_validation/Basque/layer2",
            "eu.layer3": "data_collection/Basque/layer3",
            "fr.layer1": "data_annotation/French/layer1",
            "fr.layer2": "data_annotation/French/layer2",
            "fr.layer2.validation": "data_validation/French/layer2",
            "fr.layer3": "data_collection/French/layer3",
            "it.layer1": "data_annotation/Italian/layer1",
            "it.layer2": "data_annotation/Italian/layer2",
            "it.layer2.validation": "data_validation/Italian/layer2",
            "it.layer3": "data_collection/Italian/layer3",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                # Whatever you put in gen_kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0", path),
                    "split": "train",
                },
            )
            for split, path in paths.items()
        ]

    def _generate_examples(self, filepath, split: str) -> Iterator[Tuple[int, Dict]]:
        """Yields examples as (key, example) tuples."""
        guid = 0
        for folder, _, files in os.walk(filepath):
            for file in files:
                with open(f"{folder}/{file}") as document:
                    if "layer3" not in folder:
                        root = et.fromstring(document.read())
                        annotations: dict = {}
                        for child in root:
                            annotations.setdefault(child.tag, []).append(
                                child.attrib | {"type": child.tag.split("}")[1]}
                            )
                        text = annotations["{http:///uima/cas.ecore}Sofa"][0]["sofaString"]
                        links = {
                            link["{http://www.omg.org/XMI}id"]: link
                            for link in [
                                *annotations.get(
                                    "{http:///webanno/custom.ecore}EVENTTLINKLink", []
                                ),
                                *annotations.get(
                                    "{http:///webanno/custom.ecore}RMLPERTAINSTOLink", []
                                ),
                                *annotations.get(
                                    "{http:///webanno/custom.ecore}TIMEX3TimexLinkLink", []
                                ),
                            ]
                        }
                        joined_relations = []
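                        # Each EVENT/TIMEX3/RML annotation may reference one or
                        # more link annotations (TLINK, PERTAINSTO, timexLink).
                        # Merge every annotation with its resolved links so that
                        # each (source, target) pair becomes one flat relation
                        # record; annotations with no links are kept as-is.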
                        for relation in [
                            *annotations.get("{http:///webanno/custom.ecore}EVENT", []),
                            *annotations.get("{http:///webanno/custom.ecore}TIMEX3", []),
                            *annotations.get("{http:///webanno/custom.ecore}RML", []),
                        ]:
                            link_ids = []
                            if "TLINK" in relation.keys():
                                link_ids = relation["TLINK"].split(" ")
                            elif "PERTAINSTO" in relation.keys():
                                link_ids = relation["PERTAINSTO"].split(" ")
                            elif "timexLink" in relation.keys():
                                link_ids = relation["timexLink"].split(" ")
                            elif not link_ids:
                                joined_relations.append(
                                    relation | {"source": relation["{http://www.omg.org/XMI}id"]}
                                )
                            if link_ids != [""]:
                                for link_id in link_ids:
                                    joined_relations.append(
                                        relation
                                        | links[link_id]
                                        | {"source": relation["{http://www.omg.org/XMI}id"]}
                                    )
                        yield guid, {
                            "id": "e3c",
                            "document_id": guid,
                            "text": text,
                            "passages": [
                                {
                                    "text": text[int(sentence["begin"]) : int(sentence["end"])],
                                    "id": sentence["{http://www.omg.org/XMI}id"],
                                    "offsets": [int(sentence["begin"]), int(sentence["end"])],
                                }
                                for sentence in annotations[
                                    "{http:///de/tudarmstadt/ukp/dkpro/core"
                                    "/api/segmentation/type.ecore}Sentence"
                                ]
                            ],
                            "entities": [
                                {
                                    "text": text[int(annotation["begin"]) : int(annotation["end"])],
                                    "offsets": [int(annotation["begin"]), int(annotation["end"])],
                                    "id": annotation["{http://www.omg.org/XMI}id"],
                                    "semantic_type_id": annotation.get("entityID", ""),
                                    "role": annotation.get("role", ""),
                                    "type": annotation.get("type"),
                                }
                                for annotation in [
                                    *annotations.get("{http:///webanno/custom.ecore}EVENT", []),
                                    *annotations.get(
                                        "{http:///webanno/custom.ecore}CLINENTITY", []
                                    ),
                                    *annotations.get("{http:///webanno/custom.ecore}BODYPART", []),
                                    *annotations.get("{http:///webanno/custom.ecore}ACTOR", []),
                                    *annotations.get("{http:///webanno/custom.ecore}RML", []),
                                    *annotations.get("{http:///webanno/custom.ecore}TIMEX3", []),
                                ]
                            ],
                            "relations": [
                                {
                                    "id": relation["{http://www.omg.org/XMI}id"],
                                    "type": relation.get("type"),
                                    "contextualAspect": relation.get("contextualAspect", ""),
                                    "contextualModality": relation.get("contextualModality", ""),
                                    "degree": relation.get("degree", ""),
                                    "docTimeRel": relation.get("docTimeRel", ""),
                                    "eventType": relation.get("eventType", ""),
                                    "permanence": relation.get("permanence", ""),
                                    "polarity": relation.get("polarity", ""),
                                    "functionInDocument": relation.get("functionInDocument", ""),
                                    "timex3Class": relation.get("timex3Class", ""),
                                    "value": relation.get("value", ""),
                                    "concept_1": relation.get("source"),
                                    "concept_2": relation.get("target", ""),
                                }
                                for relation in joined_relations
                            ],
                        }
                    else:
                        # layer3 files are unannotated raw collections stored as JSON.
                        unannotated_text = json.load(document)
                        yield guid, {
                            "id": "e3c",
                            "document_id": guid,
                            "text": unannotated_text["text"],
                            "passages": [],
                            "entities": [],
                            "relations": [],
                        }
                guid += 1
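

# Usage sketch (an assumption, not part of the original script): since _LOCAL = True,
# this loader is meant to be run from a local checkout with bigbiohub.py alongside it.
# The path "path/to/e3c.py" below is a placeholder for wherever the script lives.
#
#     import datasets
#
#     ds = datasets.load_dataset("path/to/e3c.py", name="e3c_source", split="fr.layer1")
#     print(ds[0]["entities"][:3])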