Datasets:
import os

import datasets
from datasets import DatasetInfo, GeneratorBasedBuilder, SplitGenerator
from datasets.features import Features, Value
from rdflib import Graph
from rdflib.namespace import Namespace

# Namespaces commonly used by YAGO 4.5, bound here for downstream consumers.
SCHEMA = Namespace('http://schema.org/')
YAGO = Namespace('http://yago-knowledge.org/resource/')
class YAGO45DatasetBuilder(GeneratorBasedBuilder):
    """Builds the English-label subset of YAGO 4.5 as (subject, predicate, object) string triples."""

    VERSION = "1.0.0"

    def _info(self):
        return DatasetInfo(
            description="A subset of the YAGO 4.5 dataset maintaining only English labels",
            citation="@article{suchanek2023integrating,title={Integrating the Wikidata Taxonomy into YAGO},author={Suchanek, Fabian M and Alam, Mehwish and Bonald, Thomas and Paris, Pierre-Henri and Soria, Jules},journal={arXiv preprint arXiv:2308.11884},year={2023}}",
            homepage="https://yago-knowledge.org/",
            license="https://creativecommons.org/licenses/by-sa/3.0/",
            features=Features({
                'subject': Value('string'),
                'predicate': Value('string'),
                'object': Value('string')
            })
        )
    def _split_generators(self, dl_manager):
        # Download and extract the dataset files.
        facts, taxonomy = dl_manager.download_and_extract(["facts.tar.gz", "yago-taxonomy.ttl"])
        facts = os.path.join(facts, "tmp/yago/")
        # Define a single train split covering every N-Triples chunk of the extracted facts archive.
        chunk_paths = [os.path.join(facts, chunk) for chunk in os.listdir(facts) if chunk.endswith('.nt')]
        return [SplitGenerator(name=datasets.Split.TRAIN,
                               gen_kwargs={'chunk_paths': chunk_paths})]
    def _generate_examples(self, chunk_paths):
        # Parse each chunk into an rdflib graph and yield one example per triple,
        # with every term serialized as an N3 string.
        id_ = 0
        for chunk_path in chunk_paths:
            graph = Graph(bind_namespaces="core")
            graph.parse(chunk_path)
            for (s, p, o) in graph.triples((None, None, None)):
                yield id_, {
                    'subject': s.n3(),
                    'predicate': p.n3(),
                    'object': o.n3()
                }
                id_ += 1
from rdflib.util import from_n3

def triples(features):
    """Convert one dataset row of N3-encoded strings back into rdflib terms."""
    try:
        subject_node = from_n3(features['subject'])
        predicate_node = from_n3(features['predicate'])
        object_node = from_n3(features['object'])
        return (subject_node, predicate_node, object_node)
    except Exception as e:
        print(f"Error transforming features {features}: {e}")
        return (None, None, None)
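
# Example usage: a minimal sketch, not part of the original script. It assumes this file is
# saved as "yago45en.py" next to the data files it references ("facts.tar.gz",
# "yago-taxonomy.ttl"), and that the installed `datasets` version still supports
# script-based loading.
if __name__ == "__main__":
    from datasets import load_dataset

    yago = load_dataset("yago45en.py", split="train")  # rows of N3-encoded strings
    print(yago[0])                                     # {'subject': ..., 'predicate': ..., 'object': ...}
    s, p, o = triples(yago[0])                         # back to rdflib terms via the helper above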