| import os |
| import datasets |
| from datasets import DatasetBuilder, SplitGenerator, DownloadConfig, load_dataset, DownloadManager, DatasetInfo, GeneratorBasedBuilder |
| from rdflib import Graph, URIRef, Literal, BNode |
| from rdflib.namespace import RDF, RDFS, OWL, XSD, Namespace, NamespaceManager |
| from datasets.features import Features, Value |
|
|
# RDF namespace for schema.org vocabulary terms.
# NOTE(review): not referenced in this file's visible code — presumably exported
# for downstream consumers of the module; confirm before removing.
SCHEMA = Namespace('http://schema.org/')


# RDF namespace for YAGO 4.5 entity resources.
# NOTE(review): also unreferenced in the visible code — verify against callers.
YAGO = Namespace('http://yago-knowledge.org/resource/')
|
|
class YAGO45DatasetBuilder(GeneratorBasedBuilder):
    """Dataset builder for a YAGO 4.5 subset restricted to English labels.

    Downloads the N-Triples fact chunks plus the Turtle taxonomy, parses
    them with rdflib, and yields one example per RDF triple with each term
    serialized in N3 notation (so URIs, literals and blank nodes survive a
    round-trip through string features).
    """

    VERSION = "1.0.1"

    def _info(self):
        """Return the dataset metadata and the flat triple feature schema."""
        return DatasetInfo(
            description="A subset of the YAGO 4.5 dataset maintaining only English labels",
            citation="@article{suchanek2023integrating,title={Integrating the Wikidata Taxonomy into YAGO},author={Suchanek, Fabian M and Alam, Mehwish and Bonald, Thomas and Paris, Pierre-Henri and Soria, Jules},journal={arXiv preprint arXiv:2308.11884},year={2023}}",
            homepage="https://yago-knowledge.org/",
            license="https://creativecommons.org/licenses/by-sa/3.0/",
            features=Features({
                'subject': Value('string'),
                'predicate': Value('string'),
                'object': Value('string')
            })
        )

    def _split_generators(self, dl_manager):
        """Download the fact archive and taxonomy file; declare one TRAIN split.

        The taxonomy (.ttl) is appended after the fact chunks (.nt) so it is
        parsed last by ``_generate_examples``.
        """
        facts, taxonomy = dl_manager.download_and_extract(["facts.tar.gz", "yago-taxonomy.ttl"])

        # The archive extracts into a "facts/" directory of .nt chunk files.
        facts = os.path.join(facts, "facts/")

        # Sort the chunks: os.listdir order is filesystem-dependent, and an
        # unsorted list would make example order (and thus example ids)
        # nondeterministic across runs and machines.
        chunk_paths = sorted(
            os.path.join(facts, chunk)
            for chunk in os.listdir(facts)
            if chunk.endswith('.nt')
        ) + [taxonomy]
        return [SplitGenerator(name=datasets.Split.TRAIN,
                               gen_kwargs={'chunk_paths': chunk_paths})]

    def _generate_examples(self, chunk_paths):
        """Yield ``(id, example)`` pairs — one example per RDF triple.

        ``id_`` increases monotonically across all chunks so example keys
        stay unique for the whole split.
        """
        id_ = 0
        for chunk_path in chunk_paths:
            graph = Graph(bind_namespaces="core")
            # Serialization format is inferred from the file extension
            # (.nt for fact chunks, .ttl for the taxonomy).
            graph.parse(chunk_path)

            # Iterating a Graph yields every (s, p, o) triple it holds.
            for s, p, o in graph:
                yield id_, {
                    'subject': s.n3(),
                    'predicate': p.n3(),
                    'object': o.n3()
                }
                id_ += 1
|
|
| from rdflib.util import from_n3 |
|
|
def triples(features):
    """Convert one dataset example back into an rdflib triple.

    Inverse of the builder's serialization: each value in *features* is an
    N3-encoded term (as produced by ``.n3()``) and is decoded with
    ``rdflib.util.from_n3``.

    Args:
        features: Mapping with ``'subject'``, ``'predicate'`` and
            ``'object'`` keys holding N3-serialized term strings.

    Returns:
        A ``(subject, predicate, object)`` tuple of rdflib nodes, or
        ``(None, None, None)`` when any key is missing or any term fails to
        parse — best-effort: the error is reported, never raised.
    """
    try:
        return tuple(from_n3(features[key])
                     for key in ('subject', 'predicate', 'object'))
    except Exception as e:
        # Deliberately broad: from_n3 can raise assorted parse errors and a
        # malformed example raises KeyError/TypeError; one bad row must not
        # abort a whole pass over the dataset.
        print(f"Error transforming features {features}: {e}")
        return (None, None, None)
|
|