"""Hugging Face dataset loading script for the Synthetic Project Connectivity benchmark."""
import os
import json
import datasets
_BASE_URL = "https://huggingface.co./datasets/giulio98/person_project_easy/resolve/main/"
class SyntheticProjectConnectivityConfig(datasets.BuilderConfig):
    """Configuration for the SyntheticProjectConnectivity dataset builder.

    Each config selects one chunk variant of the dataset (e.g. the file
    ``4_chunks.jsonl`` on the Hub).
    """

    def __init__(self, chunk=None, **kwargs):
        """Create a chunk-specific builder configuration.

        Args:
            chunk: (str) chunk identifier such as ``"4"`` or ``"8"``.
            **kwargs: Remaining keyword arguments forwarded to
                ``datasets.BuilderConfig`` (name, version, description, ...).
        """
        super().__init__(**kwargs)
        # Remember which chunk variant this config selects; consumed by
        # SyntheticProjectConnectivity._split_generators.
        self.chunk = chunk
class SyntheticProjectConnectivity(datasets.GeneratorBasedBuilder):
    """Dataset builder for Synthetic Project Connectivity.

    Each config (``"4_chunks"``, ``"8_chunks"``, ``"16_chunks"``,
    ``"32_chunks"``) maps to a single JSONL file hosted on the Hugging Face
    Hub and is exposed as a single TEST split.
    """

    BUILDER_CONFIGS = [
        SyntheticProjectConnectivityConfig(
            name=f"{chunks_id}_chunks",
            chunk=str(chunks_id),
            version=datasets.Version("1.0.0"),
            description=f"Data for chunks {chunks_id}",
        )
        for chunks_id in [4, 8, 16, 32]
    ]

    def _info(self):
        """Return the dataset metadata (feature schema, homepage, citation)."""
        return datasets.DatasetInfo(
            description="Dataset for synthetic project connectivity phases.",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),  # unique example identifier
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": [datasets.Value("string")],  # list of answer strings
                }
            ),
            supervised_keys=None,
            homepage="https://example.com",
            citation="Your citation here",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the chunk-specific JSONL file and expose it as the TEST split.

        Args:
            dl_manager: Download manager provided by the `datasets` library.

        Returns:
            A single-element list containing the TEST SplitGenerator.

        Raises:
            ValueError: If the active config does not specify a chunk id.
        """
        chunk_str = self.config.chunk
        if not chunk_str:
            raise ValueError("Chunk ID is not specified in the configuration.")
        # Build the download URL for the selected chunk variant, e.g.
        # ".../resolve/main/4_chunks.jsonl".
        data_url = f"{_BASE_URL}{chunk_str}_chunks.jsonl"
        downloaded_file = dl_manager.download_and_extract(data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_file},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a JSONL file.

        Blank lines are skipped. A malformed JSON line aborts generation with
        a ValueError reporting the 1-based line number, chained to the
        original decode error.

        Args:
            filepath: Local path to the downloaded JSONL file.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                if not line:
                    continue  # tolerate blank lines in the JSONL file
                # Keep the try body minimal: only json.loads can raise
                # JSONDecodeError; key lookups below should surface as-is.
                try:
                    data = json.loads(line)
                except json.JSONDecodeError as e:
                    # idx is 0-based; report a human-friendly 1-based line
                    # number and chain the original error for debugging.
                    raise ValueError(
                        f"Error decoding JSON at line {idx + 1} in {filepath}: {e}"
                    ) from e
                yield idx, {
                    "id": data["id"],  # unique ID field
                    "context": data["context"],
                    "question": data["question"],
                    "answers": data["answers"],  # list of answer strings
                }