"""Sampling utilities for creating stratified subsets of the CRAG dataset."""

import os
from typing import Dict, List, Optional

import pandas as pd

from .utils import (
    read_jsonl_fields_fast,
    process_answer_types,
    create_stratified_subsets,
    subset_jsonl_file,
)


class CragSampler:
    """Main class for handling CRAG dataset sampling operations."""

    def __init__(
        self,
        input_file: str,
        required_fields: Optional[List[str]] = None,
        use_cache: bool = True,
    ):
        """Initialize CragSampler.

        Args:
            input_file: Path to input JSONL file (can be bz2 compressed)
            required_fields: List of field names to extract. If None, uses default fields
            use_cache: Whether to use/create cache file
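
        Example (sketch only; the path is illustrative, not shipped here):
            sampler = CragSampler("data/crag_dataset.jsonl.bz2")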
        """
        self.input_file = input_file
        self.required_fields = required_fields or [
            "domain",
            "answer",
            "question_type",
            "static_or_dynamic",
        ]
        self.use_cache = use_cache
        self.df = self._load_data()

    def _load_data(self) -> pd.DataFrame:
        """Load and process data from JSONL file."""
        df = read_jsonl_fields_fast(
            self.input_file, self.required_fields, self.use_cache
        )
        return process_answer_types(df)

    def create_subsets(
        self,
        n_subsets: int = 5,
        stratify_columns: Optional[List[str]] = None,
        output_path: Optional[str] = None,
        force_compute: bool = False,
    ) -> Dict:
        """Create stratified subsets of the dataset.

        Args:
            n_subsets: Number of subsets to create
            stratify_columns: Columns to use for stratification. If None, uses defaults
            output_path: Path to save/load the JSON output
            force_compute: If True, always compute subsets even if file exists

        Returns:
            Dictionary containing the subsets information
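
        Example (sketch; assumes a ``sampler`` built as in ``__init__``):
            subsets_info = sampler.create_subsets(
                n_subsets=5,
                stratify_columns=["domain", "answer_type"],
            )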
        """
        if stratify_columns is None:
            stratify_columns = [
                "domain",
                "answer_type",
                "question_type",
                "static_or_dynamic",
            ]

        if output_path is None:
            # Default to "<input stem>_subsets.json" next to the input file.
            # Note: splitext strips only the last extension, so
            # "data.jsonl.bz2" yields "data.jsonl_subsets.json".
            output_path = os.path.join(
                os.path.dirname(self.input_file),
                f"{os.path.splitext(os.path.basename(self.input_file))[0]}_subsets.json",
            )

        return create_stratified_subsets(
            self.df,
            n_subsets=n_subsets,
            stratify_columns=stratify_columns,
            output_path=output_path,
            force_compute=force_compute,
        )

    def write_subsets(
        self,
        subsets_file: str,
        output_dir: Optional[str] = None,
        compress: bool = True,
        n_processes: Optional[int] = None,
        overwrite: bool = False,
    ) -> None:
        """Write subsets to separate files.

        Args:
            subsets_file: Path to JSON file containing subset indices
            output_dir: Directory to save subset files
            compress: Whether to compress output files with bz2
            n_processes: Number of processes to use
            overwrite: If False, skip existing output files
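
        Example (sketch; the path is illustrative and would typically be
        the JSON file produced by ``create_subsets``):
            sampler.write_subsets("data/subsets.json", compress=True)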
        """
        subset_jsonl_file(
            self.input_file,
            subsets_file,
            output_dir=output_dir,
            compress=compress,
            n_processes=n_processes,
            overwrite=overwrite,
        )
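

# Minimal end-to-end sketch of the intended workflow. The paths below are
# illustrative, not part of the package; because of the relative import at
# the top, run this as a module (python -m <package>.<this_module>) rather
# than as a plain script.
if __name__ == "__main__":
    sampler = CragSampler("data/crag_dataset.jsonl.bz2")

    # Compute stratified subset indices (or reuse a previously saved file).
    sampler.create_subsets(n_subsets=5, output_path="data/subsets.json")

    # Materialize each subset as its own bz2-compressed JSONL file.
    sampler.write_subsets("data/subsets.json", compress=True)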