import bz2
import hashlib
import json
import os
import subprocess
from multiprocessing import Pool, cpu_count

import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold


def get_cache_path(file_path: str, required_fields: list[str]) -> str:
    """
    Generate a unique cache file path based on input file and fields.

    Args:
        file_path: Path to the input JSONL file
        required_fields: List of field names to extract

    Returns:
        Path to the cache file
    """
    # Create a unique hash based on the requested fields
    fields_str = ",".join(sorted(required_fields))
    hash_str = hashlib.md5(fields_str.encode()).hexdigest()[:10]

    # Get the directory of the input file
    base_dir = os.path.dirname(file_path)

    # Get filename from file path
    file_name = os.path.basename(file_path).split(".")[0]

    cache_name = f"{file_name}_cache_{hash_str}.parquet"
    return os.path.join(base_dir, cache_name)


def read_jsonl_fields_fast(
    file_path: str, required_fields: list[str], use_cache: bool = True
) -> pd.DataFrame:
    """
    Quickly extract specific fields from a JSONL file (optionally bz2 compressed)
    using string operations. Results are cached in parquet format for faster
    subsequent reads.

    Args:
        file_path: Path to the JSONL file (can be bz2 compressed)
        required_fields: List of field names to extract from each JSON object
        use_cache: Whether to use/create cache file (default: True)

    Returns:
        DataFrame containing the requested fields
    """
    cache_path = get_cache_path(file_path, required_fields)
    print(f"Cache path: {cache_path}")

    # Try to load from cache first
    if use_cache and os.path.exists(cache_path):
        return pd.read_parquet(cache_path)

    # If no cache exists, process the file
    records = []
    patterns = [f'"{field}":' for field in required_fields]

    # Open with bz2 only when the file is actually compressed
    open_fn = bz2.open if file_path.endswith(".bz2") else open
    with open_fn(file_path, "rt") as file:
        for line in file:
            if not line.strip():
                continue

            result = {}
            for field, pattern in zip(required_fields, patterns):
                try:
                    # Find the field in the line
                    start_idx = line.find(pattern)
                    if start_idx == -1:
                        continue

                    # Move to the start of the value
                    start_idx += len(pattern)
                    while start_idx < len(line) and line[start_idx].isspace():
                        start_idx += 1

                    # Handle different value types
                    if start_idx >= len(line):
                        continue

                    if line[start_idx] == '"':
                        # String value
                        start_idx += 1
                        end_idx = line.find('"', start_idx)
                        value = line[start_idx:end_idx]
                    elif line[start_idx] == "{" or line[start_idx] == "[":
                        # Skip nested objects/arrays
                        continue
                    else:
                        # Number, boolean, or null
                        end_idx = line.find(",", start_idx)
                        if end_idx == -1:
                            end_idx = line.find("}", start_idx)
                        value = line[start_idx:end_idx].strip()

                        # Convert to appropriate type
                        if value == "true":
                            value = True
                        elif value == "false":
                            value = False
                        elif value == "null":
                            value = None
                        else:
                            try:
                                value = float(value) if "." in value else int(value)
                            except ValueError:
                                continue

                    result[field] = value
                except Exception:
                    continue

            if result:
                records.append(result)

    # Convert to DataFrame
    df = pd.DataFrame.from_records(records)

    # Convert columns to appropriate types
    for col in df.columns:
        # If the column contains any strings, convert the whole column to strings
        if (
            df[col].dtype == object
            and df[col].apply(lambda x: isinstance(x, str)).any()
        ):
            df[col] = df[col].astype(str)
        # You can add more type conversions here if needed

    # Save cache if enabled
    if use_cache:
        df.to_parquet(cache_path)

    return df


def process_answer_types(df: pd.DataFrame) -> pd.DataFrame:
    """
    Process the answer field to create a new answer_type field.

    Args:
        df: Input DataFrame with 'answer' column

    Returns:
        DataFrame with new 'answer_type' column
    """
    # Create a copy to avoid modifying the original
    df = df.copy()

    # Print unique answers to debug
    print("Unique answers in dataset:")
    print(df["answer"].unique())

    # Create answer_type column with case-insensitive matching
    conditions = [
        df["answer"].str.lower() == "invalid question",
        df["answer"].str.lower() == "i don't know",  # Try exact match
    ]
    choices = ["invalid", "no_answer"]
    df["answer_type"] = np.select(conditions, choices, default="valid")

    # Print distribution to verify
    print("\nAnswer type distribution:")
    print(df["answer_type"].value_counts())

    return df


def create_stratified_subsamples(
    df: pd.DataFrame,
    n_subsamples: int,
    stratify_columns: list[str] = [
        "domain",
        "answer_type",
        "question_type",
        "static_or_dynamic",
    ],
    output_path: str = "subsamples.json",
    force_compute: bool = False,
) -> dict:
    """
    Create stratified subsamples of the dataset and save them to a JSON file.
    Each subsample gets a unique ID based on its indices.

    Args:
        df: Input DataFrame
        n_subsamples: Number of subsamples to create
        stratify_columns: Columns to use for stratification
        output_path: Path to save/load the JSON output
        force_compute: If True, always compute subsamples even if file exists

    Returns:
        Dictionary containing the subsamples information
    """
    # Check if file exists and we can use it
    if not force_compute and os.path.exists(output_path):
        try:
            with open(output_path, "r") as f:
                subsamples_data = json.load(f)

            # Validate the loaded data has the expected structure
            if (
                subsamples_data.get("metadata", {}).get("n_subsamples") == n_subsamples
                and subsamples_data.get("metadata", {}).get("stratify_columns")
                == stratify_columns
            ):
                print(f"Loading existing subsamples from {output_path}")
                return subsamples_data
            else:
                print(
                    "Existing subsamples file has different parameters, recomputing..."
                )
        except Exception as e:
            print(f"Error loading existing subsamples file: {e}, recomputing...")

    # Create a combined category for stratification
    df["strat_category"] = df[stratify_columns].astype(str).agg("_".join, axis=1)

    # Initialize the stratified splitter
    skf = StratifiedKFold(n_splits=n_subsamples, shuffle=True, random_state=42)

    # Create subsamples
    subsamples_info = []
    for subsample_idx, (_, subsample_indices) in enumerate(
        skf.split(df, df["strat_category"])
    ):
        # Sort indices for consistent hashing
        sorted_indices = sorted(subsample_indices.tolist())

        # Create a deterministic ID from the indices
        subsample_id = hashlib.md5(str(sorted_indices).encode()).hexdigest()[:8]

        # Calculate statistics for this subsample
        stats = {}
        subsample_df = df.iloc[subsample_indices]
        for col in stratify_columns:
            stats[col] = subsample_df[col].value_counts().to_dict()

        subsamples_info.append(
            {
                "id": subsample_id,
                "statistics": stats,
                "indices": sorted_indices,
                "size": len(subsample_indices),
            }
        )

    # Add global statistics
    global_stats = {}
    for col in stratify_columns:
        global_stats[col] = df[col].value_counts().to_dict()

    output_data = {
        "metadata": {
            "n_subsamples": n_subsamples,
            "total_samples": len(df),
            "stratify_columns": stratify_columns,
            "global_statistics": global_stats,
        },
        "subsamples": subsamples_info,
    }

    # Save to JSON
    with open(output_path, "w") as f:
        json.dump(output_data, f, indent=2)

    return output_data


def write_subsample(
    input_file: str, indices: list[int], output_file: str, compress: bool = True
) -> None:
    """
    Write a single subsample to a file using awk.

    Args:
        input_file: Path to input JSONL file
        indices: List of indices to extract
        output_file: Path to output file
        compress: Whether to compress output
    """
    # Convert indices to an awk condition
    # NR is the current line number in awk
    indices_set = set(i + 1 for i in indices)  # Convert to 1-based indexing
    indices_str = ",".join(str(i) for i in sorted(indices_set))

    # Create awk script with escaped curly braces
    awk_script = (
        f'BEGIN {{split("{indices_str}",a,","); for(i in a) n[a[i]];}} NR in n'
    )

    if input_file.endswith(".bz2"):
        if compress:
            cmd = f"bzcat '{input_file}' | awk '{awk_script}' | bzip2 > '{output_file}'"
        else:
            cmd = f"bzcat '{input_file}' | awk '{awk_script}' > '{output_file}'"
    else:
        if compress:
            cmd = f"awk '{awk_script}' '{input_file}' | bzip2 > '{output_file}'"
        else:
            cmd = f"awk '{awk_script}' '{input_file}' > '{output_file}'"

    print(f"Process {os.getpid()} - Starting subsample to {output_file}")
    try:
        subprocess.run(
            cmd,
            shell=True,
            check=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            text=True,
        )
        print(f"Process {os.getpid()} - Finished subsample to {output_file}")

        # Verify the output file exists and has content
        if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
            print(
                f"Process {os.getpid()} - Successfully created {output_file} "
                f"({os.path.getsize(output_file)} bytes)"
            )
        else:
            raise Exception(f"Output file {output_file} is empty or doesn't exist")

    except subprocess.CalledProcessError as e:
        print(f"Error executing command: {e.stderr}")
        print(f"Command output: {e.stdout}")
        raise
    except Exception as e:
        print(f"Error: {str(e)}")
        raise


def subsample_jsonl_file(
    input_file: str,
    subsamples_file: str,
    output_dir: str = None,
    compress: bool = True,
    n_processes: int = None,
    overwrite: bool = False,
) -> None:
    """
    Split a large JSONL file into multiple subsample files using awk for maximum performance.

    Args:
        input_file: Path to input JSONL file (can be bz2 compressed)
        subsamples_file: Path to JSON file containing subsample indices
        output_dir: Directory to save subsample files (defaults to input file directory)
        compress: Whether to compress output files with bz2
        n_processes: Number of processes to use (defaults to min(n_subsamples, cpu_count))
        overwrite: If False, skip existing output files (default: False)
    """
    # Load subsamples information
    with open(subsamples_file, "r") as f:
        subsamples_data = json.load(f)

    # Determine optimal number of processes
    n_subsamples = len(subsamples_data["subsamples"])
    if n_processes is None:
        n_processes = min(n_subsamples, cpu_count())

    if output_dir is None:
        output_dir = os.path.dirname(input_file)
    os.makedirs(output_dir, exist_ok=True)

    base_name = os.path.splitext(os.path.basename(input_file))[0]
    if base_name.endswith(".jsonl"):
        base_name = os.path.splitext(base_name)[0]

    # Prepare arguments for parallel processing
    write_args = []
    skipped_files = []
    for subsample in subsamples_data["subsamples"]:
        subsample_id = subsample["id"]
        output_name = f"{base_name}_subsample_{subsample_id}.jsonl"
        if compress:
            output_name += ".bz2"
        output_path = os.path.join(output_dir, output_name)

        # Skip if file exists and overwrite is False
        if not overwrite and os.path.exists(output_path):
            skipped_files.append(output_path)
            continue

        write_args.append((input_file, subsample["indices"], output_path, compress))

    if skipped_files:
        print(f"Skipping {len(skipped_files)} existing files:")
        for file in skipped_files:
            print(f" - {file}")

    if write_args:
        print(f"Processing {len(write_args)} subsamples using {n_processes} processes")
        with Pool(processes=n_processes) as pool:
            pool.starmap(write_subsample, write_args)
    else:
        print("No files to process - all files exist and overwrite=False")


def run_crag_task_1_and_2(
    file_path: str,
    fields_to_extract: list[str],
    n_subsamples: int = 5,
    output_dir: str = None,
    compress: bool = True,
    n_processes: int = None,
    overwrite: bool = False,
):
    # Load and process data
    df = read_jsonl_fields_fast(file_path, fields_to_extract)
    df = process_answer_types(df)
    print(df.head())

    output_path = os.path.join(
        os.path.dirname(file_path),
        os.path.basename(file_path).split(".")[0] + "_subsamples.json",
    )

    # This will load from file if it exists and parameters match
    subsamples_data = create_stratified_subsamples(
        df, n_subsamples=n_subsamples, output_path=output_path
    )

    # Example of how to read and use the subsamples
    with open(output_path, "r") as f:
        subsamples_data = json.load(f)

    # Print some information about the subsamples
    print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples")
    print("\nGlobal statistics:")
    print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2))

    # Print statistics for first subsample
    print("\nFirst subsample statistics:")
    print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2))

    # Defaults to one process per subsample, capped at the available CPU cores
    subsample_jsonl_file(
        file_path,
        output_path,
        output_dir=output_dir,
        compress=compress,
        n_processes=n_processes,
        overwrite=overwrite,
    )


# Example usage
if __name__ == "__main__":
    file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
    fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"]
    run_crag_task_1_and_2(file_path, fields_to_extract)
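
    # A minimal sketch of a customized run; the subsample count, output
    # directory, and process count below are illustrative values, not part of
    # the original script. Uncomment to produce 10 uncompressed subsamples:
    # run_crag_task_1_and_2(
    #     file_path,
    #     fields_to_extract,
    #     n_subsamples=10,
    #     output_dir="./local_data/subsamples",  # hypothetical path
    #     compress=False,
    #     n_processes=4,
    #     overwrite=True,
    # )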