|
import bz2 |
|
|
import pandas as pd |
|
import os |
|
import hashlib |
|
import json |
|
from sklearn.model_selection import StratifiedKFold |
|
import numpy as np |
|
from multiprocessing import Pool, cpu_count |
|
|
import subprocess |
|
|
|
|
|
def get_cache_path(file_path: str, required_fields: list[str]) -> str: |
|
""" |
|
Generate a unique cache file path based on input file and fields. |
|
|
|
Args: |
|
file_path: Path to the input JSONL file |
|
required_fields: List of field names to extract |
|
|
|
Returns: |
|
Path to the cache file |
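
    Example (illustrative; the hash suffix depends only on the sorted field list):
        get_cache_path("./data/foo.jsonl.bz2", ["answer", "domain"])
        -> "./data/foo_cache_<md5-prefix>.parquet"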
|
""" |
|
|
|
fields_str = ",".join(sorted(required_fields)) |
|
hash_input = f"{fields_str}" |
|
hash_str = hashlib.md5(hash_input.encode()).hexdigest()[:10] |
|
|
|
|
|
base_dir = os.path.dirname(file_path) |
|
|
|
file_name = os.path.basename(file_path).split(".")[0] |
|
cache_name = f"{file_name}_cache_{hash_str}.parquet" |
|
return os.path.join(base_dir, cache_name) |
|
|
|
|
|
def read_jsonl_fields_fast( |
|
file_path: str, required_fields: list[str], use_cache: bool = True |
|
) -> pd.DataFrame: |
|
""" |
|
Quickly extract specific fields from a compressed JSONL file using string operations. |
|
Results are cached in parquet format for faster subsequent reads. |
|
|
|
Args: |
|
file_path: Path to the JSONL file (can be bz2 compressed) |
|
required_fields: List of field names to extract from each JSON object |
|
use_cache: Whether to use/create cache file (default: True) |
|
|
|
Returns: |
|
DataFrame containing the requested fields |
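
    Example (illustrative call; file name and field names are placeholders):
        df = read_jsonl_fields_fast("data.jsonl.bz2", ["domain", "answer"])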
|
""" |
|
cache_path = get_cache_path(file_path, required_fields) |
|
print(f"Cache path: {cache_path}") |
|
|
|
if use_cache and os.path.exists(cache_path): |
|
return pd.read_parquet(cache_path) |
|
|
|
|
|
records = [] |
|
patterns = [f'"{field}":' for field in required_fields] |
|
|
|
    open_fn = bz2.open if file_path.endswith(".bz2") else open
    with open_fn(file_path, "rt") as file:
|
for line in file: |
|
if not line.strip(): |
|
continue |
|
|
|
result = {} |
|
for field, pattern in zip(required_fields, patterns): |
|
try: |
|
|
|
start_idx = line.find(pattern) |
|
if start_idx == -1: |
|
continue |
|
|
|
|
|
start_idx += len(pattern) |
|
while start_idx < len(line) and line[start_idx].isspace(): |
|
start_idx += 1 |
|
|
|
|
|
if start_idx >= len(line): |
|
continue |
|
|
|
if line[start_idx] == '"': |
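                        # Quoted string value: take the text up to the next double
                        # quote (escaped quotes inside values are not handled).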
|
|
|
start_idx += 1 |
|
end_idx = line.find('"', start_idx) |
|
value = line[start_idx:end_idx] |
|
elif line[start_idx] == "{" or line[start_idx] == "[": |
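                        # Nested objects/arrays are not handled by this scanner; skip.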
|
|
|
continue |
|
else: |
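                        # Unquoted literal (number, true/false/null): read up to the
                        # next comma or closing brace, then coerce the type below.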
|
|
|
end_idx = line.find(",", start_idx) |
|
if end_idx == -1: |
|
end_idx = line.find("}", start_idx) |
|
value = line[start_idx:end_idx].strip() |
|
|
|
if value == "true": |
|
value = True |
|
elif value == "false": |
|
value = False |
|
elif value == "null": |
|
value = None |
|
else: |
|
try: |
|
value = float(value) if "." in value else int(value) |
|
except ValueError: |
|
continue |
|
|
|
result[field] = value |
|
except Exception: |
|
continue |
|
|
|
if result: |
|
records.append(result) |
|
|
|
|
|
df = pd.DataFrame.from_records(records) |
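
    # Columns that mix strings with other Python objects are cast to plain
    # strings so the frame can be written to parquet without type errors.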
|
|
|
|
|
for col in df.columns: |
|
|
|
if ( |
|
df[col].dtype == object |
|
and df[col].apply(lambda x: isinstance(x, str)).any() |
|
): |
|
df[col] = df[col].astype(str) |
|
|
|
|
|
|
|
if use_cache: |
|
df.to_parquet(cache_path) |
|
|
|
return df |
|
|
|
|
|
def process_answer_types(df: pd.DataFrame) -> pd.DataFrame: |
|
""" |
|
Process the answer field to create a new answer_type field. |
|
|
|
Args: |
|
df: Input DataFrame with 'answer' column |
|
|
|
Returns: |
|
DataFrame with new 'answer_type' column |
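
    Mapping (case-insensitive comparison of the answer text):
        "invalid question" -> "invalid"
        "i don't know"     -> "no_answer"
        anything else      -> "valid"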
|
""" |
|
|
|
df = df.copy() |
|
|
|
|
|
print("Unique answers in dataset:") |
|
print(df["answer"].unique()) |
|
|
|
|
|
conditions = [ |
|
df["answer"].str.lower() == "invalid question", |
|
df["answer"].str.lower() == "i don't know", |
|
] |
|
choices = ["invalid", "no_answer"] |
|
df["answer_type"] = np.select(conditions, choices, default="valid") |
|
|
|
|
|
print("\nAnswer type distribution:") |
|
print(df["answer_type"].value_counts()) |
|
|
|
return df |
|
|
|
|
|
def create_stratified_subsamples( |
|
df: pd.DataFrame, |
|
n_subsamples: int, |
|
stratify_columns: list[str] = [ |
|
"domain", |
|
"answer_type", |
|
"question_type", |
|
"static_or_dynamic", |
|
], |
|
output_path: str = "subsamples.json", |
|
force_compute: bool = False, |
|
) -> dict: |
|
""" |
|
Create stratified subsamples of the dataset and save them to a JSON file. |
|
Each subsample gets a unique ID based on its indices. |
|
|
|
Args: |
|
df: Input DataFrame |
|
n_subsamples: Number of subsamples to create |
|
stratify_columns: Columns to use for stratification |
|
output_path: Path to save/load the JSON output |
|
force_compute: If True, always compute subsamples even if file exists |
|
|
|
Returns: |
|
Dictionary containing the subsamples information |
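
    Output layout (abridged):
        {
            "metadata": {
                "n_subsamples": ..., "total_samples": ...,
                "stratify_columns": [...], "global_statistics": {...}
            },
            "subsamples": [
                {"id": "...", "statistics": {...}, "indices": [...], "size": ...},
                ...
            ]
        }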
|
""" |
|
|
|
if not force_compute and os.path.exists(output_path): |
|
try: |
|
with open(output_path, "r") as f: |
|
subsamples_data = json.load(f) |
|
|
|
|
|
if ( |
|
subsamples_data.get("metadata", {}).get("n_subsamples") == n_subsamples |
|
and subsamples_data.get("metadata", {}).get("stratify_columns") |
|
== stratify_columns |
|
): |
|
print(f"Loading existing subsamples from {output_path}") |
|
return subsamples_data |
|
else: |
|
print( |
|
"Existing subsamples file has different parameters, recomputing..." |
|
) |
|
except Exception as e: |
|
print(f"Error loading existing subsamples file: {e}, recomputing...") |
|
|
|
|
|
df["strat_category"] = df[stratify_columns].astype(str).agg("_".join, axis=1) |
|
|
|
|
|
skf = StratifiedKFold(n_splits=n_subsamples, shuffle=True, random_state=42) |
|
|
|
|
|
subsamples_info = [] |
|
for subsample_idx, (_, subsample_indices) in enumerate( |
|
skf.split(df, df["strat_category"]) |
|
): |
|
|
|
sorted_indices = sorted(subsample_indices.tolist()) |
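
        # Derive a short, deterministic ID from the sorted indices so the same
        # subsample always maps to the same ID across runs.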
|
|
|
|
|
subsample_id = hashlib.md5(str(sorted_indices).encode()).hexdigest()[:8] |
|
|
|
|
|
stats = {} |
|
subsample_df = df.iloc[subsample_indices] |
|
for col in stratify_columns: |
|
stats[col] = subsample_df[col].value_counts().to_dict() |
|
|
|
subsamples_info.append( |
|
{ |
|
"id": subsample_id, |
|
"statistics": stats, |
|
"indices": sorted_indices, |
|
"size": len(subsample_indices), |
|
} |
|
) |
|
|
|
|
|
global_stats = {} |
|
for col in stratify_columns: |
|
global_stats[col] = df[col].value_counts().to_dict() |
|
|
|
output_data = { |
|
"metadata": { |
|
"n_subsamples": n_subsamples, |
|
"total_samples": len(df), |
|
"stratify_columns": stratify_columns, |
|
"global_statistics": global_stats, |
|
}, |
|
"subsamples": subsamples_info, |
|
} |
|
|
|
|
|
with open(output_path, "w") as f: |
|
json.dump(output_data, f, indent=2) |
|
|
|
return output_data |
|
|
|
|
|
def write_subsample( |
|
input_file: str, indices: list[int], output_file: str, compress: bool = True |
|
) -> None: |
|
""" |
|
Write a single subsample to a file using awk. |
|
|
|
Args: |
|
input_file: Path to input JSONL file |
|
indices: List of indices to extract |
|
output_file: Path to output file |
|
compress: Whether to compress output |
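
    The generated command looks roughly like (indices and file names illustrative):
        bzcat 'in.jsonl.bz2' | awk 'BEGIN {split("2,5,9",a,","); for(i in a) n[a[i]];} NR in n' | bzip2 > 'out.jsonl.bz2'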
|
""" |
|
|
|
|
|
indices_set = set(i + 1 for i in indices) |
|
indices_str = ",".join(str(i) for i in sorted(indices_set)) |
|
|
|
|
|
awk_script = ( |
|
        f'BEGIN {{split("{indices_str}",a,","); for(i in a) n[a[i]];}} NR in n'
|
) |
|
|
|
if input_file.endswith(".bz2"): |
|
if compress: |
|
cmd = f"bzcat '{input_file}' | awk '{awk_script}' | bzip2 > '{output_file}'" |
|
else: |
|
cmd = f"bzcat '{input_file}' | awk '{awk_script}' > '{output_file}'" |
|
else: |
|
if compress: |
|
cmd = f"awk '{awk_script}' '{input_file}' | bzip2 > '{output_file}'" |
|
else: |
|
cmd = f"awk '{awk_script}' '{input_file}' > '{output_file}'" |
|
|
|
print(f"Process {os.getpid()} - Starting subsample to {output_file}") |
|
try: |
|
result = subprocess.run( |
|
cmd, |
|
shell=True, |
|
check=True, |
|
stderr=subprocess.PIPE, |
|
stdout=subprocess.PIPE, |
|
text=True, |
|
) |
|
print(f"Process {os.getpid()} - Finished subsample to {output_file}") |
|
|
|
|
|
if os.path.exists(output_file) and os.path.getsize(output_file) > 0: |
|
print( |
|
f"Process {os.getpid()} - Successfully created {output_file} ({os.path.getsize(output_file)} bytes)" |
|
) |
|
else: |
|
raise Exception(f"Output file {output_file} is empty or doesn't exist") |
|
|
|
except subprocess.CalledProcessError as e: |
|
print(f"Error executing command: {e.stderr}") |
|
print(f"Command output: {e.stdout}") |
|
raise |
|
except Exception as e: |
|
print(f"Error: {str(e)}") |
|
raise |
|
|
|
|
|
def subsample_jsonl_file( |
|
input_file: str, |
|
subsamples_file: str, |
|
output_dir: str = None, |
|
compress: bool = True, |
|
n_processes: int = None, |
|
overwrite: bool = False, |
|
) -> None: |
|
""" |
|
    Split a large JSONL file into multiple subsample files using awk, processing subsamples in parallel.
|
|
|
Args: |
|
input_file: Path to input JSONL file (can be bz2 compressed) |
|
subsamples_file: Path to JSON file containing subsample indices |
|
output_dir: Directory to save subsample files (defaults to input file directory) |
|
compress: Whether to compress output files with bz2 |
|
n_processes: Number of processes to use (defaults to min(n_subsamples, cpu_count)) |
|
overwrite: If False, skip existing output files (default: False) |
|
""" |
|
|
|
with open(subsamples_file, "r") as f: |
|
subsamples_data = json.load(f) |
|
|
|
|
|
n_subsamples = len(subsamples_data["subsamples"]) |
|
if n_processes is None: |
|
n_processes = min(n_subsamples, cpu_count()) |
|
|
|
if output_dir is None: |
|
output_dir = os.path.dirname(input_file) |
|
os.makedirs(output_dir, exist_ok=True) |
|
|
|
base_name = os.path.splitext(os.path.basename(input_file))[0] |
|
if base_name.endswith(".jsonl"): |
|
base_name = os.path.splitext(base_name)[0] |
|
|
|
|
|
write_args = [] |
|
skipped_files = [] |
|
for subsample in subsamples_data["subsamples"]: |
|
subsample_id = subsample["id"] |
|
output_name = f"{base_name}_subsample_{subsample_id}.jsonl" |
|
if compress: |
|
output_name += ".bz2" |
|
output_path = os.path.join(output_dir, output_name) |
|
|
|
|
|
if not overwrite and os.path.exists(output_path): |
|
skipped_files.append(output_path) |
|
continue |
|
|
|
write_args.append((input_file, subsample["indices"], output_path, compress)) |
|
|
|
if skipped_files: |
|
print(f"Skipping {len(skipped_files)} existing files:") |
|
for file in skipped_files: |
|
print(f" - {file}") |
|
|
|
if write_args: |
|
print(f"Processing {len(write_args)} subsamples using {n_processes} processes") |
|
with Pool(processes=n_processes) as pool: |
|
pool.starmap(write_subsample, write_args) |
|
else: |
|
print("No files to process - all files exist and overwrite=False") |
|
|
|
|
|
def run_crag_task_1_and_2( |
|
file_path: str, |
|
fields_to_extract: list[str], |
|
n_subsamples: int = 5, |
|
output_dir: str = None, |
|
compress: bool = True, |
|
n_processes: int = None, |
|
overwrite: bool = False, |
|
): |
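    """
    End-to-end pipeline: extract the requested fields, derive answer types,
    create stratified subsamples, and write each subsample to its own
    (optionally compressed) JSONL file in output_dir (defaults to the input
    file's directory).
    """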
|
|
|
df = read_jsonl_fields_fast(file_path, fields_to_extract) |
|
df = process_answer_types(df) |
|
print(df.head()) |
|
|
|
output_path = os.path.join( |
|
os.path.dirname(file_path), |
|
os.path.basename(file_path).split(".")[0] + "_subsamples.json", |
|
) |
|
|
|
|
|
subsamples_data = create_stratified_subsamples( |
|
        df, n_subsamples=n_subsamples, output_path=output_path
|
) |
|
|
|
|
|
with open(output_path, "r") as f: |
|
subsamples_data = json.load(f) |
|
|
|
|
|
print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples") |
|
print("\nGlobal statistics:") |
|
print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2)) |
|
|
|
|
|
print("\nFirst subsample statistics:") |
|
print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2)) |
|
|
|
|
|
    subsample_jsonl_file(
        file_path,
        output_path,
        output_dir=output_dir,
        compress=compress,
        n_processes=n_processes,
        overwrite=overwrite,
    )
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2" |
|
fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"] |
|
|
|
run_crag_task_1_and_2(file_path, fields_to_extract) |
|
|