import os
import hashlib
import bz2
import json
import subprocess
from multiprocessing import Pool, cpu_count
from typing import Dict, List, Optional, Any

import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold


def get_cache_path(file_path: str, required_fields: List[str]) -> str:
    """
    Generate a unique cache file path based on the input file and requested fields.

    Args:
        file_path: Path to the input JSONL file
        required_fields: List of field names to extract

    Returns:
        Path to the cache file
    """
    # Hash the sorted field list so different field selections get different caches.
    fields_str = ",".join(sorted(required_fields))
    hash_str = hashlib.md5(fields_str.encode()).hexdigest()[:10]

    base_dir = os.path.dirname(file_path)
    file_name = os.path.basename(file_path).split(".")[0]
    cache_name = f"{file_name}_cache_{hash_str}.parquet"
    return os.path.join(base_dir, cache_name)
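
# Illustrative sketch (made-up path): the cache file lands next to the input and
# embeds a 10-character md5 prefix of the sorted field list, e.g.
#
#     get_cache_path("data/questions.jsonl.bz2", ["domain", "answer"])
#     # -> "data/questions_cache_<10-char md5 prefix>.parquet"
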
def read_jsonl_fields_fast(
    file_path: str, required_fields: List[str], use_cache: bool = True
) -> pd.DataFrame:
    """
    Quickly extract specific fields from a JSONL file (optionally bz2 compressed)
    using string operations. Results are cached in parquet format for faster
    subsequent reads.

    Args:
        file_path: Path to the JSONL file (can be bz2 compressed)
        required_fields: List of field names to extract from each JSON object
        use_cache: Whether to use/create cache file (default: True)

    Returns:
        DataFrame containing the requested fields
    """
    cache_path = get_cache_path(file_path, required_fields)
    print(f"Cache path: {cache_path}")

    if use_cache and os.path.exists(cache_path):
        return pd.read_parquet(cache_path)

    records = []
    patterns = [f'"{field}":' for field in required_fields]

    # Open transparently whether or not the file is bz2 compressed.
    open_fn = bz2.open if file_path.endswith(".bz2") else open
    with open_fn(file_path, "rt") as file:
        for line in file:
            if not line.strip():
                continue

            result = {}
            for field, pattern in zip(required_fields, patterns):
                try:
                    start_idx = line.find(pattern)
                    if start_idx == -1:
                        continue

                    # Skip any whitespace after the colon.
                    start_idx += len(pattern)
                    while start_idx < len(line) and line[start_idx].isspace():
                        start_idx += 1

                    if start_idx >= len(line):
                        continue

                    if line[start_idx] == '"':
                        # String value: read up to the next quote
                        # (escaped quotes are not handled by this fast path).
                        start_idx += 1
                        end_idx = line.find('"', start_idx)
                        value = line[start_idx:end_idx]
                    elif line[start_idx] == "{" or line[start_idx] == "[":
                        # Nested objects and arrays are skipped by this fast path.
                        continue
                    else:
                        # Scalar value: read up to the next comma or closing brace.
                        end_idx = line.find(",", start_idx)
                        if end_idx == -1:
                            end_idx = line.find("}", start_idx)
                        value = line[start_idx:end_idx].strip()

                        if value == "true":
                            value = True
                        elif value == "false":
                            value = False
                        elif value == "null":
                            value = None
                        else:
                            try:
                                value = float(value) if "." in value else int(value)
                            except ValueError:
                                continue

                    result[field] = value
                except Exception:
                    continue

            if result:
                records.append(result)

    df = pd.DataFrame.from_records(records)

    # Force mixed object columns to strings so the DataFrame can be written to parquet.
    for col in df.columns:
        if (
            df[col].dtype == object
            and df[col].apply(lambda x: isinstance(x, str)).any()
        ):
            df[col] = df[col].astype(str)

    if use_cache:
        df.to_parquet(cache_path)

    return df
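
# Example usage (a minimal sketch; the path and field names are illustrative
# assumptions, not files shipped with this module):
#
#     df = read_jsonl_fields_fast(
#         "data/questions.jsonl.bz2",
#         required_fields=["answer", "domain", "question_type", "static_or_dynamic"],
#     )
#     print(df.head())
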
def process_answer_types(df: pd.DataFrame) -> pd.DataFrame:
    """
    Process the answer field to create a new answer_type field.

    Args:
        df: Input DataFrame with 'answer' column

    Returns:
        DataFrame with new 'answer_type' column
    """
    df = df.copy()

    print("Unique answers in dataset:")
    print(df["answer"].unique())

    # Classify each answer as invalid, unanswered, or valid.
    conditions = [
        df["answer"].str.lower() == "invalid question",
        df["answer"].str.lower() == "i don't know",
    ]
    choices = ["invalid", "no_answer"]
    df["answer_type"] = np.select(conditions, choices, default="valid")

    print("\nAnswer type distribution:")
    print(df["answer_type"].value_counts())

    return df
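
# Minimal sketch of how answers map to answer_type (the match is case-insensitive):
#
#     process_answer_types(pd.DataFrame({"answer": ["Invalid Question", "I don't know", "Paris"]}))
#     # answer_type -> ["invalid", "no_answer", "valid"]
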
def create_stratified_subsets(
    df: pd.DataFrame,
    n_subsets: int,
    stratify_columns: List[str] = [
        "domain",
        "answer_type",
        "question_type",
        "static_or_dynamic",
    ],
    output_path: str = "subsets.json",
    force_compute: bool = False,
) -> Dict[str, Any]:
    """
    Create stratified subsets of the dataset and save them to a JSON file.
    Each subset gets a unique ID based on its indices.

    Args:
        df: Input DataFrame
        n_subsets: Number of subsets to create
        stratify_columns: Columns to use for stratification
        output_path: Path to save/load the JSON output
        force_compute: If True, always compute subsets even if file exists

    Returns:
        Dictionary containing the subsets information
    """
    # Reuse an existing subsets file if it was built with the same parameters.
    if not force_compute and os.path.exists(output_path):
        try:
            with open(output_path, "r") as f:
                subsets_data = json.load(f)

            if (
                subsets_data.get("metadata", {}).get("n_subsets") == n_subsets
                and subsets_data.get("metadata", {}).get("stratify_columns")
                == stratify_columns
            ):
                print(f"Loading existing subsets from {output_path}")
                return subsets_data
            else:
                print("Existing subsets file has different parameters, recomputing...")
        except Exception as e:
            print(f"Error loading existing subsets file: {e}, recomputing...")

    # Combine the stratification columns into a single label per row
    # (kept as a local series so the caller's DataFrame is not modified).
    strat_category = df[stratify_columns].astype(str).agg("_".join, axis=1)

    skf = StratifiedKFold(n_splits=n_subsets, shuffle=True, random_state=42)

    subsets_info = []
    for subset_idx, (_, subset_indices) in enumerate(
        skf.split(df, strat_category)
    ):
        sorted_indices = sorted(subset_indices.tolist())

        # Derive a stable ID from the subset's sorted indices.
        subset_id = hashlib.md5(str(sorted_indices).encode()).hexdigest()[:8]

        # Per-subset distribution of each stratification column.
        stats = {}
        subset_df = df.iloc[subset_indices]
        for col in stratify_columns:
            stats[col] = subset_df[col].value_counts().to_dict()

        subsets_info.append(
            {
                "index": subset_idx,
                "id": subset_id,
                "statistics": stats,
                "indices": sorted_indices,
                "size": len(subset_indices),
            }
        )

    global_stats = {}
    for col in stratify_columns:
        global_stats[col] = df[col].value_counts().to_dict()

    output_data = {
        "metadata": {
            "n_subsets": n_subsets,
            "total_samples": len(df),
            "stratify_columns": stratify_columns,
            "global_statistics": global_stats,
        },
        "subsets": subsets_info,
    }

    with open(output_path, "w") as f:
        json.dump(output_data, f, indent=2)

    return output_data
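
# Example usage (a minimal sketch; assumes df already carries the four default
# stratification columns, e.g. after process_answer_types):
#
#     subsets = create_stratified_subsets(df, n_subsets=5, output_path="subsets.json")
#     print(subsets["metadata"]["total_samples"], len(subsets["subsets"]))
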
def write_subset(
    input_file: str, indices: List[int], output_file: str, compress: bool = True
) -> None:
    """Write a single subset to a file using awk and jq."""
    # Convert 0-based DataFrame indices to 1-based line numbers for awk's NR.
    indices_set = set(i + 1 for i in indices)
    nr_conditions = " || ".join(f"NR == {i}" for i in sorted(indices_set))

    awk_script = f"""
    {{
        if ({nr_conditions}) {{
            print
        }}
    }}"""

    # Select the subset's lines with awk, drop the alt_ans field with jq,
    # and optionally recompress the output with bzip2.
    if input_file.endswith(".bz2"):
        if compress:
            cmd = f"bzcat '{input_file}' | awk '{awk_script}' | jq 'del(.alt_ans)' | bzip2 > '{output_file}'"
        else:
            cmd = f"bzcat '{input_file}' | awk '{awk_script}' | jq 'del(.alt_ans)' > '{output_file}'"
    else:
        if compress:
            cmd = f"awk '{awk_script}' '{input_file}' | jq 'del(.alt_ans)' | bzip2 > '{output_file}'"
        else:
            cmd = f"awk '{awk_script}' '{input_file}' | jq 'del(.alt_ans)' > '{output_file}'"

    print(f"Process {os.getpid()} - Starting subset to {output_file}")
    try:
        subprocess.run(
            cmd,
            shell=True,
            check=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            text=True,
        )
        print(f"Process {os.getpid()} - Finished subset to {output_file}")

        if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
            print(
                f"Process {os.getpid()} - Successfully created {output_file} "
                f"({os.path.getsize(output_file)} bytes)"
            )
        else:
            raise Exception(f"Output file {output_file} is empty or doesn't exist")

    except subprocess.CalledProcessError as e:
        print(f"Error executing command: {e.stderr}")
        print(f"Command output: {e.stdout}")
        raise
    except Exception as e:
        print(f"Error: {str(e)}")
        raise
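
# For illustration, with indices [1, 4] and a bz2 input, the generated shell
# command has this shape (the actual NR list depends on the subset):
#
#     bzcat 'in.jsonl.bz2' | awk '{ if (NR == 2 || NR == 5) { print } }' \
#         | jq 'del(.alt_ans)' | bzip2 > 'out.jsonl.bz2'
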
def subset_jsonl_file(
    input_file: str,
    subsets_file: str,
    output_dir: Optional[str] = None,
    compress: bool = True,
    n_processes: Optional[int] = None,
    overwrite: bool = False,
) -> None:
    """
    Split a large JSONL file into multiple subset files using awk and jq,
    processing subsets in parallel.

    Args:
        input_file: Path to input JSONL file (can be bz2 compressed)
        subsets_file: Path to JSON file containing subset indices
        output_dir: Directory to save subset files (defaults to input file directory)
        compress: Whether to compress output files with bz2
        n_processes: Number of processes to use (defaults to min(n_subsets, cpu_count))
        overwrite: If False, skip existing output files (default: False)
    """
    with open(subsets_file, "r") as f:
        subsets_data = json.load(f)

    n_subsets = len(subsets_data["subsets"])
    if n_processes is None:
        n_processes = min(n_subsets, cpu_count())

    if output_dir is None:
        output_dir = os.path.dirname(input_file)
    os.makedirs(output_dir, exist_ok=True)

    # Strip ".bz2" and ".jsonl" extensions to get the base output name.
    base_name = os.path.splitext(os.path.basename(input_file))[0]
    if base_name.endswith(".jsonl"):
        base_name = os.path.splitext(base_name)[0]

    # Build the worker argument list, skipping outputs that already exist.
    write_args = []
    skipped_files = []
    for subset in subsets_data["subsets"]:
        subset_idx = subset["index"]
        output_name = f"{base_name}_subset_{subset_idx + 1}.jsonl"
        if compress:
            output_name += ".bz2"
        output_path = os.path.join(output_dir, output_name)

        if not overwrite and os.path.exists(output_path):
            skipped_files.append(output_path)
            continue

        write_args.append((input_file, subset["indices"], output_path, compress))

    if skipped_files:
        print(f"Skipping {len(skipped_files)} existing files:")
        for file in skipped_files:
            print(f"  - {file}")

    if write_args:
        print(f"Processing {len(write_args)} subsets using {n_processes} processes")
        with Pool(processes=n_processes) as pool:
            pool.starmap(write_subset, write_args)
    else:
        print("No files to process - all files exist and overwrite=False")