Modalities: Text
Formats: json
Languages: English
Libraries: Datasets, pandas
jchevallard committed · Commit 0e25589 · 1 Parent(s): 0789070

Added script to create subsamples

Files changed (4):
  1. crag_to_subsamples.py +446 -0
  2. pyproject.toml +29 -0
  3. requirements-dev.lock +109 -0
  4. requirements.lock +109 -0
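For orientation (not part of this commit): the script below writes bz2-compressed JSONL subsamples next to the input file, plus a subsamples JSON describing each split. A generated subsample could be inspected with pandas along these lines; the file name and the subsample id "1a2b3c4d" are placeholders, not outputs of this commit.

import pandas as pd

# Hypothetical output name: <input_basename>_subsample_<id>.jsonl.bz2
subsample_path = "local_data/crag_task_1_and_2_dev_v4_subsample_1a2b3c4d.jsonl.bz2"

# pandas reads line-delimited JSON directly from a bz2-compressed file
df = pd.read_json(subsample_path, lines=True, compression="bz2")
print(df.shape)
# "domain" is one of the fields the script stratifies on
print(df["domain"].value_counts())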
crag_to_subsamples.py ADDED
@@ -0,0 +1,446 @@
+ import bz2
+ from typing import Iterator, Dict, Any
+ import pandas as pd
+ import os
+ import hashlib
+ import json
+ from sklearn.model_selection import StratifiedKFold
+ import numpy as np
+ from multiprocessing import Pool, cpu_count
+ from functools import partial
+ import subprocess
+
+
+ def get_cache_path(file_path: str, required_fields: list[str]) -> str:
+     """
+     Generate a unique cache file path based on input file and fields.
+
+     Args:
+         file_path: Path to the input JSONL file
+         required_fields: List of field names to extract
+
+     Returns:
+         Path to the cache file
+     """
+     # Create a unique hash based on the requested fields
+     fields_str = ",".join(sorted(required_fields))
+     hash_input = f"{fields_str}"
+     hash_str = hashlib.md5(hash_input.encode()).hexdigest()[:10]
+
+     # Get the directory of the input file
+     base_dir = os.path.dirname(file_path)
+     # Get filename from file path
+     file_name = os.path.basename(file_path).split(".")[0]
+     cache_name = f"{file_name}_cache_{hash_str}.parquet"
+     return os.path.join(base_dir, cache_name)
+
+
+ def read_jsonl_fields_fast(
+     file_path: str, required_fields: list[str], use_cache: bool = True
+ ) -> pd.DataFrame:
+     """
+     Quickly extract specific fields from a compressed JSONL file using string operations.
+     Results are cached in parquet format for faster subsequent reads.
+
+     Args:
+         file_path: Path to the JSONL file (can be bz2 compressed)
+         required_fields: List of field names to extract from each JSON object
+         use_cache: Whether to use/create cache file (default: True)
+
+     Returns:
+         DataFrame containing the requested fields
+     """
+     cache_path = get_cache_path(file_path, required_fields)
+     print(f"Cache path: {cache_path}")
+     # Try to load from cache first
+     if use_cache and os.path.exists(cache_path):
+         return pd.read_parquet(cache_path)
+
+     # If no cache exists, process the file
+     records = []
+     patterns = [f'"{field}":' for field in required_fields]
+
+     with bz2.open(file_path, "rt") as file:
+         for line in file:
+             if not line.strip():
+                 continue
+
+             result = {}
+             for field, pattern in zip(required_fields, patterns):
+                 try:
+                     # Find the field in the line
+                     start_idx = line.find(pattern)
+                     if start_idx == -1:
+                         continue
+
+                     # Move to the start of the value
+                     start_idx += len(pattern)
+                     while start_idx < len(line) and line[start_idx].isspace():
+                         start_idx += 1
+
+                     # Handle different value types
+                     if start_idx >= len(line):
+                         continue
+
+                     if line[start_idx] == '"':
+                         # String value
+                         start_idx += 1
+                         end_idx = line.find('"', start_idx)
+                         value = line[start_idx:end_idx]
+                     elif line[start_idx] == "{" or line[start_idx] == "[":
+                         # Skip nested objects/arrays
+                         continue
+                     else:
+                         # Number, boolean, or null
+                         end_idx = line.find(",", start_idx)
+                         if end_idx == -1:
+                             end_idx = line.find("}", start_idx)
+                         value = line[start_idx:end_idx].strip()
+                         # Convert to appropriate type
+                         if value == "true":
+                             value = True
+                         elif value == "false":
+                             value = False
+                         elif value == "null":
+                             value = None
+                         else:
+                             try:
+                                 value = float(value) if "." in value else int(value)
+                             except ValueError:
+                                 continue
+
+                     result[field] = value
+                 except Exception:
+                     continue
+
+             if result:
+                 records.append(result)
+
+     # Convert to DataFrame
+     df = pd.DataFrame.from_records(records)
+
+     # Convert columns to appropriate types
+     for col in df.columns:
+         # If the column contains any strings, convert the whole column to strings
+         if (
+             df[col].dtype == object
+             and df[col].apply(lambda x: isinstance(x, str)).any()
+         ):
+             df[col] = df[col].astype(str)
+         # You can add more type conversions here if needed
+
+     # Save cache if enabled
+     if use_cache:
+         df.to_parquet(cache_path)
+
+     return df
+
+
+ def process_answer_types(df: pd.DataFrame) -> pd.DataFrame:
+     """
+     Process the answer field to create a new answer_type field.
+
+     Args:
+         df: Input DataFrame with 'answer' column
+
+     Returns:
+         DataFrame with new 'answer_type' column
+     """
+     # Create a copy to avoid modifying the original
+     df = df.copy()
+
+     # Print unique answers to debug
+     print("Unique answers in dataset:")
+     print(df["answer"].unique())
+
+     # Create answer_type column with case-insensitive matching
+     conditions = [
+         df["answer"].str.lower() == "invalid question",
+         df["answer"].str.lower() == "i don't know",  # Try exact match
+     ]
+     choices = ["invalid", "no_answer"]
+     df["answer_type"] = np.select(conditions, choices, default="valid")
+
+     # Print distribution to verify
+     print("\nAnswer type distribution:")
+     print(df["answer_type"].value_counts())
+
+     return df
+
+
+ def create_stratified_subsamples(
+     df: pd.DataFrame,
+     n_subsamples: int,
+     stratify_columns: list[str] = [
+         "domain",
+         "answer_type",
+         "question_type",
+         "static_or_dynamic",
+     ],
+     output_path: str = "subsamples.json",
+     force_compute: bool = False,
+ ) -> dict:
+     """
+     Create stratified subsamples of the dataset and save them to a JSON file.
+     Each subsample gets a unique ID based on its indices.
+
+     Args:
+         df: Input DataFrame
+         n_subsamples: Number of subsamples to create
+         stratify_columns: Columns to use for stratification
+         output_path: Path to save/load the JSON output
+         force_compute: If True, always compute subsamples even if file exists
+
+     Returns:
+         Dictionary containing the subsamples information
+     """
+     # Check if file exists and we can use it
+     if not force_compute and os.path.exists(output_path):
+         try:
+             with open(output_path, "r") as f:
+                 subsamples_data = json.load(f)
+
+             # Validate the loaded data has the expected structure
+             if (
+                 subsamples_data.get("metadata", {}).get("n_subsamples") == n_subsamples
+                 and subsamples_data.get("metadata", {}).get("stratify_columns")
+                 == stratify_columns
+             ):
+                 print(f"Loading existing subsamples from {output_path}")
+                 return subsamples_data
+             else:
+                 print(
+                     "Existing subsamples file has different parameters, recomputing..."
+                 )
+         except Exception as e:
+             print(f"Error loading existing subsamples file: {e}, recomputing...")
+
+     # Create a combined category for stratification
+     df["strat_category"] = df[stratify_columns].astype(str).agg("_".join, axis=1)
+
+     # Initialize the stratified splitter
+     skf = StratifiedKFold(n_splits=n_subsamples, shuffle=True, random_state=42)
+
+     # Create subsamples
+     subsamples_info = []
+     for subsample_idx, (_, subsample_indices) in enumerate(
+         skf.split(df, df["strat_category"])
+     ):
+         # Sort indices for consistent hashing
+         sorted_indices = sorted(subsample_indices.tolist())
+
+         # Create a deterministic ID from the indices
+         subsample_id = hashlib.md5(str(sorted_indices).encode()).hexdigest()[:8]
+
+         # Calculate statistics for this subsample
+         stats = {}
+         subsample_df = df.iloc[subsample_indices]
+         for col in stratify_columns:
+             stats[col] = subsample_df[col].value_counts().to_dict()
+
+         subsamples_info.append(
+             {
+                 "id": subsample_id,
+                 "statistics": stats,
+                 "indices": sorted_indices,
+                 "size": len(subsample_indices),
+             }
+         )
+
+     # Add global statistics
+     global_stats = {}
+     for col in stratify_columns:
+         global_stats[col] = df[col].value_counts().to_dict()
+
+     output_data = {
+         "metadata": {
+             "n_subsamples": n_subsamples,
+             "total_samples": len(df),
+             "stratify_columns": stratify_columns,
+             "global_statistics": global_stats,
+         },
+         "subsamples": subsamples_info,
+     }
+
+     # Save to JSON
+     with open(output_path, "w") as f:
+         json.dump(output_data, f, indent=2)
+
+     return output_data
+
+
+ def write_subsample(
+     input_file: str, indices: list[int], output_file: str, compress: bool = True
+ ) -> None:
+     """
+     Write a single subsample to a file using awk.
+
+     Args:
+         input_file: Path to input JSONL file
+         indices: List of indices to extract
+         output_file: Path to output file
+         compress: Whether to compress output
+     """
+     # Convert indices to awk condition
+     # NR is the current line number in awk
+     indices_set = set(i + 1 for i in indices)  # Convert to 1-based indexing
+     indices_str = ",".join(str(i) for i in sorted(indices_set))
+
+     # Create awk script with escaped curly braces
+     awk_script = (
+         f'BEGIN {{split("{indices_str}",a,","); for(i in a) n[a[i]];}} NR in n'
+     )
+
+     if input_file.endswith(".bz2"):
+         if compress:
+             cmd = f"bzcat '{input_file}' | awk '{awk_script}' | bzip2 > '{output_file}'"
+         else:
+             cmd = f"bzcat '{input_file}' | awk '{awk_script}' > '{output_file}'"
+     else:
+         if compress:
+             cmd = f"awk '{awk_script}' '{input_file}' | bzip2 > '{output_file}'"
+         else:
+             cmd = f"awk '{awk_script}' '{input_file}' > '{output_file}'"
+
+     print(f"Process {os.getpid()} - Starting subsample to {output_file}")
+     try:
+         result = subprocess.run(
+             cmd,
+             shell=True,
+             check=True,
+             stderr=subprocess.PIPE,
+             stdout=subprocess.PIPE,
+             text=True,
+         )
+         print(f"Process {os.getpid()} - Finished subsample to {output_file}")
+
+         # Verify the output file exists and has content
+         if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
+             print(
+                 f"Process {os.getpid()} - Successfully created {output_file} ({os.path.getsize(output_file)} bytes)"
+             )
+         else:
+             raise Exception(f"Output file {output_file} is empty or doesn't exist")
+
+     except subprocess.CalledProcessError as e:
+         print(f"Error executing command: {e.stderr}")
+         print(f"Command output: {e.stdout}")
+         raise
+     except Exception as e:
+         print(f"Error: {str(e)}")
+         raise
+
+
+ def subsample_jsonl_file(
+     input_file: str,
+     subsamples_file: str,
+     output_dir: str = None,
+     compress: bool = True,
+     n_processes: int = None,
+     overwrite: bool = False,
+ ) -> None:
+     """
+     Subsample a large JSONL file into multiple files using awk for maximum performance.
+
+     Args:
+         input_file: Path to input JSONL file (can be bz2 compressed)
+         subsamples_file: Path to JSON file containing subsample indices
+         output_dir: Directory to save subsample files (defaults to input file directory)
+         compress: Whether to compress output files with bz2
+         n_processes: Number of processes to use (defaults to min(n_subsamples, cpu_count))
+         overwrite: If False, skip existing output files (default: False)
+     """
+     # Load subsamples information
+     with open(subsamples_file, "r") as f:
+         subsamples_data = json.load(f)
+
+     # Determine optimal number of processes
+     n_subsamples = len(subsamples_data["subsamples"])
+     if n_processes is None:
+         n_processes = min(n_subsamples, cpu_count())
+
+     if output_dir is None:
+         output_dir = os.path.dirname(input_file)
+     os.makedirs(output_dir, exist_ok=True)
+
+     base_name = os.path.splitext(os.path.basename(input_file))[0]
+     if base_name.endswith(".jsonl"):
+         base_name = os.path.splitext(base_name)[0]
+
+     # Prepare arguments for parallel processing
+     write_args = []
+     skipped_files = []
+     for subsample in subsamples_data["subsamples"]:
+         subsample_id = subsample["id"]
+         output_name = f"{base_name}_subsample_{subsample_id}.jsonl"
+         if compress:
+             output_name += ".bz2"
+         output_path = os.path.join(output_dir, output_name)
+
+         # Skip if file exists and overwrite is False
+         if not overwrite and os.path.exists(output_path):
+             skipped_files.append(output_path)
+             continue
+
+         write_args.append((input_file, subsample["indices"], output_path, compress))
+
+     if skipped_files:
+         print(f"Skipping {len(skipped_files)} existing files:")
+         for file in skipped_files:
+             print(f"  - {file}")
+
+     if write_args:
+         print(f"Processing {len(write_args)} subsamples using {n_processes} processes")
+         with Pool(processes=n_processes) as pool:
+             pool.starmap(write_subsample, write_args)
+     else:
+         print("No files to process - all files exist and overwrite=False")
+
+
+ def run_crag_task_1_and_2(
+     file_path: str,
+     fields_to_extract: list[str],
+     n_subsamples: int = 5,
+     output_dir: str = None,
+     compress: bool = True,
+     n_processes: int = None,
+     overwrite: bool = False,
+ ):
+     # Load and process data
+     df = read_jsonl_fields_fast(file_path, fields_to_extract)
+     df = process_answer_types(df)
+     print(df.head())
+
+     output_path = os.path.join(
+         os.path.dirname(file_path),
+         os.path.basename(file_path).split(".")[0] + "_subsamples.json",
+     )
+
+     # This will load from file if it exists and parameters match
+     subsamples_data = create_stratified_subsamples(
+         df, n_subsamples=n_subsamples, output_path=output_path
+     )
+
+     # Example of how to read and use the subsamples
+     with open(output_path, "r") as f:
+         subsamples_data = json.load(f)
+
+     # Print some information about the subsamples
+     print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples")
+     print("\nGlobal statistics:")
+     print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2))
+
+     # Print statistics for first subsample
+     print("\nFirst subsample statistics:")
+     print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2))
+
+     # By default this will use all available CPU cores
+     subsample_jsonl_file(file_path, output_path, output_dir=output_dir, compress=compress, n_processes=n_processes, overwrite=overwrite)
+
+
+ # Example usage
+ if __name__ == "__main__":
+     file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
+     fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"]
+
+     run_crag_task_1_and_2(file_path, fields_to_extract)
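As an illustration only (not part of the committed script), a quick consistency check: each written subsample should contain exactly as many lines as the "size" recorded for its id in the subsamples JSON. The paths below are placeholders that follow the naming scheme used above.

import bz2
import json

subsamples_json = "local_data/crag_task_1_and_2_dev_v4_subsamples.json"  # placeholder path
subsample_file = "local_data/crag_task_1_and_2_dev_v4_subsample_1a2b3c4d.jsonl.bz2"  # placeholder path

with open(subsamples_json) as f:
    meta = json.load(f)

# Pick the subsample whose id appears in the file name
expected = next(s["size"] for s in meta["subsamples"] if s["id"] in subsample_file)

# Count the JSONL lines actually written by the awk pipeline
with bz2.open(subsample_file, "rt") as f:
    n_lines = sum(1 for _ in f)

assert n_lines == expected, f"expected {expected} lines, found {n_lines}"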
pyproject.toml ADDED
@@ -0,0 +1,29 @@
+ [project]
+ name = "lejuge"
+ version = "0.1.0"
+ description = "Add your description here"
+ authors = [
+     { name = "Jacopo Chevallard", email = "[email protected]" }
+ ]
+ dependencies = [
+     "ipykernel>=6.29.5",
+     "pandas>=2.2.3",
+     "fastparquet>=2024.11.0",
+     "scikit-learn>=1.6.1",
+ ]
+ readme = "README.md"
+ requires-python = ">= 3.11"
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.rye]
+ managed = true
+ dev-dependencies = []
+
+ [tool.hatch.metadata]
+ allow-direct-references = true
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/lejuge"]
requirements-dev.lock ADDED
@@ -0,0 +1,109 @@
+ # generated by rye
+ # use `rye lock` or `rye sync` to update this lockfile
+ #
+ # last locked with the following flags:
+ #   pre: false
+ #   features: []
+ #   all-features: false
+ #   with-sources: false
+ #   generate-hashes: false
+ #   universal: false
+
+ -e file:.
+ appnope==0.1.4
+     # via ipykernel
+ asttokens==3.0.0
+     # via stack-data
+ comm==0.2.2
+     # via ipykernel
+ cramjam==2.9.1
+     # via fastparquet
+ debugpy==1.8.12
+     # via ipykernel
+ decorator==5.1.1
+     # via ipython
+ executing==2.2.0
+     # via stack-data
+ fastparquet==2024.11.0
+     # via lejuge
+ fsspec==2024.12.0
+     # via fastparquet
+ ipykernel==6.29.5
+     # via lejuge
+ ipython==8.31.0
+     # via ipykernel
+ jedi==0.19.2
+     # via ipython
+ joblib==1.4.2
+     # via scikit-learn
+ jupyter-client==8.6.3
+     # via ipykernel
+ jupyter-core==5.7.2
+     # via ipykernel
+     # via jupyter-client
+ matplotlib-inline==0.1.7
+     # via ipykernel
+     # via ipython
+ nest-asyncio==1.6.0
+     # via ipykernel
+ numpy==2.2.2
+     # via fastparquet
+     # via pandas
+     # via scikit-learn
+     # via scipy
+ packaging==24.2
+     # via fastparquet
+     # via ipykernel
+ pandas==2.2.3
+     # via fastparquet
+     # via lejuge
+ parso==0.8.4
+     # via jedi
+ pexpect==4.9.0
+     # via ipython
+ platformdirs==4.3.6
+     # via jupyter-core
+ prompt-toolkit==3.0.50
+     # via ipython
+ psutil==6.1.1
+     # via ipykernel
+ ptyprocess==0.7.0
+     # via pexpect
+ pure-eval==0.2.3
+     # via stack-data
+ pygments==2.19.1
+     # via ipython
+ python-dateutil==2.9.0.post0
+     # via jupyter-client
+     # via pandas
+ pytz==2024.2
+     # via pandas
+ pyzmq==26.2.0
+     # via ipykernel
+     # via jupyter-client
+ scikit-learn==1.6.1
+     # via lejuge
+ scipy==1.15.1
+     # via scikit-learn
+ six==1.17.0
+     # via python-dateutil
+ stack-data==0.6.3
+     # via ipython
+ threadpoolctl==3.5.0
+     # via scikit-learn
+ tornado==6.4.2
+     # via ipykernel
+     # via jupyter-client
+ traitlets==5.14.3
+     # via comm
+     # via ipykernel
+     # via ipython
+     # via jupyter-client
+     # via jupyter-core
+     # via matplotlib-inline
+ typing-extensions==4.12.2
+     # via ipython
+ tzdata==2025.1
+     # via pandas
+ wcwidth==0.2.13
+     # via prompt-toolkit
requirements.lock ADDED
@@ -0,0 +1,109 @@
+ # generated by rye
+ # use `rye lock` or `rye sync` to update this lockfile
+ #
+ # last locked with the following flags:
+ #   pre: false
+ #   features: []
+ #   all-features: false
+ #   with-sources: false
+ #   generate-hashes: false
+ #   universal: false
+
+ -e file:.
+ appnope==0.1.4
+     # via ipykernel
+ asttokens==3.0.0
+     # via stack-data
+ comm==0.2.2
+     # via ipykernel
+ cramjam==2.9.1
+     # via fastparquet
+ debugpy==1.8.12
+     # via ipykernel
+ decorator==5.1.1
+     # via ipython
+ executing==2.2.0
+     # via stack-data
+ fastparquet==2024.11.0
+     # via lejuge
+ fsspec==2024.12.0
+     # via fastparquet
+ ipykernel==6.29.5
+     # via lejuge
+ ipython==8.31.0
+     # via ipykernel
+ jedi==0.19.2
+     # via ipython
+ joblib==1.4.2
+     # via scikit-learn
+ jupyter-client==8.6.3
+     # via ipykernel
+ jupyter-core==5.7.2
+     # via ipykernel
+     # via jupyter-client
+ matplotlib-inline==0.1.7
+     # via ipykernel
+     # via ipython
+ nest-asyncio==1.6.0
+     # via ipykernel
+ numpy==2.2.2
+     # via fastparquet
+     # via pandas
+     # via scikit-learn
+     # via scipy
+ packaging==24.2
+     # via fastparquet
+     # via ipykernel
+ pandas==2.2.3
+     # via fastparquet
+     # via lejuge
+ parso==0.8.4
+     # via jedi
+ pexpect==4.9.0
+     # via ipython
+ platformdirs==4.3.6
+     # via jupyter-core
+ prompt-toolkit==3.0.50
+     # via ipython
+ psutil==6.1.1
+     # via ipykernel
+ ptyprocess==0.7.0
+     # via pexpect
+ pure-eval==0.2.3
+     # via stack-data
+ pygments==2.19.1
+     # via ipython
+ python-dateutil==2.9.0.post0
+     # via jupyter-client
+     # via pandas
+ pytz==2024.2
+     # via pandas
+ pyzmq==26.2.0
+     # via ipykernel
+     # via jupyter-client
+ scikit-learn==1.6.1
+     # via lejuge
+ scipy==1.15.1
+     # via scikit-learn
+ six==1.17.0
+     # via python-dateutil
+ stack-data==0.6.3
+     # via ipython
+ threadpoolctl==3.5.0
+     # via scikit-learn
+ tornado==6.4.2
+     # via ipykernel
+     # via jupyter-client
+ traitlets==5.14.3
+     # via comm
+     # via ipykernel
+     # via ipython
+     # via jupyter-client
+     # via jupyter-core
+     # via matplotlib-inline
+ typing-extensions==4.12.2
+     # via ipython
+ tzdata==2025.1
+     # via pandas
+ wcwidth==0.2.13
+     # via prompt-toolkit