import os
import pandas as pd
import pyarrow.parquet as pq
import json
import numpy as np
# Configuration
base_dir = "/lustre/fsn1/projects/rech/fmr/uft12cr/statistics_corpus_full"
set_dirs = [f"set_{i}" for i in range(1, 11)] # Sets 1 to 10, excluding filtered sets
test = False # Set to True to process only a single Parquet file
min_tokens_threshold = 100000 # Minimum tokens required to report certain statistics
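# Languages and collections whose total token count falls below this threshold are reported as "N/A" for the derived ratio statistics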
# Dictionary to accumulate statistics
stats = {
"total_tokens_by_language": {},
"total_words_by_language": {},
"total_tokens_by_collection": {},
"total_words_by_collection": {},
"doc_count_by_language": {},
"doc_count_by_collection": {},
"license_distribution": {},
"compression_rate_by_language": {},
"avg_doc_length_words_by_language": {},
"avg_doc_length_tokens_by_language": {},
"compression_rate_by_collection": {},
"avg_doc_length_words_by_collection": {},
"avg_doc_length_tokens_by_collection": {},
}
processed_files = 0
errors_count = 0
# Helper function to update dictionaries for language and collection statistics
def update_stats(df):
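    """Accumulate per-collection and per-language token, word, document, and license counts from one DataFrame."""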
    required_columns = {"collection", "language", "token_count", "word_count", "license"}
    if not required_columns.issubset(df.columns):
        print("DataFrame is missing required columns.")
        return
    try:
        # Relabel GitHub collections as code rather than natural language; na=False keeps missing collection values out of the mask
        df.loc[df['collection'].notna() & df['collection'].str.contains("Github", case=False, na=False), 'language'] = 'code'
        for collection_name, col_df in df.groupby("collection"):
            stats["total_tokens_by_collection"].setdefault(collection_name, 0)
            stats["total_tokens_by_collection"][collection_name] += col_df["token_count"].sum()
            stats["total_words_by_collection"].setdefault(collection_name, 0)
            stats["total_words_by_collection"][collection_name] += col_df["word_count"].sum()
            stats["doc_count_by_collection"].setdefault(collection_name, 0)
            stats["doc_count_by_collection"][collection_name] += len(col_df)
            license_counts = col_df["license"].value_counts().to_dict()
            for license_type, count in license_counts.items():
                stats["license_distribution"].setdefault(collection_name, {}).setdefault(license_type, 0)
                stats["license_distribution"][collection_name][license_type] += count
        for language, lang_df in df.groupby("language"):
            stats["total_tokens_by_language"].setdefault(language, 0)
            stats["total_tokens_by_language"][language] += lang_df["token_count"].sum()
            stats["total_words_by_language"].setdefault(language, 0)
            stats["total_words_by_language"][language] += lang_df["word_count"].sum()
            stats["doc_count_by_language"].setdefault(language, 0)
            stats["doc_count_by_language"][language] += len(lang_df)
    except Exception as e:
        print(f"Error updating statistics: {e}")
# Gather all parquet files, with only one if in test mode
all_parquet_files = []
for set_dir in set_dirs:
    set_path = os.path.join(base_dir, set_dir)
    if not os.path.isdir(set_path):
        print(f"Directory {set_path} not found, skipping...")
        continue
    try:
        parquet_files = [os.path.join(set_path, f) for f in os.listdir(set_path) if f.endswith(".parquet")]
        all_parquet_files.extend(parquet_files)
    except Exception as e:
        print(f"Error reading directory {set_path}: {e}")
if test:
    all_parquet_files = all_parquet_files[:1]
# Process all selected parquet files
total_files = len(all_parquet_files)
for file_path in all_parquet_files:
    try:
        df = pq.read_table(file_path).to_pandas()
        update_stats(df)
        processed_files += 1
    except Exception as e:
        print(f"Error processing file {file_path}: {e}")
        errors_count += 1
    if processed_files % 10 == 0 or processed_files == total_files:
        print(f"Processed {processed_files}/{total_files} files with {errors_count} errors.")
# Compute compression rates and average document lengths by language with threshold checks
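# Compression rate is defined here as total tokens divided by total words (tokens per word)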
for language in stats["total_tokens_by_language"]:
    try:
        total_tokens = stats["total_tokens_by_language"][language]
        total_words = stats["total_words_by_language"][language]
        doc_count = stats["doc_count_by_language"][language]
        if total_tokens >= min_tokens_threshold:
            stats["compression_rate_by_language"][language] = total_tokens / total_words if total_words > 0 else None
            stats["avg_doc_length_words_by_language"][language] = total_words / doc_count if doc_count > 0 else None
            stats["avg_doc_length_tokens_by_language"][language] = total_tokens / doc_count if doc_count > 0 else None
        else:
            stats["compression_rate_by_language"][language] = "N/A"
            stats["avg_doc_length_words_by_language"][language] = "N/A"
            stats["avg_doc_length_tokens_by_language"][language] = "N/A"
    except Exception as e:
        print(f"Error calculating stats for language {language}: {e}")
# Compute compression rates and average document lengths by collection with threshold checks
for collection in stats["total_tokens_by_collection"]:
    try:
        total_tokens = stats["total_tokens_by_collection"][collection]
        total_words = stats["total_words_by_collection"][collection]
        doc_count = stats["doc_count_by_collection"][collection]
        if total_tokens >= min_tokens_threshold:
            stats["compression_rate_by_collection"][collection] = total_tokens / total_words if total_words > 0 else None
            stats["avg_doc_length_words_by_collection"][collection] = total_words / doc_count if doc_count > 0 else None
            stats["avg_doc_length_tokens_by_collection"][collection] = total_tokens / doc_count if doc_count > 0 else None
        else:
            stats["compression_rate_by_collection"][collection] = "N/A"
            stats["avg_doc_length_words_by_collection"][collection] = "N/A"
            stats["avg_doc_length_tokens_by_collection"][collection] = "N/A"
    except Exception as e:
        print(f"Error calculating stats for collection {collection}: {e}")
# Convert numpy scalar types to native Python types so the stats can be serialized to JSON
def convert_to_native_types(stats):
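    """Recursively convert numpy integer/float values in a (possibly nested) dict to plain Python types."""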
    def convert(value):
        if isinstance(value, (np.integer, np.floating)):
            return value.item()
        elif isinstance(value, dict):
            return {k: convert(v) for k, v in value.items()}
        return value
    return {k: convert(v) for k, v in stats.items()}
# Print and save statistics in human-readable format
def print_stats(stats):
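    """Print a human-readable summary of the corpus statistics and write it to stats_readable_output.txt."""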
    output = []
    output.append("============ Corpus Statistics Overview ============")
    total_tokens = sum(stats["total_tokens_by_collection"].values())
    total_words = sum(stats["total_words_by_collection"].values())
    total_docs = sum(stats["doc_count_by_collection"].values())
    output.append(f"\nTotal Tokens in Corpus: {total_tokens:,}")
    output.append(f"Total Words in Corpus: {total_words:,}")
    output.append(f"Total Documents in Corpus: {total_docs:,}")
    # Display top 10 collections by total tokens
    output.append("\nTop 10 Collections by Total Tokens:")
    for collection, count in sorted(stats["total_tokens_by_collection"].items(), key=lambda x: x[1], reverse=True)[:10]:
        output.append(f" - {collection}: {count:,}")
    # Display top 10 languages by total tokens
    output.append("\nTop 10 Languages by Total Tokens:")
    for language, count in sorted(stats["total_tokens_by_language"].items(), key=lambda x: x[1], reverse=True)[:10]:
        output.append(f" - {language}: {count:,}")
    # Display compression rate by language (top 10)
    output.append("\nCompression Rate by Language (Top 10):")
    for language, rate in sorted(stats["compression_rate_by_language"].items(), key=lambda x: x[1] if isinstance(x[1], (int, float)) else 0, reverse=True)[:10]:
        output.append(f" - {language}: {rate:.2f}" if isinstance(rate, (int, float)) else f" - {language}: N/A")
    # Display average document length by language (top 10)
    output.append("\nAverage Document Length (Words) by Language (Top 10):")
    for language, avg_len in sorted(stats["avg_doc_length_words_by_language"].items(), key=lambda x: x[1] if isinstance(x[1], (int, float)) else 0, reverse=True)[:10]:
        output.append(f" - {language}: {avg_len:.2f}" if isinstance(avg_len, (int, float)) else f" - {language}: N/A")
    # License distribution by collection (top 5 collections)
    output.append("\nLicense Distribution by Collection (Top 5):")
    for collection, licenses in list(sorted(stats["license_distribution"].items(), key=lambda x: sum(x[1].values()), reverse=True))[:5]:
        output.append(f" - {collection}:")
        for license_type, count in sorted(licenses.items(), key=lambda x: x[1], reverse=True):
            output.append(f" * {license_type}: {count:,}")
    output.append("====================================================")
    print("\n".join(output))
    # Save to text file
    with open('stats_readable_output.txt', 'w') as f:
        f.write("\n".join(output))
# Print and save the human-readable stats
print_stats(stats)
# Convert to native types and save as JSON
stats_native = convert_to_native_types(stats)
with open('stats_by_language.json', 'w') as f:
    json.dump({
        'total_tokens_by_language': stats_native['total_tokens_by_language'],
        'total_words_by_language': stats_native['total_words_by_language'],
        'doc_count_by_language': stats_native['doc_count_by_language'],
        'compression_rate_by_language': stats_native['compression_rate_by_language'],
        'avg_doc_length_words_by_language': stats_native['avg_doc_length_words_by_language'],
        'avg_doc_length_tokens_by_language': stats_native['avg_doc_length_tokens_by_language'],
    }, f, indent=4)
with open('stats_by_collection.json', 'w') as f:
    json.dump({
        'total_tokens_by_collection': stats_native['total_tokens_by_collection'],
        'total_words_by_collection': stats_native['total_words_by_collection'],
        'doc_count_by_collection': stats_native['doc_count_by_collection'],
        'compression_rate_by_collection': stats_native['compression_rate_by_collection'],
        'avg_doc_length_words_by_collection': stats_native['avg_doc_length_words_by_collection'],
        'avg_doc_length_tokens_by_collection': stats_native['avg_doc_length_tokens_by_collection'],
        'license_distribution': stats_native['license_distribution'],
    }, f, indent=4)