Dataset Preview
Full Screen Viewer
Full Screen
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed
Error code: DatasetGenerationError Exception: UnicodeDecodeError Message: 'utf-8' codec can't decode byte 0x80 in position 64: invalid start byte Traceback: Traceback (most recent call last): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1995, in _prepare_split_single for _, table in generator: File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/text/text.py", line 90, in _generate_tables batch = f.read(self.config.chunksize) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/utils/file_utils.py", line 1104, in read_with_retries out = read(*args, **kwargs) File "/usr/local/lib/python3.9/codecs.py", line 322, in decode (result, consumed) = self._buffer_decode(data, self.errors, final) UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 64: invalid start byte The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1529, in compute_config_parquet_and_info_response parquet_operations = convert_to_parquet(builder) File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1154, in convert_to_parquet builder.download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1027, in download_and_prepare self._download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1122, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1882, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2038, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the 
dataset") from e datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
text
string |
---|
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cdist

# Nearest-cluster assignment: for every inverse-projected sample, find the
# index of the closest cluster centroid under Euclidean distance.
inv_data = np.load('/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_1/inv.npy')
inv_cluster_data = np.load('/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_1/inv_cluster.npy')

# cdist builds the (N, K) distance matrix directly. The previous broadcasting
# form, np.linalg.norm(inv_data[:, None, :] - inv_cluster_data, axis=2),
# materialized an (N, K, D) intermediate and could exhaust memory for large N.
closest_cluster_indices = np.argmin(cdist(inv_data, inv_cluster_data), axis=1)
print(len(closest_cluster_indices))

import json

# output_file = '/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_2/output.json'
# with open(output_file, 'w') as f:
#     json.dump(closest_cluster_indices.tolist(), f)

# Load the previously stored index list from file.
file_path = '/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_2/modified_ranks.json'
with open(file_path, 'r') as f:
    stored_indices = json.load(f)

# Overlap: fraction of positions where the stored index agrees with the
# freshly computed nearest-cluster index. zip() avoids the IndexError the
# old index-based loop raised when stored_indices was the longer sequence;
# the denominator stays len(stored_indices) as before, with an explicit
# guard against an empty list.
count_same_values = sum(
    1 for stored, computed in zip(stored_indices, closest_cluster_indices)
    if stored == computed
)
total_values = len(stored_indices)
overlap_percentage = (count_same_values / total_values) * 100 if total_values else 0.0
print(f"相同值概率: {overlap_percentage}%")
# # Load the two ndarrays
# train_data = np.load('/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_1/train_data.npy')
# train_data_inv = np.load('/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_1/inv.npy')
# # Compute the cosine similarity of each corresponding pair and store the results
# similarities = []
# count = 0
# for i in range(len(train_data)):
#     similarity = cosine_similarity(train_data[i].reshape(1, -1), train_data_inv[i].reshape(1, -1))
#     similarities.append(similarity)
#     if similarity > 0.9:
#         count += 1
# print(count)
# print(np.max(similarities))
# print(np.min(similarities))
# # Compute the average similarity
# average_similarity = np.mean(similarities)
# # Print the average similarity
# print(average_similarity)

# # Load the two ndarrays
# train_data = np.load('/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_1/train_data.npy')
# embedding = np.load('/home/yiming/cophi/training_dynamic/code_training_dynamic/saved_models/ruby_fine_tine_5/Model/Epoch_1/embedding.npy')
# # Randomly select 1000 indices from train_data
# num_samples = 1000
# random_indices = np.random.choice(len(train_data), size=num_samples, replace=False)
# selected_train_data = train_data[random_indices]
# selected_embedding = embedding[random_indices]
# # Pairwise distances among the selected train_data samples
# train_data_distances = cdist(selected_train_data, selected_train_data, metric='euclidean')
# # Index of each selected train_data sample's second-nearest sample
# train_data_second_nearest_indices = np.argpartition(train_data_distances, kth=1)[:, 1]
# # Pairwise distances among the selected embedding samples
# embedding_distances = cdist(selected_embedding, selected_embedding, metric='euclidean')
# # Index of each selected embedding sample's second-nearest sample
# embedding_second_nearest_indices = np.argpartition(embedding_distances, kth=1)[:, 1]
# # Degree of overlap between the second-nearest indices of train_data and embedding
# overlap = np.mean(train_data_second_nearest_indices == embedding_second_nearest_indices)
# # Print the degree of overlap
# print("重合程度:", overlap)

# import numpy as np
# from pynndescent import NNDescent
# # from sklearn.neighbors import NearestNeighbors
# # from sklearn.manifold import trustworthiness
# # from scipy.stats import kendalltau, spearmanr, pearsonr, rankdata
# def evaluate_proj_nn_perseverance_knn(data, embedding, n_neighbors, metric="euclidean"):
#     """
#     evaluate projection function, nn preserving property using knn algorithm
#     :param data: ndarray, high dimensional representations
#     :param embedding: ndarray, low dimensional representations
End of preview.
No dataset card yet
New: Create and edit this dataset card directly on the website!
Contribute a Dataset Card
Downloads last month: 5