Spaces:
Update app.py
app.py CHANGED
@@ -2,44 +2,24 @@ import pandas as pd
 import matplotlib.pyplot as plt
 import seaborn as sns
 import gradio as gr
+import requests
+from bs4 import BeautifulSoup

-# Input data
+# Input data with links to Hugging Face repositories
 data_full = [
-    ["CultriX/Qwen2.5-14B-SLERPv7", 0.7205, 0.8272, 0.7541, 0.6581, 0.5000, 0.7290],
-    ["djuna/Q2.5-Veltha-14B-0.5", 0.7492, 0.8386, 0.7305, 0.5980, 0.4300, 0.7817],
-
-    ["CultriX/Qwen2.5-14B-MultiCultyv2", 0.7295, 0.8359, 0.7363, 0.5767, 0.4400, 0.7316],
-    ["CultriX/Qwen2.5-14B-Brocav7", 0.7445, 0.8353, 0.7508, 0.6292, 0.4600, 0.7629],
-    ["CultriX/Qwen2.5-14B-Broca", 0.7456, 0.8352, 0.7480, 0.6034, 0.4400, 0.7716],
-    ["CultriX/Qwen2.5-14B-Brocav3", 0.7395, 0.8388, 0.7393, 0.6405, 0.4700, 0.7659],
-    ["CultriX/Qwen2.5-14B-Brocav4", 0.7432, 0.8377, 0.7444, 0.6277, 0.4800, 0.7580],
-    ["CultriX/Qwen2.5-14B-Brocav2", 0.7492, 0.8302, 0.7508, 0.6377, 0.5100, 0.7478],
-    ["CultriX/Qwen2.5-14B-Brocav5", 0.7445, 0.8313, 0.7547, 0.6376, 0.5000, 0.7304],
-    ["CultriX/Qwen2.5-14B-Brocav6", 0.7179, 0.8354, 0.7531, 0.6378, 0.4900, 0.7524],
-    ["CultriX/Qwenfinity-2.5-14B", 0.7347, 0.8254, 0.7279, 0.7267, 0.5600, 0.6970],
-    ["CultriX/Qwen2.5-14B-Emergedv2", 0.7137, 0.8335, 0.7363, 0.5836, 0.4400, 0.7344],
-    ["CultriX/Qwen2.5-14B-Unity", 0.7063, 0.8343, 0.7423, 0.6820, 0.5700, 0.7498],
-    ["CultriX/Qwen2.5-14B-MultiCultyv3", 0.7132, 0.8216, 0.7395, 0.6792, 0.5500, 0.7120],
-    ["CultriX/Qwen2.5-14B-Emergedv3", 0.7436, 0.8312, 0.7519, 0.6585, 0.5500, 0.7068],
-    ["CultriX/SeQwence-14Bv1", 0.7278, 0.8410, 0.7541, 0.6816, 0.5200, 0.7539],
-    ["CultriX/Qwen2.5-14B-Wernickev2", 0.7391, 0.8168, 0.7273, 0.6220, 0.4500, 0.7572],
-    ["CultriX/Qwen2.5-14B-Wernickev3", 0.7357, 0.8148, 0.7245, 0.7023, 0.5500, 0.7869],
-    ["CultriX/Qwen2.5-14B-Wernickev4", 0.7355, 0.8290, 0.7497, 0.6306, 0.4800, 0.7635],
-    ["CultriX/SeQwential-14B-v1", 0.7355, 0.8205, 0.7549, 0.6367, 0.4800, 0.7626],
-    ["CultriX/Qwen2.5-14B-Wernickev5", 0.7224, 0.8272, 0.7541, 0.6790, 0.5100, 0.7578],
-    ["CultriX/Qwen2.5-14B-Wernickev6", 0.6994, 0.7549, 0.5816, 0.6991, 0.5800, 0.7267],
-    ["CultriX/Qwen2.5-14B-Wernickev7", 0.7147, 0.7599, 0.6097, 0.7056, 0.5700, 0.7164],
-    ["CultriX/Qwen2.5-14B-FinalMerge-tmp2", 0.7255, 0.8192, 0.7535, 0.6671, 0.5000, 0.7612],
+    ["CultriX/Qwen2.5-14B-SLERPv7", "https://huggingface.co/CultriX/Qwen2.5-14B-SLERPv7", 0.7205, 0.8272, 0.7541, 0.6581, 0.5000, 0.7290],
+    ["djuna/Q2.5-Veltha-14B-0.5", "https://huggingface.co/djuna/Q2.5-Veltha-14B-0.5", 0.7492, 0.8386, 0.7305, 0.5980, 0.4300, 0.7817],
+    # Add links for other models...
 ]

-columns = ["Model Configuration", "tinyArc", "tinyHellaswag", "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"]
+columns = ["Model Configuration", "Model Link", "tinyArc", "tinyHellaswag", "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"]

 # Convert to DataFrame
 df_full = pd.DataFrame(data_full, columns=columns)

-
+# Visualization and analytics functions
 def plot_average_scores():
-    df_full["Average Score"] = df_full.iloc[:, 1:].mean(axis=1)
+    df_full["Average Score"] = df_full.iloc[:, 2:].mean(axis=1)
     df_avg_sorted = df_full.sort_values(by="Average Score", ascending=False)

     plt.figure(figsize=(12, 8))
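Note on the hunk above: adding the "Model Link" column shifts every score column one position to the right, which is why the aggregations move from iloc[:, 1:] to iloc[:, 2:]. A minimal name-based alternative, not part of this commit (score_cols is a hypothetical helper variable), would be immune to further column insertions:

# Select score columns by name so extra metadata columns cannot shift the numeric slice.
score_cols = [c for c in columns if c.startswith("tiny")]
df_full["Average Score"] = df_full[score_cols].mean(axis=1)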
@@ -54,7 +34,7 @@ def plot_average_scores():
     return "average_performance.png"

 def plot_task_performance():
-    df_full_melted = df_full.melt(id_vars="Model Configuration", var_name="Task", value_name="Score")
+    df_full_melted = df_full.melt(id_vars=["Model Configuration", "Model Link"], var_name="Task", value_name="Score")

     plt.figure(figsize=(14, 10))
     for model in df_full["Model Configuration"]:
@@ -72,8 +52,8 @@ def plot_task_performance():
     return "task_performance.png"

 def plot_task_specific_top_models():
-    top_models = df_full.iloc[:, 1:].idxmax()
-    top_scores = df_full.iloc[:, 1:].max()
+    top_models = df_full.iloc[:, 2:].idxmax()
+    top_scores = df_full.iloc[:, 2:].max()

     results = pd.DataFrame({"Top Model": top_models, "Score": top_scores}).reset_index().rename(columns={"index": "Task"})

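One caveat with the hunk above, present both before and after the change: DataFrame.idxmax() returns index labels, and df_full keeps the default RangeIndex, so the "Top Model" column ends up holding row numbers rather than model names. A small sketch of a fix, assuming the intent is to show model names (not part of this commit):

# Resolve the idxmax() row labels to the corresponding model name for each task.
score_cols = columns[2:]
top_models = df_full[score_cols].idxmax().map(df_full["Model Configuration"])
top_scores = df_full[score_cols].max()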
@@ -87,52 +67,32 @@ def plot_task_specific_top_models():
     plt.savefig("task_specific_top_models.png")
     return "task_specific_top_models.png"

-def top_3_models_per_task():
-    ...  # body not visible in the extracted diff
-def summary_statistics():
-    stats = df_full.iloc[:, 1:].describe().T  # Summary stats for each task
-    stats['Std Dev'] = df_full.iloc[:, 1:].std(axis=0)
-    return stats.reset_index()
-
-def plot_distribution_boxplots():
-    plt.figure(figsize=(14, 8))
-    df_melted = df_full.melt(id_vars="Model Configuration", var_name="Task", value_name="Score")
-    sns.boxplot(x="Task", y="Score", data=df_melted)
-    plt.title("Score Distribution by Task", fontsize=16)
-    plt.xlabel("Task", fontsize=14)
-    plt.ylabel("Score", fontsize=14)
-    plt.grid(axis='y', linestyle='--', alpha=0.7)
-    plt.tight_layout()
-    plt.savefig("distribution_boxplots.png")
-    return "distribution_boxplots.png"
-
-def best_overall_model():
-    df_full["Average Score"] = df_full.iloc[:, 1:].mean(axis=1)
-    best_model = df_full.loc[df_full["Average Score"].idxmax()]
-    return best_model
+def scrape_mergekit_config(model_name):
+    """
+    Scrapes the Hugging Face model page for YAML configuration.
+    """
+    model_link = df_full.loc[df_full["Model Configuration"] == model_name, "Model Link"].values[0]
+    response = requests.get(model_link)
+    if response.status_code != 200:
+        return f"Failed to fetch model page for {model_name}. Please check the link."
+
+    soup = BeautifulSoup(response.text, "html.parser")
+    yaml_config = soup.find("pre")  # Assume YAML is in <pre> tags
+    if yaml_config:
+        return yaml_config.text.strip()
+    return f"No YAML configuration found for {model_name}."

 def plot_heatmap():
     plt.figure(figsize=(12, 8))
-    sns.heatmap(df_full.iloc[:, 1:], annot=True, cmap="YlGnBu", xticklabels=columns[1:], yticklabels=df_full["Model Configuration"])
+    sns.heatmap(df_full.iloc[:, 2:], annot=True, cmap="YlGnBu", xticklabels=columns[2:], yticklabels=df_full["Model Configuration"])
     plt.title("Performance Heatmap", fontsize=16)
     plt.tight_layout()
     plt.savefig("performance_heatmap.png")
     return "performance_heatmap.png"

+# Gradio app
 with gr.Blocks() as demo:
-    gr.Markdown("# Model Performance Analysis")
+    gr.Markdown("# Comprehensive Model Performance Analysis with Hugging Face Links")

     with gr.Row():
         btn1 = gr.Button("Show Average Performance")
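The new scrape_mergekit_config() assumes the mergekit YAML sits in the first <pre> block of the rendered model page; a model card with other code blocks ahead of the config may return the wrong snippet. A quick way to sanity-check the scraper outside Gradio, reusing a model already listed in data_full (a usage sketch, not part of the commit):

# Ad-hoc check: print whatever the scraper extracts for one linked model.
if __name__ == "__main__":
    print(scrape_mergekit_config("CultriX/Qwen2.5-14B-SLERPv7"))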
@@ -149,29 +109,15 @@ with gr.Blocks() as demo:
         img3 = gr.Image(type="filepath")
         btn3.click(plot_task_specific_top_models, outputs=img3)

-    with gr.Row():
-        btn4 = gr.Button("Top 3 Models Per Task")
-        output4 = gr.Dataframe()
-        btn4.click(top_3_models_per_task, outputs=output4)
-
-    with gr.Row():
-        btn1 = gr.Button("Show Summary Statistics")
-        stats_output = gr.Dataframe()
-        btn1.click(summary_statistics, outputs=stats_output)
-
-    with gr.Row():
-        btn2 = gr.Button("Plot Score Distributions")
-        dist_img = gr.Image(type="filepath")
-        btn2.click(plot_distribution_boxplots, outputs=dist_img)
-
-    with gr.Row():
-        btn3 = gr.Button("Best Overall Model")
-        best_output = gr.Textbox()
-        btn3.click(best_overall_model, outputs=best_output)
-
     with gr.Row():
         btn4 = gr.Button("Plot Performance Heatmap")
         heatmap_img = gr.Image(type="filepath")
         btn4.click(plot_heatmap, outputs=heatmap_img)

+    with gr.Row():
+        model_selector = gr.Dropdown(choices=df_full["Model Configuration"].tolist(), label="Select a Model")
+        scrape_btn = gr.Button("Scrape MergeKit Configuration")
+        yaml_output = gr.Textbox(lines=10, placeholder="YAML Configuration will appear here.")
+        scrape_btn.click(scrape_mergekit_config, inputs=model_selector, outputs=yaml_output)
+
 demo.launch()
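Because the updated app now imports requests and BeautifulSoup, the Space will only start if those packages are available at runtime. The dependency file is not part of this commit, so this is an assumption, but for a Gradio Space that normally means listing them in requirements.txt:

# requirements.txt (assumed additions implied by the new imports)
requests
beautifulsoup4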