CultriX committed on
Commit
b61d487
·
verified ·
1 Parent(s): 010c7bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +186 -39
app.py CHANGED
@@ -1,6 +1,3 @@
1
- # Comprehensive Model Performance Analysis
2
-
3
- # Importing Required Libraries
4
  import pandas as pd
5
  import matplotlib.pyplot as plt
6
  import seaborn as sns
@@ -13,8 +10,8 @@ import base64
13
  import zipfile
14
  from PIL import Image
15
  from io import BytesIO
 
16
 
17
- # Input Data
18
  # Input data with links to Hugging Face repositories
19
  data_full = [
20
  ['CultriX/Qwen2.5-14B-SLERPv7', 'https://huggingface.co/CultriX/Qwen2.5-14B-SLERPv7', 0.7205, 0.8272, 0.7541, 0.6581, 0.5, 0.729],
@@ -43,14 +40,17 @@ data_full = [
43
  ['CultriX/Qwen2.5-14B-Wernickev7', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev7', 0.7147, 0.7599, 0.6097, 0.7056, 0.57, 0.7164],
44
  ['CultriX/Qwen2.5-14B-FinalMerge-tmp2', 'https://huggingface.co/CultriX/Qwen2.5-14B-FinalMerge-tmp2', 0.7255, 0.8192, 0.7535, 0.6671, 0.5, 0.7612],
45
  ]
 
46
  columns = ["Model Configuration", "Model Link", "tinyArc", "tinyHellaswag", "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"]
 
 
47
  df_full = pd.DataFrame(data_full, columns=columns)
48
 
49
- # Visualization and Analytics Functions
50
- # 1. Plot Average Scores
51
  def plot_average_scores():
52
  df_full["Average Score"] = df_full.iloc[:, 2:].mean(axis=1)
53
  df_avg_sorted = df_full.sort_values(by="Average Score", ascending=False)
 
54
  plt.figure(figsize=(12, 8))
55
  plt.barh(df_avg_sorted["Model Configuration"], df_avg_sorted["Average Score"])
56
  plt.title("Average Performance of Models Across Tasks", fontsize=16)
@@ -59,15 +59,27 @@ def plot_average_scores():
59
  plt.gca().invert_yaxis()
60
  plt.grid(axis='x', linestyle='--', alpha=0.7)
61
  plt.tight_layout()
62
- plt.show()
 
 
 
 
 
 
 
 
 
 
 
63
 
64
- # 2. Plot Task Performance
65
  def plot_task_performance():
66
  df_full_melted = df_full.melt(id_vars=["Model Configuration", "Model Link"], var_name="Task", value_name="Score")
 
67
  plt.figure(figsize=(14, 10))
68
  for model in df_full["Model Configuration"]:
69
  model_data = df_full_melted[df_full_melted["Model Configuration"] == model]
70
  plt.plot(model_data["Task"], model_data["Score"], marker="o", label=model)
 
71
  plt.title("Performance of All Models Across Tasks", fontsize=16)
72
  plt.xlabel("Task", fontsize=14)
73
  plt.ylabel("Score", fontsize=14)
@@ -75,13 +87,24 @@ def plot_task_performance():
75
  plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=9)
76
  plt.grid(axis='y', linestyle='--', alpha=0.7)
77
  plt.tight_layout()
78
- plt.show()
 
 
 
 
 
 
 
 
 
 
79
 
80
- # 3. Plot Task-Specific Top Models
81
  def plot_task_specific_top_models():
82
  top_models = df_full.iloc[:, 2:].idxmax()
83
  top_scores = df_full.iloc[:, 2:].max()
 
84
  results = pd.DataFrame({"Top Model": top_models, "Score": top_scores}).reset_index().rename(columns={"index": "Task"})
 
85
  plt.figure(figsize=(12, 6))
86
  plt.bar(results["Task"], results["Score"])
87
  plt.title("Task-Specific Top Models", fontsize=16)
@@ -89,68 +112,192 @@ def plot_task_specific_top_models():
89
  plt.ylabel("Score", fontsize=14)
90
  plt.grid(axis="y", linestyle="--", alpha=0.7)
91
  plt.tight_layout()
92
- plt.show()
93
 
94
- # YAML Configuration and Scraping Utilities
95
- # 1. Scrape MergeKit Configuration
 
 
 
 
 
 
 
 
96
  def scrape_mergekit_config(model_name):
 
 
 
97
  model_link = df_full.loc[df_full["Model Configuration"] == model_name, "Model Link"].values[0]
98
  response = requests.get(model_link)
99
  if response.status_code != 200:
100
  return f"Failed to fetch model page for {model_name}. Please check the link."
 
101
  soup = BeautifulSoup(response.text, "html.parser")
102
- yaml_config = soup.find("pre")
103
- return yaml_config.text.strip() if yaml_config else f"No YAML configuration found for {model_name}."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
- # 2. Download All Data
106
  def download_all_data():
 
107
  csv_buffer = io.StringIO()
108
  df_full.to_csv(csv_buffer, index=False)
109
  csv_data = csv_buffer.getvalue().encode('utf-8')
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
  zip_buffer = io.BytesIO()
112
  with zipfile.ZipFile(zip_buffer, 'w') as zf:
113
  zf.writestr("model_scores.csv", csv_data)
114
- zip_buffer.seek(0)
115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  return zip_buffer, "analysis_data.zip"
117
 
118
- # Performance Heatmap
119
- def plot_heatmap():
120
- plt.figure(figsize=(12, 8))
121
- sns.heatmap(df_full.iloc[:, 2:], annot=True, cmap="YlGnBu", xticklabels=columns[2:], yticklabels=df_full["Model Configuration"])
122
- plt.title("Performance Heatmap", fontsize=16)
123
- plt.tight_layout()
124
- plt.show()
 
 
 
 
125
 
126
- # Gradio App
127
- # Building the Interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  with gr.Blocks() as demo:
129
  gr.Markdown("# Comprehensive Model Performance Analysis with Hugging Face Links")
130
 
131
  with gr.Row():
132
- btn1 = gr.Button("Show Average Performance")
133
- img1 = gr.Image(type="pil", label="Average Performance Plot")
134
- btn1.click(plot_average_scores, outputs=[img1])
135
-
 
 
 
 
136
  with gr.Row():
137
- btn2 = gr.Button("Show Task Performance")
138
- img2 = gr.Image(type="pil", label="Task Performance Plot")
139
- btn2.click(plot_task_performance, outputs=[img2])
 
 
 
 
140
 
141
  with gr.Row():
142
- btn3 = gr.Button("Task-Specific Top Models")
143
- img3 = gr.Image(type="pil", label="Task-Specific Top Models Plot")
144
- btn3.click(plot_task_specific_top_models, outputs=[img3])
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
  with gr.Row():
147
- btn4 = gr.Button("Plot Performance Heatmap")
148
- img4 = gr.Image(type="pil", label="Performance Heatmap")
149
- btn4.click(plot_heatmap, outputs=[img4])
 
 
 
 
 
 
 
150
 
151
  with gr.Row():
152
  download_all_btn = gr.Button("Download Everything")
153
  all_downloads = gr.File(label="Download All Data")
154
  download_all_btn.click(download_all_data, outputs=all_downloads)
 
 
 
 
 
 
 
 
155
 
156
  demo.launch()
 
 
 
 
1
  import pandas as pd
2
  import matplotlib.pyplot as plt
3
  import seaborn as sns
 
10
  import zipfile
11
  from PIL import Image
12
  from io import BytesIO
13
+ import tempfile
14
 
 
15
  # Input data with links to Hugging Face repositories
16
  data_full = [
17
  ['CultriX/Qwen2.5-14B-SLERPv7', 'https://huggingface.co/CultriX/Qwen2.5-14B-SLERPv7', 0.7205, 0.8272, 0.7541, 0.6581, 0.5, 0.729],
 
40
  ['CultriX/Qwen2.5-14B-Wernickev7', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev7', 0.7147, 0.7599, 0.6097, 0.7056, 0.57, 0.7164],
41
  ['CultriX/Qwen2.5-14B-FinalMerge-tmp2', 'https://huggingface.co/CultriX/Qwen2.5-14B-FinalMerge-tmp2', 0.7255, 0.8192, 0.7535, 0.6671, 0.5, 0.7612],
42
  ]
43
+
44
  columns = ["Model Configuration", "Model Link", "tinyArc", "tinyHellaswag", "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"]
45
+
46
+ # Convert to DataFrame
47
  df_full = pd.DataFrame(data_full, columns=columns)
48
 
49
+ # Visualization and analytics functions
 
50
  def plot_average_scores():
51
  df_full["Average Score"] = df_full.iloc[:, 2:].mean(axis=1)
52
  df_avg_sorted = df_full.sort_values(by="Average Score", ascending=False)
53
+
54
  plt.figure(figsize=(12, 8))
55
  plt.barh(df_avg_sorted["Model Configuration"], df_avg_sorted["Average Score"])
56
  plt.title("Average Performance of Models Across Tasks", fontsize=16)
 
59
  plt.gca().invert_yaxis()
60
  plt.grid(axis='x', linestyle='--', alpha=0.7)
61
  plt.tight_layout()
62
+
63
+ img_buffer = io.BytesIO()
64
+ plt.savefig(img_buffer, format='png')
65
+ img_buffer.seek(0)
66
+ img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
67
+ plt.close()
68
+
69
+ pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
70
+
71
+ temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
72
+ pil_image.save(temp_image_file.name)
73
+ return pil_image, temp_image_file.name
74
 
 
75
  def plot_task_performance():
76
  df_full_melted = df_full.melt(id_vars=["Model Configuration", "Model Link"], var_name="Task", value_name="Score")
77
+
78
  plt.figure(figsize=(14, 10))
79
  for model in df_full["Model Configuration"]:
80
  model_data = df_full_melted[df_full_melted["Model Configuration"] == model]
81
  plt.plot(model_data["Task"], model_data["Score"], marker="o", label=model)
82
+
83
  plt.title("Performance of All Models Across Tasks", fontsize=16)
84
  plt.xlabel("Task", fontsize=14)
85
  plt.ylabel("Score", fontsize=14)
 
87
  plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=9)
88
  plt.grid(axis='y', linestyle='--', alpha=0.7)
89
  plt.tight_layout()
90
+
91
+ img_buffer = io.BytesIO()
92
+ plt.savefig(img_buffer, format='png')
93
+ img_buffer.seek(0)
94
+ img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
95
+ plt.close()
96
+
97
+ pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
98
+ temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
99
+ pil_image.save(temp_image_file.name)
100
+ return pil_image, temp_image_file.name
101
 
 
102
  def plot_task_specific_top_models():
103
  top_models = df_full.iloc[:, 2:].idxmax()
104
  top_scores = df_full.iloc[:, 2:].max()
105
+
106
  results = pd.DataFrame({"Top Model": top_models, "Score": top_scores}).reset_index().rename(columns={"index": "Task"})
107
+
108
  plt.figure(figsize=(12, 6))
109
  plt.bar(results["Task"], results["Score"])
110
  plt.title("Task-Specific Top Models", fontsize=16)
 
112
  plt.ylabel("Score", fontsize=14)
113
  plt.grid(axis="y", linestyle="--", alpha=0.7)
114
  plt.tight_layout()
 
115
 
116
+ img_buffer = io.BytesIO()
117
+ plt.savefig(img_buffer, format='png')
118
+ img_buffer.seek(0)
119
+ img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
120
+ plt.close()
121
+ pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
122
+ temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
123
+ pil_image.save(temp_image_file.name)
124
+ return pil_image, temp_image_file.name
125
+
126
def scrape_mergekit_config(model_name):
    """
    Scrape the Hugging Face model page for its MergeKit YAML configuration.

    Args:
        model_name: A value from df_full["Model Configuration"].

    Returns:
        The YAML text found in the first <pre> tag of the model page, or a
        human-readable error message when the model is unknown, the page
        cannot be fetched, or no configuration is present.
    """
    links = df_full.loc[df_full["Model Configuration"] == model_name, "Model Link"].values
    if len(links) == 0:
        # Guard the previous unconditional .values[0], which raised
        # IndexError for a name not present in the table.
        return f"No YAML configuration found for {model_name}."
    model_link = links[0]

    try:
        # Timeout so an unresponsive host cannot hang the Gradio UI.
        response = requests.get(model_link, timeout=10)
    except requests.RequestException:
        return f"Failed to fetch model page for {model_name}. Please check the link."
    if response.status_code != 200:
        return f"Failed to fetch model page for {model_name}. Please check the link."

    soup = BeautifulSoup(response.text, "html.parser")
    yaml_config = soup.find("pre")  # Assume YAML is in <pre> tags
    if yaml_config:
        return yaml_config.text.strip()
    return f"No YAML configuration found for {model_name}."
140
+
141
def plot_heatmap():
    """
    Render a models-by-tasks score heatmap.

    Returns:
        (PIL.Image.Image, str): the rendered heatmap image and the path of a
        temporary PNG copy that Gradio's File component can serve.
    """
    plt.figure(figsize=(12, 8))
    sns.heatmap(df_full.iloc[:, 2:], annot=True, cmap="YlGnBu",
                xticklabels=columns[2:], yticklabels=df_full["Model Configuration"])
    plt.title("Performance Heatmap", fontsize=16)
    plt.tight_layout()

    # Render once into an in-memory PNG buffer. The previous
    # base64-encode-then-decode round-trip added nothing and is dropped.
    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    plt.close()  # free the Matplotlib figure
    img_buffer.seek(0)

    pil_image = Image.open(img_buffer)
    pil_image.load()  # force a full read while the buffer is still alive

    # delete=False: Gradio needs the file to exist after this function returns.
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
156
+
157
def download_yaml(yaml_content, model_name):
    """
    Persist scraped YAML to disk and return its path for a Gradio download.

    Args:
        yaml_content: Text produced by scrape_mergekit_config().
        model_name: Model identifier; '/' is replaced to form a valid filename.

    Returns:
        Path to a .yaml file Gradio can serve, or None when yaml_content is
        one of the scraper's error messages (nothing useful to download).
    """
    if "No YAML configuration found" in yaml_content or "Failed to fetch model page" in yaml_content:
        return None  # Do not return a link if there's no config or a fetch error

    import os

    # gr.File(value=<bytes>, filename=...) is not a valid construction —
    # gr.File has no `filename` argument and File outputs expect a file path.
    # Write the content to a fresh temp directory under the desired name.
    filename = f"{model_name.replace('/', '_')}_config.yaml"
    file_path = os.path.join(tempfile.mkdtemp(), filename)
    with open(file_path, "w", encoding="utf-8") as fh:
        fh.write(yaml_content)
    return file_path
166
 
 
167
def download_all_data():
    """
    Bundle the score CSV, every plot, and all scrapeable YAML configs into a zip.

    Returns:
        (io.BytesIO, str): an in-memory zip archive rewound to offset 0, and
        a suggested filename for it.
    """
    # Raw scores as CSV
    csv_buffer = io.StringIO()
    df_full.to_csv(csv_buffer, index=False)
    csv_data = csv_buffer.getvalue().encode('utf-8')

    # Render every plot once; each helper returns (PIL image, temp file path).
    # Only the images are needed here — the temp paths are ignored.
    average_plot_pil, _ = plot_average_scores()
    task_plot_pil, _ = plot_task_performance()
    top_models_plot_pil, _ = plot_task_specific_top_models()
    heatmap_plot_pil, _ = plot_heatmap()

    plot_dict = {
        "average_performance": average_plot_pil,
        "task_performance": task_plot_pil,
        "top_models": top_models_plot_pil,
        "heatmap": heatmap_plot_pil,
    }

    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, 'w') as zf:
        zf.writestr("model_scores.csv", csv_data)

        # Store plots under stable, human-readable archive names. The previous
        # code used the OS temp-file *path* as the member name, which produced
        # entries like "tmp/tmpab12cd.png" inside the zip.
        for name, pil_image in plot_dict.items():
            image_bytes = io.BytesIO()
            pil_image.save(image_bytes, format='PNG')
            image_bytes.seek(0)
            zf.writestr(f"{name}.png", image_bytes.read())

        # Best-effort: include every model's YAML config when scrapeable.
        for model_name in df_full["Model Configuration"].to_list():
            yaml_content = scrape_mergekit_config(model_name)
            if "No YAML configuration found" not in yaml_content and "Failed to fetch model page" not in yaml_content:
                zf.writestr(f"{model_name.replace('/', '_')}_config.yaml", yaml_content.encode())

    zip_buffer.seek(0)

    return zip_buffer, "analysis_data.zip"
204
 
205
def scrape_model_page(model_url):
    """
    Scrape a Hugging Face model page for YAML configuration and other details.

    Args:
        model_url: Full URL of the model page.

    Returns:
        A markdown-formatted string containing the YAML configuration and any
        metadata found, or an error-message string on failure.
    """
    try:
        # Fetch the model page; timeout so a dead host cannot hang the UI.
        response = requests.get(model_url, timeout=10)
        if response.status_code != 200:
            return f"Error: Unable to fetch the page (Status Code: {response.status_code})"

        soup = BeautifulSoup(response.text, "html.parser")

        # Extract YAML configuration (usually inside <pre> tags)
        yaml_config = soup.find("pre")
        yaml_text = yaml_config.text.strip() if yaml_config else "No YAML configuration found."

        # Extract additional metadata or performance (if available)
        metadata_section = soup.find("div", class_="metadata")
        metadata_text = metadata_section.text.strip() if metadata_section else "No metadata found."

        # Return the scraped details
        return f"**YAML Configuration:**\n{yaml_text}\n\n**Metadata:**\n{metadata_text}"

    except Exception as e:
        # Boundary handler: surface the failure as UI text rather than crash.
        return f"Error: {str(e)}"
230
+
231
def display_scraped_model_data(model_url):
    """Show the YAML configuration and metadata for the model at *model_url*."""
    # Thin UI adapter around the scraper so the Gradio handler has a
    # display-oriented name.
    scraped = scrape_model_page(model_url)
    return scraped
236
+
237
+
238
# Gradio app: one row per plot (button | rendered image | download slot),
# plus MergeKit-config scraping and a bundle-everything download.
with gr.Blocks() as demo:
    gr.Markdown("# Comprehensive Model Performance Analysis with Hugging Face Links")

    with gr.Row():
        # gr.Column takes `scale` (relative width), not `width` —
        # `width=` raised TypeError at startup.
        with gr.Column(scale=3):
            btn1 = gr.Button("Show Average Performance")
        with gr.Column(scale=7):
            img1 = gr.Image(type="pil", label="Average Performance Plot")
        with gr.Column(scale=2):
            img1_download = gr.File(label="Download Average Performance")
        # Plot helpers return (PIL image, temp PNG path) -> [Image, File].
        btn1.click(plot_average_scores, outputs=[img1, img1_download])

    with gr.Row():
        with gr.Column(scale=3):
            btn2 = gr.Button("Show Task Performance")
        with gr.Column(scale=7):
            img2 = gr.Image(type="pil", label="Task Performance Plot")
        with gr.Column(scale=2):
            img2_download = gr.File(label="Download Task Performance")
        btn2.click(plot_task_performance, outputs=[img2, img2_download])

    with gr.Row():
        with gr.Column(scale=3):
            btn3 = gr.Button("Task-Specific Top Models")
        with gr.Column(scale=7):
            img3 = gr.Image(type="pil", label="Task-Specific Top Models Plot")
        with gr.Column(scale=2):
            img3_download = gr.File(label="Download Top Models")
        btn3.click(plot_task_specific_top_models, outputs=[img3, img3_download])

    with gr.Row():
        with gr.Column(scale=3):
            btn4 = gr.Button("Plot Performance Heatmap")
        with gr.Column(scale=7):
            heatmap_img = gr.Image(type="pil", label="Performance Heatmap")
        with gr.Column(scale=2):
            heatmap_download = gr.File(label="Download Heatmap")
        btn4.click(plot_heatmap, outputs=[heatmap_img, heatmap_download])

    with gr.Row():
        model_selector = gr.Dropdown(choices=df_full["Model Configuration"].tolist(), label="Select a Model")
        with gr.Column():
            scrape_btn = gr.Button("Scrape MergeKit Configuration")
            yaml_output = gr.Textbox(lines=10, placeholder="YAML Configuration will appear here.")
            scrape_btn.click(scrape_mergekit_config, inputs=model_selector, outputs=yaml_output)
        with gr.Column():
            save_yaml_btn = gr.Button("Save MergeKit Configuration")
            yaml_download = gr.File(label="Download MergeKit Configuration")
            save_yaml_btn.click(download_yaml, inputs=[yaml_output, model_selector], outputs=yaml_download)

    with gr.Row():
        download_all_btn = gr.Button("Download Everything")
        all_downloads = gr.File(label="Download All Data")
        # NOTE(review): download_all_data returns (BytesIO, filename) for a
        # single gr.File output; recent Gradio versions expect a file path
        # here — verify against the installed Gradio version.
        download_all_btn.click(download_all_data, outputs=all_downloads)

    # Live scraping feature
    gr.Markdown("## Live Scraping Features")
    with gr.Row():
        url_input = gr.Textbox(label="Enter Hugging Face Model URL", placeholder="https://huggingface.co/<model>")
        live_scrape_btn = gr.Button("Scrape Model Page")
        live_scrape_output = gr.Textbox(label="Scraped Data", lines=15)
        live_scrape_btn.click(display_scraped_model_data, inputs=url_input, outputs=live_scrape_output)

demo.launch()