import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import gradio as gr
import requests
from bs4 import BeautifulSoup
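# Note (assumption): on a Hugging Face Space these imports imply a requirements.txt
# roughly like the list below; exact version pins are not specified here.
#   pandas
#   matplotlib
#   seaborn
#   gradio
#   requests
#   beautifulsoup4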
# Input data with links to Hugging Face repositories
data_full = [
    ["CultriX/Qwen2.5-14B-SLERPv7", "https://huggingface.co./CultriX/Qwen2.5-14B-SLERPv7", 0.7205, 0.8272, 0.7541, 0.6581, 0.5000, 0.7290],
    ["djuna/Q2.5-Veltha-14B-0.5", "https://huggingface.co./djuna/Q2.5-Veltha-14B-0.5", 0.7492, 0.8386, 0.7305, 0.5980, 0.4300, 0.7817],
    # Add links for other models...
]

columns = ["Model Configuration", "Model Link", "tinyArc", "tinyHellaswag", "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"]
# Convert to DataFrame
df_full = pd.DataFrame(data_full, columns=columns)
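# Sketch (assumption): if the full benchmark table is kept in a CSV with the same
# eight columns (a hypothetical "model_scores.csv"), the hard-coded rows above could
# be loaded instead of typed in by hand:
#     df_full = pd.read_csv("model_scores.csv")[columns]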
# Visualization and analytics functions
def plot_average_scores():
    # Average over the benchmark columns only, so repeated clicks stay idempotent
    # even after the derived "Average Score" column has been added.
    df_full["Average Score"] = df_full[columns[2:]].mean(axis=1)
    df_avg_sorted = df_full.sort_values(by="Average Score", ascending=False)

    plt.figure(figsize=(12, 8))
    plt.barh(df_avg_sorted["Model Configuration"], df_avg_sorted["Average Score"])
    plt.title("Average Performance of Models Across Tasks", fontsize=16)
    plt.xlabel("Average Score", fontsize=14)
    plt.ylabel("Model Configuration", fontsize=14)
    plt.gca().invert_yaxis()
    plt.grid(axis='x', linestyle='--', alpha=0.7)
    plt.tight_layout()
    plt.savefig("average_performance.png")
    plt.close()
    return "average_performance.png"
def plot_task_performance():
    # Melt only the benchmark columns so derived columns (e.g. "Average Score") are excluded
    df_full_melted = df_full.melt(id_vars=["Model Configuration", "Model Link"],
                                  value_vars=columns[2:], var_name="Task", value_name="Score")

    plt.figure(figsize=(14, 10))
    for model in df_full["Model Configuration"]:
        model_data = df_full_melted[df_full_melted["Model Configuration"] == model]
        plt.plot(model_data["Task"], model_data["Score"], marker="o", label=model)
    plt.title("Performance of All Models Across Tasks", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.xticks(rotation=45)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=9)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.tight_layout()
    plt.savefig("task_performance.png")
    plt.close()
    return "task_performance.png"
def plot_task_specific_top_models():
    task_cols = columns[2:]
    # idxmax() returns row indices; map them back to model names
    top_models = df_full[task_cols].idxmax().map(df_full["Model Configuration"])
    top_scores = df_full[task_cols].max()
    results = pd.DataFrame({"Top Model": top_models, "Score": top_scores}).reset_index().rename(columns={"index": "Task"})

    plt.figure(figsize=(12, 6))
    plt.bar(results["Task"], results["Score"])
    # Label each bar with the model that achieved the top score on that task
    for i, row in results.iterrows():
        plt.text(i, row["Score"], row["Top Model"], ha="center", va="bottom", fontsize=8, rotation=15)
    plt.title("Task-Specific Top Models", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.grid(axis="y", linestyle="--", alpha=0.7)
    plt.tight_layout()
    plt.savefig("task_specific_top_models.png")
    plt.close()
    return "task_specific_top_models.png"
def scrape_mergekit_config(model_name):
    """
    Scrapes the Hugging Face model page for a YAML (mergekit) configuration.
    """
    model_link = df_full.loc[df_full["Model Configuration"] == model_name, "Model Link"].values[0]
    try:
        response = requests.get(model_link, timeout=10)
    except requests.RequestException as e:
        return f"Request failed for {model_name}: {e}"
    if response.status_code != 200:
        return f"Failed to fetch model page for {model_name}. Please check the link."

    soup = BeautifulSoup(response.text, "html.parser")
    yaml_config = soup.find("pre")  # Assume the YAML config is rendered inside a <pre> tag
    if yaml_config:
        return yaml_config.text.strip()
    return f"No YAML configuration found for {model_name}."
def plot_heatmap():
    plt.figure(figsize=(12, 8))
    # Select the benchmark columns explicitly so the tick labels always match the data
    sns.heatmap(df_full[columns[2:]], annot=True, cmap="YlGnBu",
                xticklabels=columns[2:], yticklabels=df_full["Model Configuration"])
    plt.title("Performance Heatmap", fontsize=16)
    plt.tight_layout()
    plt.savefig("performance_heatmap.png")
    plt.close()
    return "performance_heatmap.png"
# Gradio app
with gr.Blocks() as demo:
    gr.Markdown("# Comprehensive Model Performance Analysis with Hugging Face Links")

    with gr.Row():
        btn1 = gr.Button("Show Average Performance")
        img1 = gr.Image(type="filepath")
        btn1.click(plot_average_scores, outputs=img1)

    with gr.Row():
        btn2 = gr.Button("Show Task Performance")
        img2 = gr.Image(type="filepath")
        btn2.click(plot_task_performance, outputs=img2)

    with gr.Row():
        btn3 = gr.Button("Task-Specific Top Models")
        img3 = gr.Image(type="filepath")
        btn3.click(plot_task_specific_top_models, outputs=img3)

    with gr.Row():
        btn4 = gr.Button("Plot Performance Heatmap")
        heatmap_img = gr.Image(type="filepath")
        btn4.click(plot_heatmap, outputs=heatmap_img)

    with gr.Row():
        model_selector = gr.Dropdown(choices=df_full["Model Configuration"].tolist(), label="Select a Model")
        scrape_btn = gr.Button("Scrape MergeKit Configuration")
        yaml_output = gr.Textbox(lines=10, placeholder="YAML Configuration will appear here.")
        scrape_btn.click(scrape_mergekit_config, inputs=model_selector, outputs=yaml_output)

demo.launch()
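# On a Hugging Face Space, launch() as called above is sufficient; for local runs a
# temporary public link can be requested with demo.launch(share=True) if desired.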