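# Gradio app to compare evaluation results from the 🤗 Open LLM Leaderboard:
# select two models, load their latest result files from the results dataset,
# and inspect their "results" and "configs" sections side by side.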
import io
import json
import gradio as gr
import pandas as pd
from huggingface_hub import HfFileSystem
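# Dataset with the raw leaderboard result files, addressed through HfFileSystem.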
RESULTS_DATASET_ID = "datasets/open-llm-leaderboard/results"
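# Top-level JSON keys that are dropped before building the comparison table.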
EXCLUDED_KEYS = {
"pretty_env_info",
"chat_template",
"group_subtasks",
}
# EXCLUDED_RESULTS_KEYS = {
# "leaderboard",
# }
# EXCLUDED_RESULTS_LEADERBOARDS_KEYS = {
# "alias",
# }
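# Choices for the task selector: (display label, value used to filter result rows).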
TASKS = {
"leaderboard_arc_challenge": ("ARC", "leaderboard_arc_challenge"),
"leaderboard_bbh": ("BBH", "leaderboard_bbh"),
"leaderboard_gpqa": ("GPQA", "leaderboard_gpqa"),
"leaderboard_ifeval": ("IFEval", "leaderboard_ifeval"),
"leaderboard_math_hard": ("MATH", "leaderboard_math"),
"leaderboard_mmlu_pro": ("MMLU-Pro", "leaderboard_mmlu_pro"),
"leaderboard_musr": ("MuSR", "leaderboard_musr"),
}
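# Filesystem-like interface to the Hugging Face Hub.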
fs = HfFileSystem()
def fetch_result_paths():
    paths = fs.glob(f"{RESULTS_DATASET_ID}/**/**/*.json")
    return paths
def filter_latest_result_path_per_model(paths):
    from collections import defaultdict

    d = defaultdict(list)
    for path in paths:
        model_id, _ = path[len(RESULTS_DATASET_ID) + 1:].rsplit("/", 1)
        d[model_id].append(path)
    # Result filenames embed a timestamp, so (assuming they sort lexicographically)
    # max() picks the most recent file for each model.
    return {model_id: max(paths) for model_id, paths in d.items()}
def get_result_path_from_model(model_id, result_path_per_model):
    return result_path_per_model[model_id]
def load_data(result_path) -> dict:
    with fs.open(result_path, "r") as f:
        data = json.load(f)
    return data
def load_result_dataframe(model_id):
    result_path = get_result_path_from_model(model_id, latest_result_path_per_model)
    data = load_data(result_path)
    model_name = data.get("model_name", "Model")
    # Flatten the nested JSON (minus the excluded keys) into a single-row DataFrame.
    df = pd.json_normalize([{key: value for key, value in data.items() if key not in EXCLUDED_KEYS}])
    # df.columns = df.columns.str.split(".")  # .split returns a list instead of a tuple
    return df.set_index(pd.Index([model_name])).reset_index()
def display_results(df_1, df_2, task):
    # Keep whichever results have been loaded, then transpose so each model is a column.
    df = pd.concat([df.set_index("index") for df in [df_1, df_2] if "index" in df.columns])
    df = df.T.rename_axis(columns=None)
    return display_tab("results", df, task), display_tab("configs", df, task)
def display_tab(tab, df, task):
    df = df.style.format(na_rep="")
    # Hide rows that do not belong to this tab, the aggregated "leaderboard" group,
    # ".alias" entries, and, when a single task is selected, rows from other tasks.
    df.hide(
        [
            row
            for row in df.index
            if (
                not row.startswith(f"{tab}.")
                or row.startswith(f"{tab}.leaderboard.")
                or row.endswith(".alias")
                or (not row.startswith(f"{tab}.{task}") if task != "All" else False)
            )
        ],
        axis="index",
    )
    # Strip the shared prefix and the ",none" suffix from the displayed row labels.
    start = len(f"{tab}.leaderboard_") if task == "All" else len(f"{tab}.{task}.")
    df.format_index(lambda idx: idx[start:].removesuffix(",none"), axis="index")
    return df.to_html()
def update_tasks(task):
    # Return the task selector as interactive once results have been loaded.
    return gr.Radio(
        ["All"] + list(TASKS.values()),
        label="Tasks",
        info="Evaluation tasks to be displayed",
        value="All",
        interactive=True,
    )
# if __name__ == "__main__":
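# Index the latest result file of every model once at startup.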
latest_result_path_per_model = filter_latest_result_path_per_model(fetch_result_paths())
with gr.Blocks(fill_height=True) as demo:
    gr.HTML("<h1 style='text-align: center;'>Compare Results of the 🤗 Open LLM Leaderboard</h1>")
    gr.HTML("<h3 style='text-align: center;'>Select 2 results to load and compare</h3>")
    with gr.Row():
        with gr.Column():
            model_id_1 = gr.Dropdown(choices=list(latest_result_path_per_model.keys()), label="Results")
            load_btn_1 = gr.Button("Load")
            dataframe_1 = gr.Dataframe(visible=False)
        with gr.Column():
            model_id_2 = gr.Dropdown(choices=list(latest_result_path_per_model.keys()), label="Results")
            load_btn_2 = gr.Button("Load")
            dataframe_2 = gr.Dataframe(visible=False)
    with gr.Row():
        # The task filter starts disabled; it becomes interactive once results are loaded.
        task = gr.Radio(
            ["All"] + list(TASKS.values()),
            label="Tasks",
            info="Evaluation tasks to be displayed",
            value="All",
            interactive=False,
        )
    with gr.Row():
        # with gr.Tab("All"):
        #     pass
        with gr.Tab("Results"):
            results = gr.HTML()
        with gr.Tab("Configs"):
            configs = gr.HTML()

    # Load the selected result file, re-render both tabs, then enable the task filter.
    load_btn_1.click(
        fn=load_result_dataframe,
        inputs=model_id_1,
        outputs=dataframe_1,
    ).then(
        fn=display_results,
        inputs=[dataframe_1, dataframe_2, task],
        outputs=[results, configs],
    ).then(
        fn=update_tasks,
        inputs=task,
        outputs=task,
    )
    load_btn_2.click(
        fn=load_result_dataframe,
        inputs=model_id_2,
        outputs=dataframe_2,
    ).then(
        fn=display_results,
        inputs=[dataframe_1, dataframe_2, task],
        outputs=[results, configs],
    ).then(
        fn=update_tasks,
        inputs=task,
        outputs=task,
    )
    # Re-render both tabs whenever the task filter changes.
    task.change(
        fn=display_results,
        inputs=[dataframe_1, dataframe_2, task],
        outputs=[results, configs],
    )

demo.launch()