import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns, SearchColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
import json
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
BENCHMARK_COLS,
COLS,
EVAL_COLS,
EVAL_TYPES,
AutoEvalColumn,
ModelType,
fields,
WeightType,
Precision
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
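# Restart this Space via the Hub API; used as a fallback when dataset syncing fails
# and by the periodic scheduler at the end of this file.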
def restart_space():
API.restart_space(repo_id=REPO_ID)
### Space initialisation
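# Sync the evaluation requests and results datasets from the Hub; restart the Space if a download fails.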
try:
print(EVAL_REQUESTS_PATH)
snapshot_download(
repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
)
except Exception:
restart_space()
try:
print(EVAL_RESULTS_PATH)
snapshot_download(
repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
)
except Exception:
restart_space()
# LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
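# Leaderboard rows are loaded from a local JSONL file (one JSON object per line)
# instead of being rebuilt from the results dataset.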
import jsonlines
# Initialize an empty list to store the JSON objects
json_list = []
# Open the JSONL file
with jsonlines.open('commit_results.jsonl') as reader:
for obj in reader:
# Append each JSON object to the list
json_list.append(obj)
# _test_data = pd.DataFrame({"Score": [54,46,53], "Name": ["MageBench", "MageBench", "MageBench"], "BaseModel": ["GPT-4o", "GPT-4o", "LLaMA"], "Env.": ["Sokoban", "Sokoban", "Football"],
# "Target-research": ["Model-Eval-Global", "Model-Eval-Online", "Agent-Eval-Prompt"], "Subset": ["mini", "all", "mini"], "Link": ["xxx", "xxx", "xxx"]})
json_list = sorted(json_list, key=lambda x: x['Score'], reverse=True)
committed = pd.DataFrame(json_list)
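# Split the evaluation request queue into finished / running / pending entries.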
(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
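# Build the Leaderboard component: selectable columns, name/base-model search, and checkbox-group filters.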
def init_leaderboard(dataframe):
if dataframe is None or dataframe.empty:
raise ValueError("Leaderboard DataFrame is empty or None.")
return Leaderboard(
value=dataframe,
select_columns=SelectColumns(
default_selection=["Score", "Name", "BaseModel", "Env.", "Target-research", "Subset", "Link"],
cant_deselect=["Score", "Name",],
label="Select Columns to Display:",
),
search_columns=SearchColumns(primary_column="Name", secondary_columns=["BaseModel", "Target-research"],
placeholder="Search by work name or basemodel. To search by country, type 'basemodel:<query>'",
label="Search"),
filter_columns=[
ColumnFilter("Target-research", type="checkboxgroup", label="Comparison settings for target researches (Single Selection)"),
# ColumnFilter("BaseModel", type="dropdown", label="Select The base lmm model that fultill the task."),
ColumnFilter("Env.", type="checkboxgroup", label="Environment (Single Selection)"),
ColumnFilter("Subset", type="checkboxgroup", label="Subset (Single Selection)"),
ColumnFilter("State", type="checkboxgroup", label="Result state (checked or under-review)"),
# ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
# ColumnFilter(
# AutoEvalColumn.params.name,
# type="slider",
# min=0.01,
# max=150,
# label="Select the number of parameters (B)",
# ),
# ColumnFilter(
# AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
# ),
],
interactive=False,
)
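# In-memory buffer for entries staged via "Upload One Eval"; cleared by "Clear Uploads" and after "Submit All".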
all_submissions = []
demo = gr.Blocks(css=custom_css)
with demo:
gr.HTML(TITLE)
gr.Video('demo.mp4', elem_id="video-player", label="Introduction Video")
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
leaderboard = init_leaderboard(committed) # LEADERBOARD_DF
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Column():
with gr.Row():
score_input = gr.Textbox(label="Score (float)", placeholder="Enter the score")
name_input = gr.Textbox(label="Name (str)", placeholder="Enter the name")
base_model_input = gr.Textbox(label="BaseModel (str)", placeholder="Enter the base model name")
with gr.Row():
env_dropdown = gr.Dropdown(
choices=["Sokoban", "Football", "WebUI"],
label="Env.",
value="Sokoban"
)
target_research_dropdown = gr.Dropdown(
choices=["Model-Eval-Online", "Model-Eval-Global", "Agent-Eval-Prompt", "Agent-Eval-Finetune"],
label="Target-research",
value="Model-Eval-Online"
)
subset_dropdown = gr.Dropdown(
choices=["mini", "all"],
label="Subset",
value="mini"
)
link_input = gr.Textbox(label="Link (str)", placeholder="Enter the link")
submit_button = gr.Button("Upload One Eval")
with gr.Row():
clear_button = gr.Button("Clear Uploads")
submit_all_button = gr.Button("Submit All")
submission_result = gr.Markdown("## Uploaded results")
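# Stage one result entry in the in-memory buffer and refresh the staged-results display.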
def submit_eval(score, name, base_model, env, target_research, subset, link):
# Handle a single-entry submission
result = {
"Score": float(score),
"Name": name,
"BaseModel": base_model,
"Env.": env,
"Target-research": target_research,
"Subset": subset,
"Link": link,
"State": "Checking"
}
# Append the result to the module-level submission buffer
global all_submissions
all_submissions.append(result)
# Refresh the on-page display of staged submissions
display_text = "\n".join([json.dumps(submission) for submission in all_submissions])
return gr.Markdown("## Uploaded results\n\n```json\n"+display_text+"\n```")
def submit_all():
json_list = []
with jsonlines.open('commit_results.jsonl') as reader:
for obj in reader:
json_list.append(obj)
global all_submissions
if len(all_submissions)>0:
json_list.extend(all_submissions)
tmp_path = "tmp-output.json"
with jsonlines.open(tmp_path, mode='w') as writer:
writer.write_all(json_list)
print("Uploading eval file")
API.upload_file(
path_or_fileobj=tmp_path,
path_in_repo='commit_results.jsonl',
repo_id="microsoft/MageBench-Leaderboard",
repo_type="space",
commit_message=f"Add submissions to checking queue",
)
all_submissions = []
return gr.Markdown("## All submissions uploaded successfully! \nThis will re-start the space...")
else:
return gr.Markdown("Please click Upload One Eval to upload some results before you submit.")
def clear():
global all_submissions
all_submissions = []
return gr.Markdown("## Uploaded results")
# Click handler: stage a single entry
submit_button.click(
submit_eval,
[score_input, name_input, base_model_input, env_dropdown, target_research_dropdown, subset_dropdown, link_input],
submission_result
)
# Click handler: submit all staged entries
submit_all_button.click(
submit_all,
inputs=[],
outputs=submission_result
)
clear_button.click(
clear,
[],
submission_result
)
with gr.Row():
with gr.Accordion("📙 Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
show_copy_button=True,
)
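# Periodically restart the Space (every 30 minutes) so newly committed results are picked up.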
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()