Omartificial-Intelligence-Space
committed on
update app.py
Browse files
app.py
CHANGED
@@ -56,39 +56,6 @@ LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS,
|
|
56 |
pending_eval_queue_df,
|
57 |
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
|
58 |
|
59 |
-
def init_leaderboard(dataframe):
    """Build and return the Gradio ``Leaderboard`` component for *dataframe*.

    When *dataframe* is ``None`` or empty (no models evaluated yet), an empty
    frame carrying the configured column names is substituted so the component
    still renders.
    """
    # Guard: no evaluation results yet — fall back to an empty, correctly
    # shaped DataFrame instead of failing.
    if dataframe is None or dataframe.empty:
        print("Leaderboard DataFrame is empty. No models have been evaluated yet.")
        dataframe = pd.DataFrame(columns=[c.name for c in COLUMNS])

    # Pre-compute the column subsets the component is configured with.
    shown_by_default = [c.name for c in COLUMNS if c.displayed_by_default]
    always_shown = [c.name for c in COLUMNS if c.never_hidden]
    searchable = [c.name for c in COLUMNS if c.name in ["model", "license"]]
    hidden_cols = [c.name for c in COLUMNS if c.hidden]

    column_picker = SelectColumns(
        default_selection=shown_by_default,
        cant_deselect=always_shown,
        label="Select Columns to Display:",
    )
    row_filters = [
        ColumnFilter("model_type", type="checkboxgroup", label="Model types"),
        ColumnFilter("precision", type="checkboxgroup", label="Precision"),
        ColumnFilter(
            "params",
            type="slider",
            min=0.01,
            max=150,
            label="Select the number of parameters (B)",
        ),
        ColumnFilter(
            "still_on_hub", type="boolean", label="Deleted/incomplete", default=True
        ),
    ]

    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in COLUMNS],
        select_columns=column_picker,
        search_columns=searchable,
        hide_columns=hidden_cols,
        filter_columns=row_filters,
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )
|
90 |
-
|
91 |
-
|
92 |
demo = gr.Blocks(css=custom_css)
|
93 |
with demo:
|
94 |
gr.HTML(TITLE)
|
@@ -99,8 +66,34 @@ with demo:
|
|
99 |
if LEADERBOARD_DF.empty:
|
100 |
gr.Markdown("No evaluations have been performed yet. The leaderboard is currently empty.")
|
101 |
else:
|
102 |
-
leaderboard =
|
103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
104 |
|
105 |
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
|
106 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
@@ -180,4 +173,4 @@ with demo:
|
|
180 |
scheduler = BackgroundScheduler()
|
181 |
scheduler.add_job(restart_space, "interval", seconds=1800)
|
182 |
scheduler.start()
|
183 |
-
demo.queue(default_concurrency_limit=40).launch()
|
|
|
56 |
pending_eval_queue_df,
|
57 |
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
|
58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
demo = gr.Blocks(css=custom_css)
|
60 |
with demo:
|
61 |
gr.HTML(TITLE)
|
|
|
66 |
if LEADERBOARD_DF.empty:
|
67 |
gr.Markdown("No evaluations have been performed yet. The leaderboard is currently empty.")
|
68 |
else:
|
69 |
+
leaderboard = Leaderboard(
|
70 |
+
value=LEADERBOARD_DF,
|
71 |
+
datatype=[col.type for col in COLUMNS],
|
72 |
+
select_columns=SelectColumns(
|
73 |
+
default_selection=[col.name for col in COLUMNS if col.displayed_by_default],
|
74 |
+
cant_deselect=[col.name for col in COLUMNS if col.never_hidden],
|
75 |
+
label="Select Columns to Display:",
|
76 |
+
),
|
77 |
+
search_columns=[col.name for col in COLUMNS if col.name in ["model", "license"]],
|
78 |
+
hide_columns=[col.name for col in COLUMNS if col.hidden],
|
79 |
+
filter_columns=[
|
80 |
+
ColumnFilter("model_type", type="checkboxgroup", label="Model types"),
|
81 |
+
ColumnFilter("precision", type="checkboxgroup", label="Precision"),
|
82 |
+
ColumnFilter(
|
83 |
+
"params",
|
84 |
+
type="slider",
|
85 |
+
min=0.01,
|
86 |
+
max=150,
|
87 |
+
label="Select the number of parameters (B)",
|
88 |
+
),
|
89 |
+
ColumnFilter(
|
90 |
+
"still_on_hub", type="boolean", label="Deleted/incomplete", default=True
|
91 |
+
),
|
92 |
+
],
|
93 |
+
bool_checkboxgroup_label="Hide models",
|
94 |
+
interactive=False,
|
95 |
+
)
|
96 |
+
# No need to call leaderboard.render() since it's created within the Gradio context
|
97 |
|
98 |
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
|
99 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
|
|
173 |
scheduler = BackgroundScheduler()
|
174 |
scheduler.add_job(restart_space, "interval", seconds=1800)
|
175 |
scheduler.start()
|
176 |
+
demo.queue(default_concurrency_limit=40).launch()
|