model type filters #139
by multimodalart - opened
- app.py +25 -5
- src/assets/css_html_js.py +0 -1
app.py
CHANGED
@@ -276,6 +276,18 @@ def select_columns(df, columns):
     filtered_df = df[always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]]
     return filtered_df
 
+#TODO allow this to filter by values of any columns
+def filter_items(df, leaderboard_table, query):
+    if query == "all":
+        return df[leaderboard_table.columns]
+    else:
+        query = query[0] #take only the emoji character
+    if AutoEvalColumn.model_type_symbol.name in leaderboard_table.columns:
+        filtered_df = df[(df[AutoEvalColumn.model_type_symbol.name] == query)]
+    else:
+        return leaderboard_table.columns
+    return filtered_df[leaderboard_table.columns]
+
 def change_tab(query_param):
     query_param = query_param.replace("'", '"')
     query_param = json.loads(query_param)
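To see what filter_items is doing, here is a minimal sketch on a toy frame (pandas only; the "T" column and the rows are made-up stand-ins for AutoEvalColumn.model_type_symbol.name and the real leaderboard data):

import pandas as pd

# Toy leaderboard: "T" stands in for the model-type-symbol column.
df = pd.DataFrame({
    "T": ["🟢", "🔶", "🟦"],
    "model": ["llama-7b", "alpaca-7b", "ppo-lm"],
})

# "all" short-circuits before any indexing; every other choice is
# reduced to its first character, the emoji, and matched exactly.
print(df[df["T"] == "🟢 base"[0]])   # keeps only the 🟢 row

Note that filter_items filters the full hidden table but slices the result to leaderboard_table.columns, so filtering never resurrects columns the user has hidden.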
@@ -305,11 +317,18 @@ with demo:
                     elem_id="column-select",
                     interactive=True,
                 )
-
-
-
-
-
+                with gr.Column(min_width=320):
+                    search_bar = gr.Textbox(
+                        placeholder="🔍 Search for your model and press ENTER...",
+                        show_label=False,
+                        elem_id="search-bar",
+                    )
+                    filter_columns = gr.Radio(
+                        label="⏚ Filter model types",
+                        choices = ["all", "🟢 base", "🔶 instruction-tuned", "🟦 RL-tuned"],
+                        value="all",
+                        elem_id="filter-columns"
+                    )
         leaderboard_table = gr.components.Dataframe(
             value=leaderboard_df[[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value+ [AutoEvalColumn.dummy.name]],
             headers=[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value + [AutoEvalColumn.dummy.name],
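The Radio choices added above double as the queries filter_items receives: "all" is special-cased, and every other label is reduced to its first character with query[0], so each choice has to start with the exact emoji stored in the model-type column. That indexing works because each of these emoji is a single Unicode code point:

# Each non-"all" choice starts with a one-code-point emoji, so choice[0] recovers it.
for choice in ["🟢 base", "🔶 instruction-tuned", "🟦 RL-tuned"]:
    print(repr(choice[0]))   # '🟢', '🔶', '🟦'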
@@ -334,6 +353,7 @@ with demo:
                 leaderboard_table,
             )
             shown_columns.change(select_columns, [hidden_leaderboard_table_for_search, shown_columns], leaderboard_table)
+            filter_columns.change(filter_items, [hidden_leaderboard_table_for_search, leaderboard_table, filter_columns], leaderboard_table)
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
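Putting the pieces together, here is a stripped-down sketch of the wiring this diff creates. gr.Blocks, gr.Radio, gr.Dataframe, and .change are real Gradio APIs; the data, the simplified filter, and the toy component set are illustrative, not the app's actual code:

import gradio as gr
import pandas as pd

full_df = pd.DataFrame({"T": ["🟢", "🔶", "🟦"], "model": ["a", "b", "c"]})

def filter_items(df, query):
    # Simplified: the PR's version also receives the visible table so it can
    # return only the columns that are currently shown.
    if query == "all":
        return df
    return df[df["T"] == query[0]]

with gr.Blocks() as demo:
    # Hidden full copy, playing the role of hidden_leaderboard_table_for_search.
    hidden = gr.Dataframe(value=full_df, visible=False)
    filter_columns = gr.Radio(
        choices=["all", "🟢 base", "🔶 instruction-tuned", "🟦 RL-tuned"],
        value="all",
        label="Filter model types",
    )
    table = gr.Dataframe(value=full_df)
    filter_columns.change(filter_items, [hidden, filter_columns], table)

demo.launch()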
src/assets/css_html_js.py
CHANGED
@@ -43,7 +43,6 @@ custom_css = """
 
 #search-bar {
     padding: 0px;
-    width: 30%;
 }
 
 /* Hides the final AutoEvalColumn */
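The width rule can go because the search bar now sits inside gr.Column(min_width=320), which takes over its sizing; #search-bar only keeps the padding reset. For context, custom_css is a plain string handed to Gradio when the demo is built. A minimal sketch of that hookup (gr.Blocks(css=...) is the real parameter; the lone Textbox is a stand-in):

import gradio as gr

custom_css = """
#search-bar {
    padding: 0px;
}
"""

with gr.Blocks(css=custom_css) as demo:
    gr.Textbox(show_label=False, elem_id="search-bar", placeholder="🔍 Search...")

demo.launch()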