DontPlanToEnd committed
Commit d68505c • 1 Parent(s): cea04e0

Update app.py

Files changed (1): app.py (+9 −9)
app.py CHANGED
@@ -54,6 +54,15 @@ def update_table(df: pd.DataFrame, query: str, param_ranges: list) -> pd.DataFrame:
 GraInter = gr.Blocks()
 
 with GraInter:
+    gr.HTML("""
+        <div style="display: flex; flex-direction: column; align-items: center;">
+            <div style="align-self: flex-start;">
+                <a href="mailto:[email protected]" target="_blank" style="color: blue; text-decoration: none;">Contact</a>
+            </div>
+            <h1 style="margin: 0;">UGI Leaderboard</h1>
+        </div>
+        """)
+
     with gr.Column():
         with gr.Row():
             search_bar = gr.Textbox(placeholder=" 🔍 Search for a model...", show_label=False, elem_id="search-bar")
@@ -99,15 +108,6 @@ with GraInter:
         inputs=inputs,
         outputs=outputs
     )
-
-    gr.HTML("""
-        <div style="display: flex; flex-direction: column; align-items: center;">
-            <div style="align-self: flex-start;">
-                <a href="mailto:[email protected]" target="_blank" style="color: blue; text-decoration: none;">Contact</a>
-            </div>
-            <h1 style="margin: 0;">UGI Leaderboard</h1>
-        </div>
-        """)
     gr.Markdown("""
     **UGI: Uncensored General Intelligence**. The average score from 5 different subjects that LLMs are commonly steered away from. The leaderboard is made of roughly 60 questions/tasks, measuring both "willingness to answer" and "accuracy" in controversial fact-based questions.
 
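The commit's net effect is purely positional: the `gr.HTML` header is declared before the main `gr.Column` instead of after the event wiring, and Gradio Blocks renders components in the order they are declared inside the `with` block, so the Contact link and title now sit at the top of the page. A minimal runnable sketch of that layout follows; only `GraInter`, `search_bar`, the `gr.HTML` header, and the `update_table` signature come from the diff, while the sample data, the filtering body, and the `Dataframe`/`launch` calls are assumptions for illustration:

```python
import gradio as gr
import pandas as pd

# Stand-in for the app's update_table; only its signature is visible in the
# hunk header, so this filtering body is an assumption for illustration.
def update_table(df: pd.DataFrame, query: str, param_ranges: list) -> pd.DataFrame:
    if query:
        return df[df["Model"].str.contains(query, case=False)]
    return df

# Hypothetical sample data standing in for the real leaderboard table.
df = pd.DataFrame({"Model": ["model-a", "model-b"], "UGI": [32.1, 28.7]})

GraInter = gr.Blocks()

with GraInter:
    # Declared first, so it renders first: Blocks lays components out in
    # declaration order, which is why the commit moves the header up here.
    gr.HTML("<h1 style='margin: 0;'>UGI Leaderboard</h1>")

    with gr.Column():
        with gr.Row():
            search_bar = gr.Textbox(placeholder=" 🔍 Search for a model...",
                                    show_label=False, elem_id="search-bar")
        table = gr.Dataframe(value=df)

    # Mirrors the inputs=/outputs= wiring visible in the second hunk:
    # retyping in the search bar re-filters the displayed table.
    search_bar.change(fn=lambda q: update_table(df, q, []),
                      inputs=search_bar, outputs=table)

GraInter.launch()
```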