DontPlanToEnd
committed on
Commit
•
0e29ede
1
Parent(s):
5c1c1ff
Update app.py
Browse files
app.py
CHANGED
@@ -57,7 +57,7 @@ def update_table(df: pd.DataFrame, query: str, param_ranges: dict) -> pd.DataFra
|
|
57 |
GraInterface = gr.Blocks()
|
58 |
|
59 |
with GraInterface:
|
60 |
-
gr.Markdown("## UGI Leaderboard", elem_classes="text-lg text-center",
|
61 |
gr.Markdown("""
|
62 |
UGI: Uncensored General Intelligence. The average of 5 different subjects that LLMs are commonly steered away from. The leaderboard is made from roughly 60 questions overall, measuring both "willingness to answer" and "accuracy" in fact-based controversial questions.\n
|
63 |
Willingness: A more narrow, 10-point score, solely measuring the LLM's willingness to answer controversial questions.\n
|
@@ -112,6 +112,13 @@ with GraInterface:
|
|
112 |
inputs=inputs,
|
113 |
outputs=outputs
|
114 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
|
116 |
# Launch the Gradio app
|
117 |
GraInterface.launch()
|
|
|
57 |
GraInterface = gr.Blocks()
|
58 |
|
59 |
with GraInterface:
|
60 |
+
gr.Markdown("## UGI Leaderboard", elem_classes="text-lg text-center", elem_id="title")
|
61 |
gr.Markdown("""
|
62 |
UGI: Uncensored General Intelligence. The average of 5 different subjects that LLMs are commonly steered away from. The leaderboard is made from roughly 60 questions overall, measuring both "willingness to answer" and "accuracy" in fact-based controversial questions.\n
|
63 |
Willingness: A more narrow, 10-point score, solely measuring the LLM's willingness to answer controversial questions.\n
|
|
|
112 |
inputs=inputs,
|
113 |
outputs=outputs
|
114 |
)
|
115 |
+
|
116 |
+
# Add custom CSS styles
|
117 |
+
GraInterface.css = """
|
118 |
+
#title {
|
119 |
+
text-align: center;
|
120 |
+
}
|
121 |
+
"""
|
122 |
|
123 |
# Launch the Gradio app
|
124 |
GraInterface.launch()
|