Commit bc55399 (verified) by alielfilali01
1 Parent(s): 6387672

Update app.py

Files changed (1):
  1. app.py (+85 -85)
app.py CHANGED
@@ -243,91 +243,91 @@ with demo:
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    # with gr.Accordion(
-                    #     f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                    #     open=False,
-                    # ):
-                    #     with gr.Row():
-                    #         running_eval_table = gr.components.Dataframe(
-                    #             value=running_eval_queue_df,
-                    #             headers=EVAL_COLS,
-                    #             datatype=EVAL_TYPES,
-                    #             row_count=5,
-                    #         )
-                    # with gr.Accordion(
-                    #     f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                    #     open=False,
-                    # ):
-                    #     with gr.Row():
-                    #         pending_eval_table = gr.components.Dataframe(
-                    #             value=pending_eval_queue_df,
-                    #             headers=EVAL_COLS,
-                    #             datatype=EVAL_TYPES,
-                    #             row_count=5,
-                    #         )
-            # with gr.Row():
-            #     gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            # with gr.Row():
-            #     with gr.Column():
-            #         model_name_textbox = gr.Textbox(label="Model name")
-            #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-            #         model_type = gr.Dropdown(
-            #             choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-            #             label="Model type",
-            #             multiselect=False,
-            #             value=None,
-            #             interactive=True,
-            #         )
-
-            #     with gr.Column():
-            #         precision = gr.Dropdown(
-            #             choices=[i.value.name for i in Precision if i != Precision.Unknown],
-            #             label="Precision",
-            #             multiselect=False,
-            #             value="float16",
-            #             interactive=True,
-            #         )
-            #         weight_type = gr.Dropdown(
-            #             choices=[i.value.name for i in WeightType],
-            #             label="Weights type",
-            #             multiselect=False,
-            #             value="Original",
-            #             interactive=True,
-            #         )
-            #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            # submit_button = gr.Button("Submit Eval")
-            # submission_result = gr.Markdown()
-            # submit_button.click(
-            #     add_new_eval,
-            #     [
-            #         model_name_textbox,
-            #         base_model_name_textbox,
-            #         revision_name_textbox,
-            #         precision,
-            #         weight_type,
-            #         model_type,
-            #     ],
-            #     submission_result,
-            # )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             # with gr.Accordion(
+        #             #     f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #             #     open=False,
+        #             # ):
+        #             #     with gr.Row():
+        #             #         running_eval_table = gr.components.Dataframe(
+        #             #             value=running_eval_queue_df,
+        #             #             headers=EVAL_COLS,
+        #             #             datatype=EVAL_TYPES,
+        #             #             row_count=5,
+        #             #         )
+        #             # with gr.Accordion(
+        #             #     f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #             #     open=False,
+        #             # ):
+        #             #     with gr.Row():
+        #             #         pending_eval_table = gr.components.Dataframe(
+        #             #             value=pending_eval_queue_df,
+        #             #             headers=EVAL_COLS,
+        #             #             datatype=EVAL_TYPES,
+        #             #             row_count=5,
+        #             #         )
+        #     # with gr.Row():
+        #     #     gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        #     # with gr.Row():
+        #     #     with gr.Column():
+        #     #         model_name_textbox = gr.Textbox(label="Model name")
+        #     #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #     #         model_type = gr.Dropdown(
+        #     #             choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #     #             label="Model type",
+        #     #             multiselect=False,
+        #     #             value=None,
+        #     #             interactive=True,
+        #     #         )
+
+        #     #     with gr.Column():
+        #     #         precision = gr.Dropdown(
+        #     #             choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #     #             label="Precision",
+        #     #             multiselect=False,
+        #     #             value="float16",
+        #     #             interactive=True,
+        #     #         )
+        #     #         weight_type = gr.Dropdown(
+        #     #             choices=[i.value.name for i in WeightType],
+        #     #             label="Weights type",
+        #     #             multiselect=False,
+        #     #             value="Original",
+        #     #             interactive=True,
+        #     #         )
+        #     #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     # submit_button = gr.Button("Submit Eval")
+        #     # submission_result = gr.Markdown()
+        #     # submit_button.click(
+        #     #     add_new_eval,
+        #     #     [
+        #     #         model_name_textbox,
+        #     #         base_model_name_textbox,
+        #     #         revision_name_textbox,
+        #     #         precision,
+        #     #         weight_type,
+        #     #         model_type,
+        #     #     ],
+        #     #     submission_result,
+        #     # )
 
         with gr.Row():
             with gr.Accordion("📙 Citation", open=False):
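
The tab disabled above follows standard Gradio Blocks wiring: form components feed a `Button.click(fn, inputs, outputs)` handler whose return value is rendered into a `Markdown` output. Below is a minimal runnable sketch of that pattern, assuming only that `gradio` is installed; `add_new_eval` here is a stub standing in for the app's real submission handler, and the dropdown choices are illustrative placeholders for the `ModelType`, `Precision`, and `WeightType` enums referenced in app.py.

```python
# Minimal sketch of the submission wiring this commit comments out.
# Assumptions: `add_new_eval` is a stub, and the dropdown choices are
# placeholders for the ModelType / Precision / WeightType enums in app.py.
import gradio as gr


def add_new_eval(model_name, base_model, revision, precision, weight_type, model_type):
    # Stub: the real handler validates the request and appends it to the eval queue.
    return f"Queued `{model_name}` (revision `{revision or 'main'}`, {precision}, {weight_type}, {model_type})."


demo = gr.Blocks()
with demo:
    with gr.Tabs():
        with gr.TabItem("🚀 Submit here!"):
            with gr.Row():
                with gr.Column():
                    model_name_textbox = gr.Textbox(label="Model name")
                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
                    model_type = gr.Dropdown(choices=["pretrained", "fine-tuned"], label="Model type")
                with gr.Column():
                    precision = gr.Dropdown(choices=["float16", "bfloat16"], value="float16", label="Precision")
                    weight_type = gr.Dropdown(choices=["Original", "Delta", "Adapter"], value="Original", label="Weights type")
                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

            submit_button = gr.Button("Submit Eval")
            submission_result = gr.Markdown()
            # click(fn, inputs, outputs): the handler's return value is rendered
            # into the `submission_result` Markdown component.
            submit_button.click(
                add_new_eval,
                [
                    model_name_textbox,
                    base_model_name_textbox,
                    revision_name_textbox,
                    precision,
                    weight_type,
                    model_type,
                ],
                submission_result,
            )

if __name__ == "__main__":
    demo.launch()
```

Since this commit touches only the one hunk shown, re-enabling submissions should be a matter of uncommenting the block, presuming `add_new_eval` and the queue dataframes remain defined elsewhere in app.py.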