jihoo-kim committed on
Commit
6de4b3b
·
verified ·
1 Parent(s): aa4fd2e

fix submission info

Browse files
Files changed (1) hide show
  1. app.py +99 -98
app.py CHANGED
@@ -297,104 +297,105 @@ with demo:
297
  gr.Markdown(FAQ_TEXT, elem_classes="markdown-text")
298
 
299
  # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
300
- # with gr.Column():
301
- # with gr.Row():
302
- # gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
303
-
304
- # with gr.Column():
305
- # with gr.Accordion(
306
- # f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
307
- # open=False,
308
- # ):
309
- # with gr.Row():
310
- # finished_eval_table = gr.components.Dataframe(
311
- # value=finished_eval_queue_df,
312
- # headers=EVAL_COLS,
313
- # datatype=EVAL_TYPES,
314
- # row_count=5,
315
- # )
316
- # with gr.Accordion(
317
- # f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
318
- # open=False,
319
- # ):
320
- # with gr.Row():
321
- # running_eval_table = gr.components.Dataframe(
322
- # value=running_eval_queue_df,
323
- # headers=EVAL_COLS,
324
- # datatype=EVAL_TYPES,
325
- # row_count=5,
326
- # )
327
-
328
- # with gr.Accordion(
329
- # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
330
- # open=False,
331
- # ):
332
- # with gr.Row():
333
- # pending_eval_table = gr.components.Dataframe(
334
- # value=pending_eval_queue_df,
335
- # headers=EVAL_COLS,
336
- # datatype=EVAL_TYPES,
337
- # row_count=5,
338
- # )
339
- # with gr.Accordion(
340
- # f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
341
- # open=False,
342
- # ):
343
- # with gr.Row():
344
- # pending_eval_table = gr.components.Dataframe(
345
- # value=failed_eval_queue_df,
346
- # headers=EVAL_COLS,
347
- # datatype=EVAL_TYPES,
348
- # row_count=5,
349
- # )
350
- # with gr.Row():
351
- # gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
352
-
353
- # with gr.Row():
354
- # with gr.Column():
355
- # model_name_textbox = gr.Textbox(label="Model name")
356
- # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
357
- # private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
358
- # model_type = gr.Dropdown(
359
- # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
360
- # label="Model type",
361
- # multiselect=False,
362
- # value=ModelType.FT.to_str(" : "),
363
- # interactive=True,
364
- # )
365
-
366
- # with gr.Column():
367
- # precision = gr.Dropdown(
368
- # choices=[i.value.name for i in Precision if i != Precision.Unknown],
369
- # label="Precision",
370
- # multiselect=False,
371
- # value="float16",
372
- # interactive=True,
373
- # )
374
- # weight_type = gr.Dropdown(
375
- # choices=[i.value.name for i in WeightType],
376
- # label="Weights type",
377
- # multiselect=False,
378
- # value="Original",
379
- # interactive=True,
380
- # )
381
- # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
382
-
383
- # submit_button = gr.Button("Submit Evalulation!")
384
- # submission_result = gr.Markdown()
385
- # submit_button.click(
386
- # add_new_eval,
387
- # [
388
- # model_name_textbox,
389
- # base_model_name_textbox,
390
- # revision_name_textbox,
391
- # precision,
392
- # private,
393
- # weight_type,
394
- # model_type,
395
- # ],
396
- # submission_result,
397
- # )
 
398
 
399
  with gr.Row():
400
  with gr.Accordion("📙 Citation", open=False):
 
297
  gr.Markdown(FAQ_TEXT, elem_classes="markdown-text")
298
 
299
  # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
300
+ with gr.TabItem("Submission Info", elem_id="llm-benchmark-tab-table", id=3):
301
+ with gr.Column():
302
+ with gr.Row():
303
+ gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
304
+
305
+ with gr.Column():
306
+ with gr.Accordion(
307
+ f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
308
+ open=False,
309
+ ):
310
+ with gr.Row():
311
+ finished_eval_table = gr.components.Dataframe(
312
+ value=finished_eval_queue_df,
313
+ headers=EVAL_COLS,
314
+ datatype=EVAL_TYPES,
315
+ row_count=5,
316
+ )
317
+ with gr.Accordion(
318
+ f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
319
+ open=False,
320
+ ):
321
+ with gr.Row():
322
+ running_eval_table = gr.components.Dataframe(
323
+ value=running_eval_queue_df,
324
+ headers=EVAL_COLS,
325
+ datatype=EVAL_TYPES,
326
+ row_count=5,
327
+ )
328
+
329
+ with gr.Accordion(
330
+ f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
331
+ open=False,
332
+ ):
333
+ with gr.Row():
334
+ pending_eval_table = gr.components.Dataframe(
335
+ value=pending_eval_queue_df,
336
+ headers=EVAL_COLS,
337
+ datatype=EVAL_TYPES,
338
+ row_count=5,
339
+ )
340
+ with gr.Accordion(
341
+ f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
342
+ open=False,
343
+ ):
344
+ with gr.Row():
345
+ pending_eval_table = gr.components.Dataframe(
346
+ value=failed_eval_queue_df,
347
+ headers=EVAL_COLS,
348
+ datatype=EVAL_TYPES,
349
+ row_count=5,
350
+ )
351
+ # with gr.Row():
352
+ # gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
353
+
354
+ # with gr.Row():
355
+ # with gr.Column():
356
+ # model_name_textbox = gr.Textbox(label="Model name")
357
+ # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
358
+ # private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
359
+ # model_type = gr.Dropdown(
360
+ # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
361
+ # label="Model type",
362
+ # multiselect=False,
363
+ # value=ModelType.FT.to_str(" : "),
364
+ # interactive=True,
365
+ # )
366
+
367
+ # with gr.Column():
368
+ # precision = gr.Dropdown(
369
+ # choices=[i.value.name for i in Precision if i != Precision.Unknown],
370
+ # label="Precision",
371
+ # multiselect=False,
372
+ # value="float16",
373
+ # interactive=True,
374
+ # )
375
+ # weight_type = gr.Dropdown(
376
+ # choices=[i.value.name for i in WeightType],
377
+ # label="Weights type",
378
+ # multiselect=False,
379
+ # value="Original",
380
+ # interactive=True,
381
+ # )
382
+ # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
383
+
384
+ # submit_button = gr.Button("Submit Evalulation!")
385
+ # submission_result = gr.Markdown()
386
+ # submit_button.click(
387
+ # add_new_eval,
388
+ # [
389
+ # model_name_textbox,
390
+ # base_model_name_textbox,
391
+ # revision_name_textbox,
392
+ # precision,
393
+ # private,
394
+ # weight_type,
395
+ # model_type,
396
+ # ],
397
+ # submission_result,
398
+ # )
399
 
400
  with gr.Row():
401
  with gr.Accordion("📙 Citation", open=False):