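# Gradio app that displays CAMEL-Bench scores for several vision-language models.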
import pandas as pd
import gradio as gr

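# Benchmark scores for each model across the eight CAMEL-Bench task categories.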
data = {
    "Method": ["GPT-4o", "GPT-4o-mini", "Gemini-1.5-Pro", "Gemini-1.5-Flash", "Qwen2-VL-2B"],
    "MM Understanding & Reasoning": [57.90, 48.82, 46.67, 45.58, 40.59],
    "OCR & Document Understanding": [59.11, 42.89, 36.59, 33.59, 25.68],
    "Charts & Diagram Understanding": [73.57, 64.98, 47.06, 48.25, 27.83],
    "Video Understanding": [74.27, 68.11, 42.94, 53.31, 38.90],
    "Cultural Specific Understanding": [80.86, 65.92, 56.24, 46.54, 34.27],
    "Medical Imaging": [49.90, 47.37, 33.77, 42.86, 29.12],
    "Agro Specific": [80.75, 79.58, 72.12, 76.06, 52.02],
    "Remote Sensing Understanding": [22.85, 16.93, 17.07, 14.95, 12.56]
}

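# Build the results table and append the unweighted mean over the eight task columns.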
df = pd.DataFrame(data)
df['Average Score'] = df.iloc[:, 1:].mean(axis=1)

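# Returns the benchmark table; not wired to any UI event in this demo.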
def display_data():
    return df

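# Lay out the UI: logo, title, short description, and a read-only results table.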
with gr.Blocks() as demo:
    gr.Markdown("![camel icon](https://cdn-uploads.huggingface.co/production/uploads/656864e12d73834278a8dea7/n-XfVKd1xVywH_vgPyJyQ.png)", elem_id="camel-icon")  # CAMEL-Bench logo
    gr.Markdown("# **CAMEL-Bench: Model Performance Across Vision Understanding Tasks**")
    gr.Markdown("""
    This table reports the performance of each model across the eight CAMEL-Bench task categories, from OCR and document understanding to video, medical imaging, and remote sensing, along with the average score across all tasks.
    """)
    gr.Dataframe(value=df, label="CAMEL-Bench Model Performance", interactive=False)

demo.launch()