Upload 2 files
- OCRBench.csv +13 -0
- leadboard.py +174 -0
OCRBench.csv
ADDED
@@ -0,0 +1,13 @@
+Model,Text Recognition,Scene Text-Centric VQA,Doc-Oriented VQA,KIE,HMER,Final Score,Link
+Gemini,215,174,128,134,8,659,https://deepmind.google/technologies/gemini/
+GPT4V,167,163,146,160,9,645,https://openai.com/
+Monkey,174,161,91,88,0,514,https://arxiv.org/abs/2311.06607
+mPLUG-Owl2,153,153,41,19,0,366,https://arxiv.org/abs/2311.04257
+LLaVAR,186,122,25,13,0,346,https://arxiv.org/abs/2306.17107
+LLaVA1.5-13B,176,129,19,7,0,331,https://arxiv.org/abs/2310.03744
+LLaVA1.5-7B,160,117,15,5,0,297,https://arxiv.org/abs/2310.03744
+mPLUG-Owl,172,104,18,3,0,297,https://arxiv.org/abs/2304.14178
+BLIVA,165,103,22,1,0,291,https://arxiv.org/abs/2308.09936
+InstructBLIP,168,93,14,1,0,276,https://arxiv.org/abs/2305.06500
+BLIP2-6.7B,154,71,10,0,0,235,https://arxiv.org/abs/2301.12597
+MiniGPT4V2,124,29,4,0,0,157,https://arxiv.org/abs/2310.09478
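
For reference, the Final Score column is simply the sum of the five component scores (e.g. Gemini: 215 + 174 + 128 + 134 + 8 = 659). A minimal pandas sketch for loading and ranking this table, assuming OCRBench.csv is in the working directory:

import pandas as pd

# Load the benchmark table added above.
df = pd.read_csv("OCRBench.csv")

# Sanity check: Final Score equals the sum of the five sub-scores.
parts = ["Text Recognition", "Scene Text-Centric VQA", "Doc-Oriented VQA", "KIE", "HMER"]
assert (df[parts].sum(axis=1) == df["Final Score"]).all()

# Rank models from best to worst, as the leaderboard script below does.
print(df.sort_values("Final Score", ascending=False)[["Model", "Final Score"]])
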
leadboard.py
ADDED
@@ -0,0 +1,174 @@
+import argparse
+
+import gradio as gr
+import pandas as pd
+
+block_css = """
+#notice_markdown {
+    font-size: 104%
+}
+#notice_markdown th {
+    display: none;
+}
+#notice_markdown td {
+    padding-top: 6px;
+    padding-bottom: 6px;
+}
+#leaderboard_markdown {
+    font-size: 104%
+}
+#leaderboard_markdown td {
+    padding-top: 6px;
+    padding-bottom: 6px;
+}
+#leaderboard_dataframe td {
+    line-height: 0.1em;
+}
+footer {
+    display: none !important
+}
+.image-container {
+    display: flex;
+    align-items: center;
+    padding: 1px;
+}
+.image-container img {
+    margin: 0 30px;
+    height: 20px;
+    max-height: 100%;
+    width: auto;
+    max-width: 20%;
+}
+"""
+
+
+def model_hyperlink(model_name, link):
+    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline; text-decoration-style: dotted;">{model_name}</a>'
+
+
+def load_leaderboard_table_csv(filename, add_hyperlink=True):
+    # Each CSV row becomes a dict; every column except Model and Link
+    # holds an integer score.
+    with open(filename) as f:
+        lines = f.readlines()
+    heads = [v.strip() for v in lines[0].split(",")]
+    rows = []
+    for line in lines[1:]:
+        row = [v.strip() for v in line.split(",")]
+        item = {}
+        for h, v in zip(heads, row):
+            if h != "Model" and h != "Link":
+                item[h] = int(v)
+            else:
+                item[h] = v
+        if add_hyperlink:
+            item["Model"] = model_hyperlink(item["Model"], item["Link"])
+        rows.append(item)
+    return rows
+
+
+def get_arena_table(model_table_df):
+    # Sort models by Final Score, best first.
+    model_table_df = model_table_df.sort_values(by=["Final Score"], ascending=False)
+    values = []
+    for i in range(len(model_table_df)):
+        record = model_table_df.iloc[i]
+        values.append([
+            i + 1,  # rank
+            record["Model"],  # model display name (hyperlinked)
+            record["Text Recognition"],
+            record["Scene Text-Centric VQA"],
+            record["Doc-Oriented VQA"],
+            record["KIE"],
+            record["HMER"],
+            record["Final Score"],
+        ])
+    return values
+
+
+def build_leaderboard_tab(leaderboard_table_file, show_plot=False):
+    if leaderboard_table_file:
+        data = load_leaderboard_table_csv(leaderboard_table_file)
+        model_table_df = pd.DataFrame(data)
+        md_head = """
+# 🏆 OCRBench Leaderboard
+| [GitHub](https://github.com/Yuliang-Liu/MultimodalOCR) | [Paper](https://arxiv.org/abs/2305.07895) |
+"""
+        gr.Markdown(md_head, elem_id="leaderboard_markdown")
+        with gr.Tabs():
+            # Ranked leaderboard table.
+            arena_table_vals = get_arena_table(model_table_df)
+            with gr.Tab("OCRBench", id=0):
+                md = "OCRBench is a comprehensive evaluation benchmark designed to assess the OCR capabilities of Large Multimodal Models. It comprises five components: Text Recognition, Scene Text-Centric VQA, Document-Oriented VQA, Key Information Extraction, and Handwritten Mathematical Expression Recognition. The benchmark includes 1000 question-answer pairs, and all the answers undergo manual verification and correction to ensure a more precise evaluation."
+                gr.Markdown(md, elem_id="leaderboard_markdown")
+                gr.Dataframe(
+                    headers=[
+                        "Rank",
+                        "Name",
+                        "Text Recognition",
+                        "Scene Text-Centric VQA",
+                        "Doc-Oriented VQA",
+                        "KIE",
+                        "HMER",
+                        "Final Score",
+                    ],
+                    datatype=[
+                        "str",
+                        "markdown",
+                        "number",
+                        "number",
+                        "number",
+                        "number",
+                        "number",
+                        "number",
+                    ],
+                    value=arena_table_vals,
+                    elem_id="arena_leaderboard_dataframe",
+                    height=700,
+                    column_widths=[60, 120, 150, 200, 180, 80, 80, 160],
+                    wrap=True,
+                )
+    md_tail = """
+# Notice
+If you would like to include your model in the OCRBench leaderboard, please follow the evaluation instructions provided on [GitHub](https://github.com/Yuliang-Liu/MultimodalOCR) and feel free to contact us via email at [email protected]. We will update the leaderboard promptly."""
+    gr.Markdown(md_tail, elem_id="leaderboard_markdown")
+
+
+def build_demo(leaderboard_table_file):
+    text_size = gr.themes.sizes.text_lg
+
+    with gr.Blocks(
+        title="OCRBench Leaderboard",
+        theme=gr.themes.Base(text_size=text_size),
+        css=block_css,
+    ) as demo:
+        build_leaderboard_tab(leaderboard_table_file, show_plot=True)
+    return demo
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--share", action="store_true")
+    # Default to the OCRBench.csv uploaded alongside this script.
+    parser.add_argument("--OCRBench_file", type=str, default="OCRBench.csv")
+    args = parser.parse_args()
+
+    demo = build_demo(args.OCRBench_file)
+    demo.launch(server_name="0.0.0.0", server_port=7682, share=args.share)
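
The app starts with `python leadboard.py` and serves on port 7682; passing --share additionally requests a public Gradio link. As a quick smoke test of the two helpers above (a hypothetical check, not part of the commit; it assumes OCRBench.csv sits next to leadboard.py):

import pandas as pd

from leadboard import get_arena_table, load_leaderboard_table_csv

# Load without hyperlinks so model names stay plain strings.
rows = load_leaderboard_table_csv("OCRBench.csv", add_hyperlink=False)
table = get_arena_table(pd.DataFrame(rows))
assert table[0][:2] == [1, "Gemini"]  # the highest Final Score takes rank 1
print(table[0])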