CodingWithTim committed
Commit 7205ba9
1 Parent(s): c34b84c

upload gpt-4o-mini battles

Files changed (4)
  1. .gitattributes +1 -0
  2. README.md +1 -1
  3. app.py +184 -0
  4. data/sample_gpt-4o-mini.jsonl +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
  colorFrom: pink
  colorTo: indigo
  sdk: gradio
- sdk_version: 4.39.0
+ sdk_version: 3.40.0
  app_file: app.py
  pinned: false
  license: apache-2.0
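
The sdk_version pin moves to the Gradio 3.x line, presumably because the new app.py below relies on 3.x-style component updates such as gr.Dropdown.update, which later Gradio releases dropped. A minimal sketch of the call style the app depends on (the helper name here is illustrative, not part of the commit):

import gradio as gr

# Gradio 3.x idiom used in app.py: a callback returns a component update
# built with the component's .update() classmethod.
def refresh_questions(choices):
    return gr.Dropdown.update(value=choices[0], choices=choices)
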
app.py ADDED
@@ -0,0 +1,184 @@
+ """
+ Usage:
+ python3 app.py --share
+ """
+
+ import argparse
+ import json
+ import re
+ from collections import defaultdict
+ from glob import glob
+
+ import gradio as gr
+ import pandas as pd
+
+ questions = []
+
+ question_selector_map = {}
+ category_selector_map = defaultdict(list)
+
+
+ def display_question(category_selector, request: gr.Request):
+     # Repopulate the question dropdown with the previews for the selected category.
+     choices = category_selector_map[category_selector]
+     return gr.Dropdown.update(
+         value=choices[0],
+         choices=choices,
+     )
+
+
+ def display_pairwise_answer(
+     question_selector, model_selector1=None, model_selector2=None, request: gr.Request = None
+ ):
+     # The model selector arguments are unused; the change callback only passes the question.
+     q = question_selector_map[question_selector]
+     qid = q["question_id"]
+
+     ans1 = q["conversation_a"]
+     ans2 = q["conversation_b"]
+
+     chat_mds = pairwise_to_gradio_chat_mds(q, ans1, ans2)
+
+     return chat_mds
+
+
+ newline_pattern1 = re.compile(r"\n\n(\d+\. )")
+ newline_pattern2 = re.compile(r"\n\n(- )")
+
+
+ def post_process_answer(x):
+     """Fix Markdown rendering problems."""
+     x = x.replace("\u2022", "- ")
+     x = re.sub(newline_pattern1, r"\n\g<1>", x)
+     x = re.sub(newline_pattern2, r"\n\g<1>", x)
+     return x
+
+
+ def pairwise_to_gradio_chat_mds(question, ans_a, ans_b, turn=None):
+     # Three Markdown cells per turn: the user prompt plus one answer per model.
+     end = question["turn"] * 3
+
+     mds = [""] * end
+     base = 0
+     for i in range(0, end, 3):
+         mds[i] = "##### `User`\n" + question["conversation_a"][base]["content"].strip()
+         mds[i + 1] = f"##### `{question['model_a']}`\n" + post_process_answer(
+             ans_a[base + 1]["content"].strip()
+         )
+         mds[i + 2] = f"##### `{question['model_b']}`\n" + post_process_answer(
+             ans_b[base + 1]["content"].strip()
+         )
+         base += 2
+
+     # Show the winning model's name, or the tie label itself.
+     winner = question["winner"] if "tie" in question["winner"] else question[question["winner"]]
+     mds += [f"##### Vote: {winner}"]
+     # Pad so the output always matches the 16 Markdown components in the UI.
+     mds += [""] * (16 - len(mds))
+
+     return mds
+
+
+ def build_question_selector_map():
+     global question_selector_map, category_selector_map
+
+     # Index each question by a preview of its first user message.
+     for q in questions:
+         preview = q["conversation_a"][0]["content"][:128] + "..."
+         question_selector_map[preview] = q
+         category_selector_map[q["category"]].append(preview)
+
+
+ def build_pairwise_browser_tab():
+     global question_selector_map, category_selector_map
+
+     num_sides = 2
+     num_turns = 5
+     side_names = ["A", "B"]
+
+     question_selector_choices = list(question_selector_map.keys())
+     category_selector_choices = list(category_selector_map.keys())
+
+     # Selectors
+     with gr.Row():
+         with gr.Column(scale=1, min_width=200):
+             category_selector = gr.Dropdown(
+                 choices=category_selector_choices,
+                 label="Category",
+                 container=False,
+             )
+         with gr.Column(scale=100):
+             question_selector = gr.Dropdown(
+                 choices=question_selector_choices, label="Question", container=False
+             )
+
+     # Conversation: 3 Markdown cells per turn (user, model A, model B) plus one
+     # extra cell, 16 components total to match pairwise_to_gradio_chat_mds.
+     chat_mds = []
+     for i in range(num_turns):
+         chat_mds.append(gr.Markdown(elem_id=f"user_question_{i+1}"))
+         with gr.Row():
+             for j in range(num_sides):
+                 with gr.Column(scale=100):
+                     chat_mds.append(gr.Markdown())
+
+                 if j == 0:
+                     # Thin spacer column between the two answers.
+                     with gr.Column(scale=1, min_width=8):
+                         gr.Markdown()
+     chat_mds.append(gr.Markdown())
+
+     # Callbacks
+     category_selector.change(display_question, [category_selector], [question_selector])
+     question_selector.change(
+         display_pairwise_answer,
+         [question_selector],
+         chat_mds,
+     )
+
+     return (category_selector,)
+
+
+ def load_demo():
+     # Default to the "Math" category when the page loads.
+     dropdown_update = gr.Dropdown.update(value="Math")
+     return dropdown_update
+
+
+ def build_demo():
+     build_question_selector_map()
+
+     with gr.Blocks(
+         title="Chatbot Arena Samples",
+         theme=gr.themes.Base(text_size=gr.themes.sizes.text_lg),
+     ) as demo:
+         gr.Markdown(
+             """
+ # Chatbot Arena Samples
+ We randomly sample 20 battles from each category using seed 42.
+ | [Paper](https://arxiv.org/abs/2403.04132) | [Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) |
+ """
+         )
+         (category_selector,) = build_pairwise_browser_tab()
+         demo.load(load_demo, [], [category_selector])
+
+     return demo
+
+
+ def load_questions(directory: str):
+     """Load questions from every JSONL file matching the glob pattern."""
+     questions = []
+     for file in glob(directory):
+         with open(file, "r") as ques_file:
+             for line in ques_file:
+                 if line:
+                     questions.append(json.loads(line))
+     return questions
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--host", type=str, default="0.0.0.0")
+     parser.add_argument("--port", type=int)
+     parser.add_argument("--share", action="store_true")
+     args = parser.parse_args()
+     print(args)
+
+     questions = load_questions("data/*.jsonl")
+
+     demo = build_demo()
+     demo.launch(
+         server_name=args.host, server_port=args.port, share=args.share, max_threads=200
+     )
data/sample_gpt-4o-mini.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff44e8848d8d17ae70a9dcdcacfdf5abc1cd3013d3ba1675e0ae196e09427dc9
+ size 10864607
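
To inspect the sample locally, a minimal sketch, assuming the LFS object has been pulled so the file contains records rather than the pointer shown above, and that the columns match the fields app.py reads:

import pandas as pd

# Requires `git lfs pull` (or an equivalent download) first; otherwise the file
# is only the small LFS pointer.
battles = pd.read_json("data/sample_gpt-4o-mini.jsonl", lines=True)
print(len(battles), "battles")
print(battles[["category", "model_a", "model_b", "winner"]].head())
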