ggbetz committed on
Commit 6b6a158
1 Parent(s): 6a98b81

Upload results for model microsoft/Orca-2-7b (#219)


- Upload results for model microsoft/Orca-2-7b (d5be6b9b881e21f4f0849ef4710f26d85dc30741)

data/microsoft/Orca-2-7b/orig/results_24-04-08-20:48:48.json ADDED
@@ -0,0 +1,192 @@
+ {
+   "results": {
+     "logiqa2_base": {
+       "acc,none": 0.3371501272264631,
+       "acc_stderr,none": 0.011926998374330696,
+       "alias": "logiqa2_base"
+     },
+     "logiqa_base": {
+       "acc,none": 0.28434504792332266,
+       "acc_stderr,none": 0.01804407677415737,
+       "alias": "logiqa_base"
+     },
+     "lsat-ar_base": {
+       "acc,none": 0.23478260869565218,
+       "acc_stderr,none": 0.028009647070930115,
+       "alias": "lsat-ar_base"
+     },
+     "lsat-lr_base": {
+       "acc,none": 0.25882352941176473,
+       "acc_stderr,none": 0.019413498131318402,
+       "alias": "lsat-lr_base"
+     },
+     "lsat-rc_base": {
+       "acc,none": 0.36059479553903345,
+       "acc_stderr,none": 0.02933123932995893,
+       "alias": "lsat-rc_base"
+     }
+   },
+   "configs": {
+     "logiqa2_base": {
+       "task": "logiqa2_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "logiqa2",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "logiqa_base": {
+       "task": "logiqa_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "logiqa",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "lsat-ar_base": {
+       "task": "lsat-ar_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "lsat-ar",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "lsat-lr_base": {
+       "task": "lsat-lr_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "lsat-lr",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "lsat-rc_base": {
+       "task": "lsat-rc_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "lsat-rc",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     }
+   },
+   "versions": {
+     "logiqa2_base": 0.0,
+     "logiqa_base": 0.0,
+     "lsat-ar_base": 0.0,
+     "lsat-lr_base": 0.0,
+     "lsat-rc_base": 0.0
+   },
+   "n-shot": {
+     "logiqa2_base": 0,
+     "logiqa_base": 0,
+     "lsat-ar_base": 0,
+     "lsat-lr_base": 0,
+     "lsat-rc_base": 0
+   },
+   "config": {
+     "model": "vllm",
+     "model_args": "pretrained=microsoft/Orca-2-7b,revision=main,dtype=float16,tensor_parallel_size=1,gpu_memory_utilization=0.7,trust_remote_code=true,max_length=2048",
+     "batch_size": "auto",
+     "batch_sizes": [],
+     "device": null,
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000,
+     "gen_kwargs": null
+   },
+   "git_hash": "741db1c"
+ }
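
For reference, a minimal Python sketch for reading the uploaded results file and printing the per-task accuracies recorded above. The local path is an assumption that mirrors the repository layout shown in the diff header; adjust it to wherever the file is stored.

import json

# Assumed local copy of the uploaded file (path mirrors the repo layout above).
RESULTS_PATH = "data/microsoft/Orca-2-7b/orig/results_24-04-08-20:48:48.json"

with open(RESULTS_PATH, encoding="utf-8") as f:
    data = json.load(f)

# Each task under "results" reports "acc,none" and "acc_stderr,none",
# as recorded in the JSON above.
for task, scores in data["results"].items():
    print(f'{task}: acc = {scores["acc,none"]:.4f} '
          f'(stderr = {scores["acc_stderr,none"]:.4f})')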