aaditya committed on
Commit 6fdd321
1 Parent(s): c374f17

Update demo-leaderboard/gpt2-demo/results_2023-11-21T18-10-08.json

demo-leaderboard/gpt2-demo/results_2023-11-21T18-10-08.json CHANGED
@@ -57,373 +57,6 @@
  "alias": " - pubmedqa"
  }
  },
- "groups": {
- "multimedqa": {
- "alias": "stem",
- "acc_norm,none": 0.33614369501466274,
- "acc_norm_stderr,none": 0.006394243385413683,
- "acc,none": 0.3666430092264017,
- "acc_stderr,none": 0.005640719286996951
- }
- },
- "group_subtasks": {
- "multimedqa": [
- "mmlu_college_biology",
- "mmlu_professional_medicine",
- "mmlu_medical_genetics",
- "mmlu_college_medicine",
- "mmlu_clinical_knowledge",
- "mmlu_anatomy",
- "medqa_4options",
- "medmcqa",
- "pubmedqa"
- ]
- },
- "configs": {
- "medmcqa": {
- "task": "medmcqa",
- "dataset_path": "medmcqa",
- "training_split": "train",
- "validation_split": "validation",
- "test_split": "validation",
- "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Question: <question>\n Choices:\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n Answer:\n \"\"\"\n choices = [doc[\"opa\"], doc[\"opb\"], doc[\"opc\"], doc[\"opd\"]]\n option_choices = {\n \"A\": choices[0],\n \"B\": choices[1],\n \"C\": choices[2],\n \"D\": choices[3],\n }\n\n prompt = \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in option_choices.items():\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n",
- "doc_to_target": "cop",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- },
- {
- "metric": "acc_norm",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": true,
- "doc_to_decontamination_query": "{{question}}"
- },
- "medqa_4options": {
- "task": "medqa_4options",
- "dataset_path": "GBaker/MedQA-USMLE-4-options-hf",
- "training_split": "train",
- "validation_split": "validation",
- "test_split": "test",
- "doc_to_text": "def doc_to_text(doc) -> str:\n option_choices = {\n \"A\": doc[\"ending0\"],\n \"B\": doc[\"ending1\"],\n \"C\": doc[\"ending2\"],\n \"D\": doc[\"ending3\"],\n }\n answers = \"\".join((f\"{k}. {v}\\n\") for k, v in option_choices.items())\n return f\"Question: {doc['sent1']}\\n{answers}Answer:\"\n",
- "doc_to_target": "def doc_to_target(doc) -> int:\n return doc[\"label\"]\n",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- },
- {
- "metric": "acc_norm",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false
- },
- "mmlu_anatomy": {
- "task": "mmlu_anatomy",
- "task_alias": "anatomy (mmlu)",
- "group": "multimedqa",
- "group_alias": "stem",
- "dataset_path": "hails/mmlu_no_train",
- "dataset_name": "anatomy",
- "test_split": "test",
- "fewshot_split": "dev",
- "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
- "doc_to_target": "answer",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "fewshot_config": {
- "sampler": "first_n"
- },
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false,
- "metadata": {
- "version": 0.0
- }
- },
- "mmlu_clinical_knowledge": {
- "task": "mmlu_clinical_knowledge",
- "task_alias": "clinical_knowledge (mmlu)",
- "group": "multimedqa",
- "group_alias": "other",
- "dataset_path": "hails/mmlu_no_train",
- "dataset_name": "clinical_knowledge",
- "test_split": "test",
- "fewshot_split": "dev",
- "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
- "doc_to_target": "answer",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "fewshot_config": {
- "sampler": "first_n"
- },
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false,
- "metadata": {
- "version": 0.0
- }
- },
- "mmlu_college_biology": {
- "task": "mmlu_college_biology",
- "task_alias": "college_biology (mmlu)",
- "group": "multimedqa",
- "group_alias": "stem",
- "dataset_path": "hails/mmlu_no_train",
- "dataset_name": "college_biology",
- "test_split": "test",
- "fewshot_split": "dev",
- "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
- "doc_to_target": "answer",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "fewshot_config": {
- "sampler": "first_n"
- },
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false,
- "metadata": {
- "version": 0.0
- }
- },
- "mmlu_college_medicine": {
- "task": "mmlu_college_medicine",
- "task_alias": "college_medicine (mmlu)",
- "group": "multimedqa",
- "group_alias": "other",
- "dataset_path": "hails/mmlu_no_train",
- "dataset_name": "college_medicine",
- "test_split": "test",
- "fewshot_split": "dev",
- "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
- "doc_to_target": "answer",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "fewshot_config": {
- "sampler": "first_n"
- },
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false,
- "metadata": {
- "version": 0.0
- }
- },
- "mmlu_medical_genetics": {
- "task": "mmlu_medical_genetics",
- "task_alias": "medical_genetics (mmlu)",
- "group": "multimedqa",
- "group_alias": "other",
- "dataset_path": "hails/mmlu_no_train",
- "dataset_name": "medical_genetics",
- "test_split": "test",
- "fewshot_split": "dev",
- "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
- "doc_to_target": "answer",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "fewshot_config": {
- "sampler": "first_n"
- },
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false,
- "metadata": {
- "version": 0.0
- }
- },
- "mmlu_professional_medicine": {
- "task": "mmlu_professional_medicine",
- "task_alias": "professional_medicine (mmlu)",
- "group": "multimedqa",
- "group_alias": "other",
- "dataset_path": "hails/mmlu_no_train",
- "dataset_name": "professional_medicine",
- "test_split": "test",
- "fewshot_split": "dev",
- "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
- "doc_to_target": "answer",
- "doc_to_choice": [
- "A",
- "B",
- "C",
- "D"
- ],
- "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "fewshot_config": {
- "sampler": "first_n"
- },
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false,
- "metadata": {
- "version": 0.0
- }
- },
- "pubmedqa": {
- "task": "pubmedqa",
- "dataset_path": "bigbio/pubmed_qa",
- "dataset_name": "pubmed_qa_labeled_fold0_source",
- "training_split": "train",
- "validation_split": "validation",
- "test_split": "test",
- "doc_to_text": "def doc_to_text(doc) -> str:\n ctxs = \"\\n\".join(doc[\"CONTEXTS\"])\n return \"Abstract: {}\\nQuestion: {}\\nAnswer:\".format(\n ctxs,\n doc[\"QUESTION\"],\n )\n",
- "doc_to_target": "final_decision",
- "doc_to_choice": [
- "yes",
- "no",
- "maybe"
- ],
- "description": "",
- "target_delimiter": " ",
- "fewshot_delimiter": "\n\n",
- "metric_list": [
- {
- "metric": "acc",
- "aggregation": "mean",
- "higher_is_better": true
- }
- ],
- "output_type": "multiple_choice",
- "repeats": 1,
- "should_decontaminate": false,
- "metadata": {
- "version": 1.0
- }
- }
- },
- "versions": {
- "medmcqa": "Yaml",
- "medqa_4options": "Yaml",
- "mmlu_anatomy": 0.0,
- "mmlu_clinical_knowledge": 0.0,
- "mmlu_college_biology": 0.0,
- "mmlu_college_medicine": 0.0,
- "mmlu_medical_genetics": 0.0,
- "mmlu_professional_medicine": 0.0,
- "pubmedqa": 1.0
- },
- "n-shot": {
- "medmcqa": null,
- "medqa_4options": null,
- "mmlu_anatomy": null,
- "mmlu_clinical_knowledge": null,
- "mmlu_college_biology": null,
- "mmlu_college_medicine": null,
- "mmlu_medical_genetics": null,
- "mmlu_professional_medicine": null,
- "multimedqa": null,
- "pubmedqa": null
- },
  "config": {
  "model_dtype": "torch.float16",
  "model_name": "demo-leaderboard/gpt2-demo",
 