aaditya committed
Commit c374f17
1 Parent(s): aa106db

Update demo-leaderboard/gpt2-demo/results_2023-11-21T18-10-08.json

demo-leaderboard/gpt2-demo/results_2023-11-21T18-10-08.json CHANGED
@@ -1,15 +1,436 @@
  {
- "config": {
- "model_dtype": "torch.float16",
- "model_name": "demo-leaderboard/gpt2-demo",
- "model_sha": "ac3299b02780836378b9e1e68c6eead546e89f90"
  },
- "results": {
- "medmcqa": {
- "acc,none": 0
  },
- "task_name2": {
- "metric_name": 0.90
  }
  }
  }
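The placeholder above is replaced by a complete results payload in the layout emitted by EleutherAI's lm-evaluation-harness (the results, groups, group_subtasks, configs, versions, n-shot, git_hash and pretty_env_info fields below are its standard output). A minimal sketch of regenerating such a file through the harness's Python API, assuming the installed version provides the "multimedqa" task group and the same argument names:

# Sketch only: assumes lm-evaluation-harness is installed and exposes
# simple_evaluate with these arguments; names can vary across versions.
import json

from lm_eval import simple_evaluate

results = simple_evaluate(
    model="hf",
    model_args="pretrained=demo-leaderboard/gpt2-demo,dtype=float16",
    tasks=["multimedqa"],
)

# The returned dict mirrors the sections of the file below.
with open("results_2023-11-21T18-10-08.json", "w") as f:
    json.dump(results, f, default=str)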
 
  {
+ "results": {
+ "multimedqa": {
+ "alias": "stem",
+ "acc_norm,none": 0.33614369501466274,
+ "acc_norm_stderr,none": 0.006394243385413683,
+ "acc,none": 0.3666430092264017,
+ "acc_stderr,none": 0.005640719286996951
+ },
+ "medmcqa": {
+ "acc,none": 0.34305522352378676,
+ "acc_stderr,none": 0.007340986677455324,
+ "acc_norm,none": 0.34305522352378676,
+ "acc_norm_stderr,none": 0.007340986677455324,
+ "alias": " - medmcqa"
+ },
+ "medqa_4options": {
+ "acc,none": 0.31343283582089554,
+ "acc_stderr,none": 0.013006792288528347,
+ "acc_norm,none": 0.31343283582089554,
+ "acc_norm_stderr,none": 0.013006792288528347,
+ "alias": " - medqa_4options"
+ },
+ "mmlu_anatomy": {
+ "alias": " - anatomy (mmlu)",
+ "acc,none": 0.3925925925925926,
+ "acc_stderr,none": 0.0421850621536888
+ },
+ "mmlu_clinical_knowledge": {
+ "alias": " - clinical_knowledge (mmlu)",
+ "acc,none": 0.46037735849056605,
+ "acc_stderr,none": 0.030676096599389188
+ },
+ "mmlu_college_biology": {
+ "alias": " - college_biology (mmlu)",
+ "acc,none": 0.3402777777777778,
+ "acc_stderr,none": 0.03962135573486219
+ },
+ "mmlu_college_medicine": {
+ "alias": " - college_medicine (mmlu)",
+ "acc,none": 0.37572254335260113,
+ "acc_stderr,none": 0.036928207672648664
  },
+ "mmlu_medical_genetics": {
+ "alias": " - medical_genetics (mmlu)",
+ "acc,none": 0.42,
+ "acc_stderr,none": 0.04960449637488584
+ },
+ "mmlu_professional_medicine": {
+ "alias": " - professional_medicine (mmlu)",
+ "acc,none": 0.29044117647058826,
+ "acc_stderr,none": 0.027576468622740522
+ },
+ "pubmedqa": {
+ "acc,none": 0.678,
+ "acc_stderr,none": 0.02091666833001987,
+ "alias": " - pubmedqa"
+ }
+ },
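Reading the block above: each "*_stderr,none" value is the standard error of the mean of the per-item 0/1 scores, i.e. sqrt(p * (1 - p) / (n - 1)) for accuracy p over n items. A quick check against the pubmedqa row, assuming the usual 500-item test split of pubmed_qa_labeled_fold0_source (split sizes are not recorded in this file):

import math

p = 0.678  # "acc,none" for pubmedqa above
n = 500    # assumed test-split size, not stored in this JSON
print(math.sqrt(p * (1 - p) / (n - 1)))  # 0.0209166..., cf. "acc_stderr,none"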
+ "groups": {
+ "multimedqa": {
+ "alias": "stem",
+ "acc_norm,none": 0.33614369501466274,
+ "acc_norm_stderr,none": 0.006394243385413683,
+ "acc,none": 0.3666430092264017,
+ "acc_stderr,none": 0.005640719286996951
+ }
+ },
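The "groups" block repeats the multimedqa aggregate from "results": it is the size-weighted (micro-averaged) accuracy over all items pooled from the nine subtasks, not an unweighted mean of subtask scores. A check under assumed per-task test-split sizes (standard benchmark sizes; they are not stored in this file):

# Assumed test-split sizes (not part of this JSON).
sizes = {
    "mmlu_anatomy": 135,
    "mmlu_clinical_knowledge": 265,
    "mmlu_college_biology": 144,
    "mmlu_college_medicine": 173,
    "mmlu_medical_genetics": 100,
    "mmlu_professional_medicine": 272,
    "medqa_4options": 1273,
    "medmcqa": 4183,
    "pubmedqa": 500,
}
# "acc,none" values copied from the results block above.
accs = {
    "mmlu_anatomy": 0.3925925925925926,
    "mmlu_clinical_knowledge": 0.46037735849056605,
    "mmlu_college_biology": 0.3402777777777778,
    "mmlu_college_medicine": 0.37572254335260113,
    "mmlu_medical_genetics": 0.42,
    "mmlu_professional_medicine": 0.29044117647058826,
    "medqa_4options": 0.31343283582089554,
    "medmcqa": 0.34305522352378676,
    "pubmedqa": 0.678,
}
micro = sum(accs[t] * sizes[t] for t in sizes) / sum(sizes.values())
print(micro)  # 0.3666430..., cf. "acc,none" under "groups" -> "multimedqa"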
+ "group_subtasks": {
+ "multimedqa": [
+ "mmlu_college_biology",
+ "mmlu_professional_medicine",
+ "mmlu_medical_genetics",
+ "mmlu_college_medicine",
+ "mmlu_clinical_knowledge",
+ "mmlu_anatomy",
+ "medqa_4options",
+ "medmcqa",
+ "pubmedqa"
+ ]
+ },
+ "configs": {
+ "medmcqa": {
+ "task": "medmcqa",
+ "dataset_path": "medmcqa",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "validation",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Question: <question>\n Choices:\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n Answer:\n \"\"\"\n choices = [doc[\"opa\"], doc[\"opb\"], doc[\"opc\"], doc[\"opd\"]]\n option_choices = {\n \"A\": choices[0],\n \"B\": choices[1],\n \"C\": choices[2],\n \"D\": choices[3],\n }\n\n prompt = \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in option_choices.items():\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "cop",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
  },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{question}}"
+ },
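The "doc_to_text" fields store Python source serialized as escaped JSON strings. Unescaped, the medmcqa prompt builder above reads as follows (reformatted for readability only; the logic is exactly the string above). The target "cop" is the dataset's correct-option index, which "doc_to_choice" maps to a letter.

def doc_to_text(doc) -> str:
    """
    Question: <question>
    Choices:
    A. <choice1>
    B. <choice2>
    C. <choice3>
    D. <choice4>
    Answer:
    """
    choices = [doc["opa"], doc["opb"], doc["opc"], doc["opd"]]
    option_choices = {
        "A": choices[0],
        "B": choices[1],
        "C": choices[2],
        "D": choices[3],
    }

    prompt = "Question: " + doc["question"] + "\nChoices:\n"
    for choice, option in option_choices.items():
        prompt += f"{choice.upper()}. {option}\n"
    prompt += "Answer:"
    return prompt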
+ "medqa_4options": {
+ "task": "medqa_4options",
+ "dataset_path": "GBaker/MedQA-USMLE-4-options-hf",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n option_choices = {\n \"A\": doc[\"ending0\"],\n \"B\": doc[\"ending1\"],\n \"C\": doc[\"ending2\"],\n \"D\": doc[\"ending3\"],\n }\n answers = \"\".join((f\"{k}. {v}\\n\") for k, v in option_choices.items())\n return f\"Question: {doc['sent1']}\\n{answers}Answer:\"\n",
+ "doc_to_target": "def doc_to_target(doc) -> int:\n return doc[\"label\"]\n",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false
+ },
+ "mmlu_anatomy": {
+ "task": "mmlu_anatomy",
+ "task_alias": "anatomy (mmlu)",
+ "group": "multimedqa",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "anatomy",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_clinical_knowledge": {
+ "task": "mmlu_clinical_knowledge",
+ "task_alias": "clinical_knowledge (mmlu)",
+ "group": "multimedqa",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "clinical_knowledge",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_biology": {
+ "task": "mmlu_college_biology",
+ "task_alias": "college_biology (mmlu)",
+ "group": "multimedqa",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_biology",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_medicine": {
+ "task": "mmlu_college_medicine",
+ "task_alias": "college_medicine (mmlu)",
+ "group": "multimedqa",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_medicine",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_medical_genetics": {
+ "task": "mmlu_medical_genetics",
+ "task_alias": "medical_genetics (mmlu)",
+ "group": "multimedqa",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "medical_genetics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
  }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_medicine": {
+ "task": "mmlu_professional_medicine",
+ "task_alias": "professional_medicine (mmlu)",
+ "group": "multimedqa",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_medicine",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "pubmedqa": {
+ "task": "pubmedqa",
+ "dataset_path": "bigbio/pubmed_qa",
+ "dataset_name": "pubmed_qa_labeled_fold0_source",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n ctxs = \"\\n\".join(doc[\"CONTEXTS\"])\n return \"Abstract: {}\\nQuestion: {}\\nAnswer:\".format(\n ctxs,\n doc[\"QUESTION\"],\n )\n",
+ "doc_to_target": "final_decision",
+ "doc_to_choice": [
+ "yes",
+ "no",
+ "maybe"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
  }
+ },
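Every task above is "output_type": "multiple_choice": for each option in "doc_to_choice" the harness scores the log-likelihood of "target_delimiter" plus the option as a continuation of the prompt, and "acc" counts items where the argmax matches the target; "acc_norm" divides each log-likelihood by the option's byte length before the argmax. A toy sketch of that selection rule (loglikelihood_fn is a hypothetical stand-in for the model call, not a harness API):

def pick_answer(prompt, choices, loglikelihood_fn, normalize=False):
    """Return the index of the chosen option.

    loglikelihood_fn(context, continuation) -> float is an abstract stand-in
    for the model's log-probability of `continuation` given `context`.
    """
    scores = []
    for choice in choices:
        ll = loglikelihood_fn(prompt, " " + choice)  # " " = target_delimiter
        if normalize:  # acc_norm: normalize by byte length of the option
            ll /= len(choice.encode("utf-8"))
        scores.append(ll)
    return max(range(len(scores)), key=scores.__getitem__)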
+ "versions": {
+ "medmcqa": "Yaml",
+ "medqa_4options": "Yaml",
+ "mmlu_anatomy": 0.0,
+ "mmlu_clinical_knowledge": 0.0,
+ "mmlu_college_biology": 0.0,
+ "mmlu_college_medicine": 0.0,
+ "mmlu_medical_genetics": 0.0,
+ "mmlu_professional_medicine": 0.0,
+ "pubmedqa": 1.0
+ },
+ "n-shot": {
+ "medmcqa": null,
+ "medqa_4options": null,
+ "mmlu_anatomy": null,
+ "mmlu_clinical_knowledge": null,
+ "mmlu_college_biology": null,
+ "mmlu_college_medicine": null,
+ "mmlu_medical_genetics": null,
+ "mmlu_professional_medicine": null,
+ "multimedqa": null,
+ "pubmedqa": null
+ },
+ "config": {
+ "model_dtype": "torch.float16",
+ "model_name": "demo-leaderboard/gpt2-demo",
+ "model_sha": "ac3299b02780836378b9e1e68c6eead546e89f90"
+ },
+ "git_hash": "a3e56afe",
+ "pretty_env_info": "PyTorch version: 2.1.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: 14.0.0-1ubuntu1.1\nCMake version: version 3.27.9\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.1.58+-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.140\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: Tesla V100-SXM2-16GB\nNvidia driver version: 535.104.05\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.6\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 2\nOn-line CPU(s) list: 0,1\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.00GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 1\nSocket(s): 1\nStepping: 3\nBogoMIPS: 4000.32\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 32 KiB (1 instance)\nL1i cache: 32 KiB (1 instance)\nL2 cache: 1 MiB (1 instance)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0,1\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Mitigation; PTE Inversion\nVulnerability Mds: Vulnerable; SMT Host state unknown\nVulnerability Meltdown: Vulnerable\nVulnerability Mmio stale data: Vulnerable\nVulnerability Retbleed: Vulnerable\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Vulnerable\nVulnerability Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers\nVulnerability Spectre v2: Vulnerable, IBPB: disabled, STIBP: disabled, PBRSB-eIBRS: Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Vulnerable\n\nVersions of relevant libraries:\n[pip3] numpy==1.25.2\n[pip3] torch==2.1.0+cu121\n[pip3] torchaudio==2.1.0+cu121\n[pip3] torchdata==0.7.0\n[pip3] torchsummary==1.5.1\n[pip3] torchtext==0.16.0\n[pip3] torchvision==0.16.0+cu121\n[pip3] triton==2.1.0\n[conda] Could not collect",
+ "transformers_version": "4.38.2",
+ "upper_git_hash": null
  }
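Downstream, a leaderboard only needs the "config" block for model identity and the "results" block for scores. A minimal sketch of consuming the file, using the field names exactly as they appear above:

import json

with open("results_2023-11-21T18-10-08.json") as f:
    data = json.load(f)

model = data["config"]["model_name"]  # "demo-leaderboard/gpt2-demo"
scores = {task: m["acc,none"] for task, m in data["results"].items()}
print(model, scores["pubmedqa"])  # demo-leaderboard/gpt2-demo 0.678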