andrewlastmile committed (verified)
Commit 650da80 · Parent(s): b98c534

Update my_app.aiconfig.json

Files changed (1): my_app.aiconfig.json (+33 -12)

my_app.aiconfig.json CHANGED
@@ -14,23 +14,24 @@
   "description": "In this notebook, we compare the individual performance of HF hosted LLMs () on a few example questions from the GLUE benchmarks (https://gluebenchmark.com/tasks).\n\nExample questions taken from \"What is the GLUE Benchmark\" medium post - https://angelina-yang.medium.com/what-is-the-glue-benchmark-for-nlu-systems-61127b3cab3f",
   "prompts": [
     {
-      "name": "msft_phi_2",
-      "input": "{{CoLA_ex_prompt}}",
+      "name": "msft_phi_1.5",
+      "input": "{{SST_2_ex_prompt}}",
       "metadata": {
         "model": {
-          "name": "Text Generation",
+          "name": "Conversational",
           "settings": {
-            "model": "microsoft/phi-2"
+            "model": "microsoft/phi-1_5"
           }
         },
         "tags": null,
         "parameters": {}
       },
-      "outputs": null
+      "outputs": [
+      ]
     },
     {
       "name": "mistral_7b_v0.1",
-      "input": "{{CoLA_ex_prompt}}",
+      "input": "{{SST_2_ex_prompt}}",
       "metadata": {
         "model": {
           "name": "Text Generation",
@@ -41,22 +42,42 @@
         "tags": null,
         "parameters": {}
       },
-      "outputs": []
+      "outputs": [
+      ]
     },
     {
-      "name": "google_flan_t5_lg",
-      "input": "{{CoLA_ex_prompt}}",
+      "name": "google_flan_t5_sm",
+      "input": "{{SST_2_ex_prompt}}",
       "metadata": {
         "model": {
-          "name": "Text Generation",
+          "name": "Conversational",
+          "settings": {
+            "model": "google/flan-t5-small",
+            "max_new_tokens": 250,
+            "stream": false
+          }
+        },
+        "tags": null,
+        "parameters": {}
+      },
+      "outputs": [
+      ]
+    },
+    {
+      "name": "tinyllama-1_1B",
+      "input": "<|system|>\nYou are to answer the following question by the user</s>\n<|user|>\n{{SST_2_ex_prompt}}</s>\n<|assistant|>",
+      "metadata": {
+        "model": {
+          "name": "Conversational",
           "settings": {
-            "model": "google/flan-t5-large"
+            "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
           }
         },
         "tags": null,
         "parameters": {}
       },
-      "outputs": []
+      "outputs": [
+      ]
     }
   ]
 }
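For context (not part of the commit): a minimal sketch of how one of the prompts updated here might be executed with the aiconfig Python SDK. It assumes AIConfigRuntime.load / run / get_output_text from the aiconfig package, that the Hugging Face model parsers backing the "Conversational" model type are installed and registered (setup not shown), and a hypothetical SST-2-style value for the SST_2_ex_prompt parameter.

# Sketch only: run the updated "msft_phi_1.5" prompt from my_app.aiconfig.json.
# Assumes the Hugging Face "Conversational" model parsers are already registered.
import asyncio

from aiconfig import AIConfigRuntime


async def main():
    # Load the config file edited in this commit.
    config = AIConfigRuntime.load("my_app.aiconfig.json")

    # Fill the {{SST_2_ex_prompt}} template parameter (hypothetical example text).
    params = {
        "SST_2_ex_prompt": (
            "Is the sentiment of this sentence positive or negative? "
            "'The movie was a delightful surprise.'"
        )
    }

    # Run one of the prompts defined in the updated config and print its output.
    await config.run("msft_phi_1.5", params)
    print(config.get_output_text("msft_phi_1.5"))


asyncio.run(main())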