steinhaug committed on
Commit
7bbfbc9
1 Parent(s): abdda28

Upload folder using huggingface_hub

Browse files
Pygmalion-7b-Metharme/.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Pygmalion-7b-Metharme/README.md ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ thumbnail: null
5
+ tags:
6
+ - text generation
7
+ - instruct
8
+ pipeline_tag: text-generation
9
+ inference: false
10
+ ---
11
+ <h1 style="text-align: center">Metharme 7B 4bit</h1>
12
+ <h2 style="text-align: center">An instruction-tuned LLaMA biased towards fiction writing and conversation.</h2>
13
+
14
+ ## Model Details
15
+
16
+ Converted from the XORs weights from PygmalionAI's release https://huggingface.co/PygmalionAI/metharme-7b
17
+
18
+ Quantized for KoboldAI use using https://github.com/0cc4m/GPTQ-for-LLaMa
19
+
20
+ I created several quantized variations of this model and believe this variation to be "best." <br>
21
+ <html>
22
+ <head>
23
+ <style>
24
+ table {
25
+ border:1px solid #b3adad;
26
+ border-collapse:collapse;
27
+ padding:5px;
28
+ }
29
+ table th {
30
+ border:1px solid #b3adad;
31
+ padding:5px;
32
+ background: #f0f0f0;
33
+ color: #313030;
34
+ }
35
+ table td {
36
+ border:1px solid #b3adad;
37
+ text-align:center;
38
+ padding:5px;
39
+ background: #ffffff;
40
+ color: #313030;
41
+ }
42
+ </style>
43
+ </head>
44
+ <body>
45
+ <table>
46
+ <thead>
47
+ <tr>
48
+ <th>Model:</th>
49
+ <th>Wikitext 2</th>
50
+ <th>Ptb-New</th>
51
+ <th>C4-New</th>
52
+ </tr>
53
+ </thead>
54
+ <tbody>
55
+ <tr>
56
+ <td>Metharme 7b - 16bit</td>
57
+ <td>5.7208476066589355</td>
58
+ <td>41.61103439331055</td>
59
+ <td>7.512405872344971</td>
60
+ </tr>
61
+ <tr>
62
+ <td>Metharme 7b - 4bit --act-order</td>
63
+ <td>6.281311511993408</td>
64
+ <td>46.79158401489258</td>
65
+ <td>7.906069755554199</td>
66
+ </tr>
67
+ </tbody>
68
+ </table>
69
+ </body>
70
+ </html>
71
+ <br>Other benchmark scores are listed at the bottom of this readme.
72
+ <hr>
73
+ Metharme 7B is an instruct model based on Meta's LLaMA-7B.
74
+
75
+ This is an experiment to try and get a model that is usable for conversation, roleplaying and storywriting, but which can be guided using natural language like other instruct models. See the [prompting](#prompting) section below for examples.
76
+
77
+ It was trained by doing supervised fine-tuning over a mixture of regular instruction data alongside roleplay, fictional stories and conversations with synthetically generated instructions attached.
78
+
79
+ ## Prompting
80
+
81
+ The current model version has been trained on prompts using three different roles, which are denoted by the following tokens: `<|system|>`, `<|user|>` and `<|model|>`.
82
+
83
+ The `<|system|>` prompt can be used to inject out-of-channel information behind the scenes, while the `<|user|>` prompt should be used to indicate user input. The `<|model|>` token should then be used to indicate that the model should generate a response. These tokens can happen multiple times and be chained up to form a conversation history.
84
+
85
+ ### Prompting example
86
+
87
+ Here's a concrete example. Let's say you have the following system prompt:
88
+
89
+ > This is a text adventure game. Describe the scenario to the user and give him three options to pick from on each turn.
90
+
91
+ And the user has typed in the following message:
92
+
93
+ > Start!
94
+
95
+ To perform inference against the model, the prompt should be formatted like this:
96
+
97
+ ```
98
+ <|system|>This is a text adventure game. Describe the scenario to the user and give him three options to pick from on each turn.<|user|>Start!<|model|>
99
+ ```
100
+
101
+ The model might generate something like:
102
+
103
+ > You are standing in front of an old, abandoned house. The windows are boarded up, and there's no sign of life around it. As you approach, you notice a strange feeling emanating from within. Suddenly, you hear a voice calling out to you... 'Come inside!'
104
+ >
105
+ > - Go inside the house.
106
+ > - Ignore the call and move away.
107
+ > - Run as fast as you can.
108
+
109
+ It will automatically emit an end-of-text token (`</s>`) when it judges that the response is complete.
110
+
111
+ Now suppose the user has replied with `go inside`. The next prompt should then be:
112
+
113
+ ```
114
+ <|system|>This is a text adventure game. Describe the scenario to the user and give him three options to pick from on each turn.<|user|>Start!<|model|>You are standing in front of an old, abandoned house. The windows are boarded up, and there's no sign of life around it. As you approach, you notice a strange feeling emanating from within. Suddenly, you hear a voice calling out to you... 'Come inside!'
115
+
116
+ - Go inside the house.
117
+ - Ignore the call and move away.
118
+ - Run as fast as you can.<|user|>go inside<|model|>
119
+ ```
120
+
121
+ Which might generate something like:
122
+
123
+ > You enter the house, and immediately, a wave of unsettling energy washes over you. Your senses seem heightened, but your mind feels foggy. A figure appears before you - a tall man with piercing eyes. He speaks to you in a language you don't understand.
124
+ >
125
+ > - Ask him what he wants.
126
+ > - Attack him.
127
+ > - Run away.
128
+
129
+ Same process applies. Usually, it is best to do a sliding window over the user and model turns, but keep the system prompt fixed at the start of the context window.
130
+
131
+ ## Limitations and biases
132
+
133
+ The intended use-case for this model is fictional writing for entertainment purposes. Any other sort of usage is out of scope.
134
+
135
+ As such, it was **not** fine-tuned to be safe and harmless: the base model _and_ this fine-tune have been trained on data known to contain profanity and texts that are lewd or otherwise offensive. It may produce socially unacceptable or undesirable text, even if the prompt itself does not include anything explicitly offensive. Outputs might often be factually wrong or misleading.
136
+
137
+ <hr>
138
+ <p><strong><font size="5">Benchmarks of different quantized variations</font></strong></p>
139
+ <html>
140
+ <head>
141
+ <style>
142
+ table {
143
+ border:1px solid #b3adad;
144
+ border-collapse:collapse;
145
+ padding:5px;
146
+ }
147
+ table th {
148
+ border:1px solid #b3adad;
149
+ padding:5px;
150
+ background: #f0f0f0;
151
+ color: #313030;
152
+ }
153
+ table td {
154
+ border:1px solid #b3adad;
155
+ text-align:center;
156
+ padding:5px;
157
+ background: #ffffff;
158
+ color: #313030;
159
+ }
160
+ </style>
161
+ </head>
162
+ <body>
163
+ <table>
164
+ <thead>
165
+ <tr>
166
+ <th>GPTQ Variation:</th>
167
+ <th>Wikitext2</th>
168
+ <th>Ptb-New</th>
169
+ <th>C4-New</th>
170
+ </tr>
171
+ </thead>
172
+ <tbody>
173
+ <tr>
174
+ <td>Metharme 7b - 16bit</td>
175
+ <td>5.7208476066589355</td>
176
+ <td>41.61103439331055</td>
177
+ <td>7.512405872344971</td>
178
+ </tr>
179
+ <tr>
180
+ <td>4bit --act-order</td>
181
+ <td>6.281311511993408</td>
182
+ <td>46.79158401489258</td>
183
+ <td>7.906069755554199</td>
184
+ </tr>
185
+ <tr>
186
+ <td>4bit --act-order --sym</td>
187
+ <td>6.477842807769775</td>
188
+ <td>48.53507614135742</td>
189
+ <td>7.993765354156494</td>
190
+ </tr>
191
+ <tr>
192
+ <td>4bit alone</td>
193
+ <td>6.997134685516357</td>
194
+ <td>58.87525177001953</td>
195
+ <td>8.824191093444824</td>
196
+ </tr>
197
+ <tr>
198
+ <td>4bit --trits</td>
199
+ <td>7.832409858703613</td>
200
+ <td>55.383026123046875</td>
201
+ <td>8.806737899780273</td>
202
+ </tr>
203
+ <tr>
204
+ <td>4bit --groupsize 32</td>
205
+ <td>6.902080535888672</td>
206
+ <td>50.237754821777344</td>
207
+ <td>8.081602096557617</td>
208
+ </tr>
209
+ <tr>
210
+ <td>4bit --groupsize 128</td>
211
+ <td>10.206376075744629</td>
212
+ <td>49.00401306152344</td>
213
+ <td>8.198845863342285</td>
214
+ </tr>
215
+ <tr>
216
+ <td>4bit --true-sequential</td>
217
+ <td>7.735969543457031</td>
218
+ <td>62.92238235473633</td>
219
+ <td>8.913650512695312</td>
220
+ </tr>
221
+ <tr>
222
+ <td>4bit --true-sequential --groupsize 32</td>
223
+ <td>6.424218654632568</td>
224
+ <td>48.48588943481445</td>
225
+ <td>8.089512825012207</td>
226
+ </tr>
227
+ <tr>
228
+ <td>4bit --true-sequential --groupsize 128</td>
229
+ <td>9.475017547607422</td>
230
+ <td>52.358829498291016</td>
231
+ <td>8.340700149536133</td>
232
+ </tr>
233
+ <tr>
234
+ <td>4bit --act-order --groupsize 32 - ERROR</td>
235
+ <td>275425.5</td>
236
+ <td>267733.25</td>
237
+ <td>254506.71875</td>
238
+ </tr>
239
+ <tr>
240
+ <td>4bit --act-order --groupsize 128 - ERROR</td>
241
+ <td>251321.265625</td>
242
+ <td>250117.859375</td>
243
+ <td>232929.234375</td>
244
+ </tr>
245
+ </tbody>
246
+ </table>
247
+ </body>
248
+ </html>
Pygmalion-7b-Metharme/config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "metharme-7b",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 4096,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 11008,
12
+ "max_position_embeddings": 2048,
13
+ "max_sequence_length": 2048,
14
+ "model_type": "llama",
15
+ "num_attention_heads": 32,
16
+ "num_hidden_layers": 32,
17
+ "pad_token_id": 0,
18
+ "rms_norm_eps": 1e-06,
19
+ "tie_word_embeddings": false,
20
+ "torch_dtype": "bfloat16",
21
+ "transformers_version": "4.28.0.dev0",
22
+ "use_cache": true,
23
+ "vocab_size": 32000,
24
+ "badwordsids": [[0]]
25
+ }
Pygmalion-7b-Metharme/generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "pad_token_id": 0,
6
+ "transformers_version": "4.28.0.dev0"
7
+ }
Pygmalion-7b-Metharme/special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "unk_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
Pygmalion-7b-Metharme/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
Pygmalion-7b-Metharme/tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
Pygmalion-7b-Metharme/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<s>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "clean_up_tokenization_spaces": false,
13
+ "eos_token": {
14
+ "__type": "AddedToken",
15
+ "content": "</s>",
16
+ "lstrip": false,
17
+ "normalized": true,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "model_max_length": 2048,
22
+ "pad_token": null,
23
+ "padding_side": "right",
24
+ "sp_model_kwargs": {},
25
+ "special_tokens_map_file": "special_tokens_map.json",
26
+ "tokenizer_class": "LlamaTokenizer",
27
+ "unk_token": {
28
+ "__type": "AddedToken",
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false
34
+ }
35
+ }