Add files using upload-large-folder tool
- .gitattributes +1 -0
- README.md +676 -0
- config.json +39 -0
- config.yml +228 -0
- generation_config.json +7 -0
- measurement.json +0 -0
- model.safetensors.index.json +649 -0
- output-00001-of-00006.safetensors +3 -0
- output-00002-of-00006.safetensors +3 -0
- output-00003-of-00006.safetensors +3 -0
- output-00004-of-00006.safetensors +3 -0
- output-00005-of-00006.safetensors +3 -0
- output-00006-of-00006.safetensors +3 -0
- special_tokens_map.json +23 -0
- tokenizer.json +3 -0
- tokenizer_config.json +330 -0
- upload.py +45 -0
.gitattributes
CHANGED
```diff
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
```
README.md
ADDED
@@ -0,0 +1,676 @@
---
inference: false
license: cc-by-nc-4.0
library_name: transformers
language:
- en
- fr
- de
- es
- it
- pt
- ja
- ko
- zh
- ar
extra_gated_prompt: "By submitting this form, you agree to the [License Agreement](https://cohere.com/c4ai-cc-by-nc-license) and acknowledge that the information you provide will be collected, used, and shared in accordance with Cohere’s [Privacy Policy](https://cohere.com/privacy)."
extra_gated_fields:
  Name: text
  Affiliation: text
  Country:
    type: select
    options:
      - Aruba
      - Afghanistan
      - Angola
      - Anguilla
      - Åland Islands
      - Albania
      - Andorra
      - United Arab Emirates
      - Argentina
      - Armenia
      - American Samoa
      - Antarctica
      - French Southern Territories
      - Antigua and Barbuda
      - Australia
      - Austria
      - Azerbaijan
      - Burundi
      - Belgium
      - Benin
      - Bonaire Sint Eustatius and Saba
      - Burkina Faso
      - Bangladesh
      - Bulgaria
      - Bahrain
      - Bahamas
      - Bosnia and Herzegovina
      - Saint Barthélemy
      - Belarus
      - Belize
      - Bermuda
      - Plurinational State of Bolivia
      - Brazil
      - Barbados
      - Brunei-Darussalam
      - Bhutan
      - Bouvet-Island
      - Botswana
      - Central African Republic
      - Canada
      - Cocos (Keeling) Islands
      - Switzerland
      - Chile
      - China
      - Côte-dIvoire
      - Cameroon
      - Democratic Republic of the Congo
      - Cook Islands
      - Colombia
      - Comoros
      - Cabo Verde
      - Costa Rica
      - Cuba
      - Curaçao
      - Christmas Island
      - Cayman Islands
      - Cyprus
      - Czechia
      - Germany
      - Djibouti
      - Dominica
      - Denmark
      - Dominican Republic
      - Algeria
      - Ecuador
      - Egypt
      - Eritrea
      - Western Sahara
      - Spain
      - Estonia
      - Ethiopia
      - Finland
      - Fiji
      - Falkland Islands (Malvinas)
      - France
      - Faroe Islands
      - Federated States of Micronesia
      - Gabon
      - United Kingdom
      - Georgia
      - Guernsey
      - Ghana
      - Gibraltar
      - Guinea
      - Guadeloupe
      - Gambia
      - Guinea Bissau
      - Equatorial Guinea
      - Greece
      - Grenada
      - Greenland
      - Guatemala
      - French Guiana
      - Guam
      - Guyana
      - Hong Kong
      - Heard Island and McDonald Islands
      - Honduras
      - Croatia
      - Haiti
      - Hungary
      - Indonesia
      - Isle of Man
      - India
      - British Indian Ocean Territory
      - Ireland
      - Islamic Republic of Iran
      - Iraq
      - Iceland
      - Israel
      - Italy
      - Jamaica
      - Jersey
      - Jordan
      - Japan
      - Kazakhstan
      - Kenya
      - Kyrgyzstan
      - Cambodia
      - Kiribati
      - Saint-Kitts-and-Nevis
      - South Korea
      - Kuwait
      - Lao-Peoples-Democratic-Republic
      - Lebanon
      - Liberia
      - Libya
      - Saint-Lucia
      - Liechtenstein
      - Sri Lanka
      - Lesotho
      - Lithuania
      - Luxembourg
      - Latvia
      - Macao
      - Saint Martin (French-part)
      - Morocco
      - Monaco
      - Republic of Moldova
      - Madagascar
      - Maldives
      - Mexico
      - Marshall Islands
      - North Macedonia
      - Mali
      - Malta
      - Myanmar
      - Montenegro
      - Mongolia
      - Northern Mariana Islands
      - Mozambique
      - Mauritania
      - Montserrat
      - Martinique
      - Mauritius
      - Malawi
      - Malaysia
      - Mayotte
      - Namibia
      - New Caledonia
      - Niger
      - Norfolk Island
      - Nigeria
      - Nicaragua
      - Niue
      - Netherlands
      - Norway
      - Nepal
      - Nauru
      - New Zealand
      - Oman
      - Pakistan
      - Panama
      - Pitcairn
      - Peru
      - Philippines
      - Palau
      - Papua New Guinea
      - Poland
      - Puerto Rico
      - North Korea
      - Portugal
      - Paraguay
      - State of Palestine
      - French Polynesia
      - Qatar
      - Réunion
      - Romania
      - Russia
      - Rwanda
      - Saudi Arabia
      - Sudan
      - Senegal
      - Singapore
      - South Georgia and the South Sandwich Islands
      - Saint Helena Ascension and Tristan da Cunha
      - Svalbard and Jan Mayen
      - Solomon Islands
      - Sierra Leone
      - El Salvador
      - San Marino
      - Somalia
      - Saint Pierre and Miquelon
      - Serbia
      - South Sudan
      - Sao Tome and Principe
      - Suriname
      - Slovakia
      - Slovenia
      - Sweden
      - Eswatini
      - Sint Maarten (Dutch-part)
      - Seychelles
      - Syrian Arab Republic
      - Turks and Caicos Islands
      - Chad
      - Togo
      - Thailand
      - Tajikistan
      - Tokelau
      - Turkmenistan
      - Timor Leste
      - Tonga
      - Trinidad and Tobago
      - Tunisia
      - Turkey
      - Tuvalu
      - Taiwan
      - United Republic of Tanzania
      - Uganda
      - Ukraine
      - United States Minor Outlying Islands
      - Uruguay
      - United-States
      - Uzbekistan
      - Holy See (Vatican City State)
      - Saint Vincent and the Grenadines
      - Bolivarian Republic of Venezuela
      - Virgin Islands British
      - Virgin Islands U.S.
      - VietNam
      - Vanuatu
      - Wallis and Futuna
      - Samoa
      - Yemen
      - South Africa
      - Zambia
      - Zimbabwe
  Receive email updates on C4AI and Cohere research, events, products and services?:
    type: select
    options:
      - Yes
      - No
  I agree to use this model for non-commercial use ONLY: checkbox
---

# Model Card for C4AI Command R+

🚨 **This model is the non-quantized version of C4AI Command R+. You can find the version of C4AI Command R+ quantized using bitsandbytes [here](https://huggingface.co/CohereForAI/c4ai-command-r-plus-4bit)**.

## Model Summary

C4AI Command R+ is an open weights research release of a 104 billion parameter model with highly advanced capabilities, including Retrieval Augmented Generation (RAG) and tool use to automate sophisticated tasks. The tool use in this model generation enables multi-step tool use, which allows the model to combine multiple tools over multiple steps to accomplish difficult tasks. C4AI Command R+ is a multilingual model evaluated for performance in 10 languages: English, French, Spanish, Italian, German, Brazilian Portuguese, Japanese, Korean, Arabic, and Simplified Chinese. Command R+ is optimized for a variety of use cases including reasoning, summarization, and question answering.

C4AI Command R+ is part of a family of open weight releases from Cohere For AI and Cohere. Our smaller companion model is [C4AI Command R](https://huggingface.co/CohereForAI/c4ai-command-r-v01).

Developed by: [Cohere](https://cohere.com/) and [Cohere For AI](https://cohere.for.ai)

- Point of Contact: Cohere For AI: [cohere.for.ai](https://cohere.for.ai/)
- License: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license); use also requires adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
- Model: c4ai-command-r-plus
- Model Size: 104 billion parameters
- Context length: 128K

**Try C4AI Command R+**

You can try out C4AI Command R+ before downloading the weights in our hosted [Hugging Face Space](https://huggingface.co/spaces/CohereForAI/c4ai-command?model=command-r-plus).

**Usage**

Please install `transformers` from the source repository that includes the necessary changes for this model.

```python
# pip install 'git+https://github.com/huggingface/transformers.git'
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "CohereForAI/c4ai-command-r-plus"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Format the message with the command-r-plus chat template
messages = [{"role": "user", "content": "Hello, how are you?"}]
input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

gen_tokens = model.generate(
    input_ids,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.3,
)

gen_text = tokenizer.decode(gen_tokens[0])
print(gen_text)
```

**Quantized model through bitsandbytes, 8-bit precision**

```python
# pip install 'git+https://github.com/huggingface/transformers.git' bitsandbytes accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_8bit=True)

model_id = "CohereForAI/c4ai-command-r-plus"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)

# Format the message with the command-r-plus chat template
messages = [{"role": "user", "content": "Hello, how are you?"}]
input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

gen_tokens = model.generate(
    input_ids,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.3,
)

gen_text = tokenizer.decode(gen_tokens[0])
print(gen_text)
```

**Quantized model through bitsandbytes, 4-bit precision**

This model card is for the non-quantized version of C4AI Command R+. You can find the version quantized to 4-bit precision using bitsandbytes [here](https://huggingface.co/CohereForAI/c4ai-command-r-plus-4bit).

## Model Details

**Input**: Models input text only.

**Output**: Models generate text only.

**Model Architecture**: This is an auto-regressive language model that uses an optimized transformer architecture. After pretraining, this model uses supervised fine-tuning (SFT) and preference training to align model behavior to human preferences for helpfulness and safety.

**Languages covered**: The model is optimized to perform well in the following languages: English, French, Spanish, Italian, German, Brazilian Portuguese, Japanese, Korean, Simplified Chinese, and Arabic.

Pre-training data additionally included the following 13 languages: Russian, Polish, Turkish, Vietnamese, Dutch, Czech, Indonesian, Ukrainian, Romanian, Greek, Hindi, Hebrew, and Persian.

**Context length**: Command R+ supports a context length of 128K.

## Evaluations

Command R+ has been submitted to the [Open LLM leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). We include the results below, along with a direct comparison to the strongest state-of-the-art open weights models currently available on Hugging Face. We note that these results are only useful for comparison when evaluations are implemented for all models in a [standardized way](https://github.com/EleutherAI/lm-evaluation-harness) using publicly available code, and hence shouldn't be used for comparison outside of models submitted to the leaderboard, or compared to self-reported numbers which can't be replicated in the same way.

| Model | Average | Arc (Challenge) | Hella Swag | MMLU | Truthful QA | Winogrande | GSM8k |
|:--------------------------------|----------:|------------------:|-------------:|-------:|--------------:|-------------:|--------:|
| **CohereForAI/c4ai-command-r-plus** | 74.6 | 70.99 | 88.6 | 75.7 | 56.3 | 85.4 | 70.7 |
| [DBRX Instruct](https://huggingface.co/databricks/dbrx-instruct) | 74.5 | 68.9 | 89 | 73.7 | 66.9 | 81.8 | 66.9 |
| [Mixtral 8x7B-Instruct](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) | 72.7 | 70.1 | 87.6 | 71.4 | 65 | 81.1 | 61.1 |
| [Mixtral 8x7B Chat](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) | 72.6 | 70.2 | 87.6 | 71.2 | 64.6 | 81.4 | 60.7 |
| [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) | 68.5 | 65.5 | 87 | 68.2 | 52.3 | 81.5 | 56.6 |
| [Llama 2 70B](https://huggingface.co/meta-llama/Llama-2-70b-hf) | 67.9 | 67.3 | 87.3 | 69.8 | 44.9 | 83.7 | 54.1 |
| [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) | 65.3 | 65.4 | 84.2 | 74.9 | 55.4 | 80.1 | 31.9 |
| [Gemma-7B](https://huggingface.co/google/gemma-7b) | 63.8 | 61.1 | 82.2 | 64.6 | 44.8 | 79 | 50.9 |
| [LLama 2 70B Chat](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) | 62.4 | 64.6 | 85.9 | 63.9 | 52.8 | 80.5 | 26.7 |
| [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) | 61 | 60 | 83.3 | 64.2 | 42.2 | 78.4 | 37.8 |

We include these metrics here because they are frequently requested, but note that they do not capture RAG, multilingual, or tooling performance, or the evaluation of open-ended generations, which we believe Command R+ to be state-of-the-art at. For evaluations of RAG, multilingual, and tooling performance, read more [here](https://txt.cohere.com/command-r-plus-microsoft-azure/). For evaluation of open-ended generation, Command R+ is currently being evaluated on the [chatbot arena](https://chat.lmsys.org/).

### Grounded Generation and RAG Capabilities:

Command R+ has been specifically trained with grounded generation capabilities. This means that it can generate responses based on a list of supplied document snippets, and it will include grounding spans (citations) in its response indicating the source of the information. This can be used to enable behaviors such as grounded summarization and the final step of Retrieval Augmented Generation (RAG). This behavior has been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template may reduce performance, but we encourage experimentation.

Command R+’s grounded generation behavior takes a conversation as input (with an optional user-supplied system preamble, indicating task, context and desired output style), along with a list of retrieved document snippets. The document snippets should be chunks, rather than long documents, typically around 100-400 words per chunk. Document snippets consist of key-value pairs. The keys should be short descriptive strings; the values can be text or semi-structured.

By default, Command R+ will generate grounded responses by first predicting which documents are relevant, then predicting which ones it will cite, then generating an answer. Finally, it will insert grounding spans into the answer. See below for an example. This is referred to as `accurate` grounded generation.

The model is trained with a number of other answering modes, which can be selected by prompt changes. A `fast` citation mode is supported in the tokenizer, which will directly generate an answer with grounding spans in it, without first writing the answer out in full. This sacrifices some grounding accuracy in favor of generating fewer tokens.

Comprehensive documentation for working with Command R+'s grounded generation prompt template can be found [here](https://docs.cohere.com/docs/prompting-command-r).

The code snippet below shows a minimal working example of how to render a prompt.

<details>
<summary> <b>Usage: Rendering Grounded Generation prompts [CLICK TO EXPAND]</b> </summary>

````python
from transformers import AutoTokenizer

model_id = "CohereForAI/c4ai-command-r-plus"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# define conversation input:
conversation = [
    {"role": "user", "content": "Whats the biggest penguin in the world?"}
]
# define documents to ground on:
documents = [
    {"title": "Tall penguins", "text": "Emperor penguins are the tallest growing up to 122 cm in height."},
    {"title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."}
]

# render the grounded generation prompt as a string:
grounded_generation_prompt = tokenizer.apply_grounded_generation_template(
    conversation,
    documents=documents,
    citation_mode="accurate",  # or "fast"
    tokenize=False,
    add_generation_prompt=True,
)
print(grounded_generation_prompt)
````
</details>

<details>
<summary><b>Example Rendered Grounded Generation Prompt [CLICK TO EXPAND]</b></summary>

````
<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.

# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.

# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.

## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results>
Document: 0
title: Tall penguins
text: Emperor penguins are the tallest growing up to 122 cm in height.

Document: 1
title: Penguin habitats
text: Emperor penguins only live in Antarctica.
</results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line.
Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'.
Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'.
Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup.
Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
````

</details>

<details>
<summary><b>Example Rendered Grounded Generation Completion [CLICK TO EXPAND]</b></summary>

````
Relevant Documents: 0,1
Cited Documents: 0,1
Answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres.
Grounded answer: The <co: 0>Emperor Penguin</co: 0> is the <co: 0>tallest</co: 0> or biggest penguin in the world. It is a bird that <co: 1>lives only in Antarctica</co: 1> and <co: 0>grows to a height of around 122 centimetres.</co: 0>
````
</details>

### Single-Step Tool Use Capabilities ("Function Calling"):

Single-step tool use (or “Function Calling”) allows Command R+ to interact with external tools like APIs, databases, or search engines. Single-step tool use consists of two model inferences:
- Tool Selection: The model decides which tools to call and with what parameters. It’s then up to the developer to execute these tool calls and obtain tool results.
- Response Generation: The model generates the final response given the tool results.

You can learn more about single-step tool use in our [documentation](https://docs.cohere.com/docs/tool-use).

Command R+ has been specifically trained with single-step tool use (or “Function Calling”) capabilities. These have been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template may reduce performance. This is why we recommend using the prompt template described below.

Command R+’s single-step tool use functionality takes a conversation as input (with an optional user-supplied system preamble), along with a list of available tools. The model will then generate a JSON-formatted list of actions to execute on a subset of those tools. Command R+ may use one of its supplied tools more than once.

The model has been trained to recognise a special `directly_answer` tool, which it uses to indicate that it doesn’t want to use any of its other tools. The ability to abstain from calling a specific tool can be useful in a range of situations, such as greeting a user, or asking clarifying questions. We recommend including the `directly_answer` tool, but it can be removed or renamed if required.

Comprehensive documentation for working with Command R+'s single-step tool use prompt template can be found [here](https://docs.cohere.com/docs/prompting-command-r#single-step-tool-use-with-command-rr-function-calling) and [here](https://docs.cohere.com/docs/prompting-command-r#single-step-tool-use-with-command-rr-function-calling-1).

You can render the single-step tool use prompt template by using the function `apply_tool_use_template()`. The code snippet below shows a minimal working example of how to render this prompt.

Command R+ also supports Hugging Face's [tool use API](https://huggingface.co/docs/transformers/main/en/chat_templating#advanced-tool-use--function-calling) to render the same prompt.

<details>
<summary><b>Usage: Rendering Single-Step Tool Use Prompts [CLICK TO EXPAND]</b> </summary>

```python
from transformers import AutoTokenizer

model_id = "CohereForAI/c4ai-command-r-plus"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# define conversation input:
conversation = [
    {"role": "user", "content": "Whats the biggest penguin in the world?"}
]
# Define tools available for the model to use:
tools = [
    {
        "name": "internet_search",
        "description": "Returns a list of relevant document snippets for a textual query retrieved from the internet",
        "parameter_definitions": {
            "query": {
                "description": "Query to search the internet with",
                "type": "str",
                "required": True
            }
        }
    },
    {
        "name": "directly_answer",
        "description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history",
        "parameter_definitions": {}
    }
]

# render the tool use prompt as a string:
tool_use_prompt = tokenizer.apply_tool_use_template(
    conversation,
    tools=tools,
    tokenize=False,
    add_generation_prompt=True,
)
print(tool_use_prompt)
```

</details>

<details>
<summary><b>Usage: Rendering prompts with the Single-Step Tool Use API [CLICK TO EXPAND]</b> </summary>

```python
from transformers import AutoTokenizer

model_id = "CohereForAI/c4ai-command-r-plus"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# define conversation input:
conversation = [
    {"role": "user", "content": "Whats the biggest penguin in the world?"}
]

# Define tools available for the model to use
# Type hints and docstrings from Python functions are automatically extracted
def internet_search(query: str):
    """
    Returns a list of relevant document snippets for a textual query retrieved from the internet

    Args:
        query: Query to search the internet with
    """
    pass

def directly_answer():
    """
    Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history
    """
    pass

tools = [internet_search, directly_answer]

# render the tool use prompt as a string:
tool_use_prompt = tokenizer.apply_chat_template(
    conversation,
    tools=tools,
    tokenize=False,
    add_generation_prompt=True,
)
print(tool_use_prompt)
```

</details>

<details>
<summary><b>Example Rendered Single-Step Tool Use Prompt [CLICK TO EXPAND]</b></summary>

````
<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.

# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.

# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.

## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.

## Available Tools
Here is a list of tools that you have available to you:

```python
def internet_search(query: str) -> List[Dict]:
    """Returns a list of relevant document snippets for a textual query retrieved from the internet

    Args:
        query (str): Query to search the internet with
    """
    pass
```

```python
def directly_answer() -> List[Dict]:
    """Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history
    """
    pass
```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:
```json
[
    {
        "tool_name": title of the tool in the specification,
        "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters
    }
]```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
````

</details>

<details>
<summary><b>Example Rendered Single-Step Tool Use Completion [CLICK TO EXPAND]</b></summary>

````
Action: ```json
[
    {
        "tool_name": "internet_search",
        "parameters": {
            "query": "biggest penguin in the world"
        }
    }
]
```
````
</details>

### Multi-Step Tool Use Capabilities ("Agents"):

Multi-step tool use is suited for building agents that can plan and execute a sequence of actions using multiple tools. Unlike single-step tool use, the model can perform several inference cycles, iterating through Action → Observation → Reflection until it decides on a final response. For more details, refer to our [documentation on multi-step tool use](https://docs.cohere.com/docs/multi-step-tool-use).

Command R+ has been specifically trained with multi-step tool use (or “Agents”) capabilities. These have been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template may reduce performance. This is why we recommend using the prompt template described below.

The prompt template is not yet available in the Hugging Face tokenizer. However, comprehensive documentation for working with Command R+'s multi-step tool use prompt template can be found [here](https://docs.cohere.com/docs/prompting-command-r#multi-step-tool-use-with-command-rr-agents) and [here](https://docs.cohere.com/docs/prompting-command-r#multihop-tool-use-with-command-rr-agents).

### Code Capabilities:

Command R+ has been optimized to interact with your code by requesting code snippets, code explanations, or code rewrites. It might not perform well out-of-the-box for pure code completion. For better performance, we also recommend using a low temperature (and even greedy decoding) for code-generation related instructions, as sketched below.
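
As a minimal sketch of that recommendation, greedy decoding can be selected by disabling sampling. This reuses the `tokenizer` and `model` objects from the **Usage** section above; the prompt itself is illustrative.

```python
# Greedy decoding for a code-generation instruction, per the recommendation
# above. Reuses the tokenizer/model loaded in the Usage section.
messages = [{"role": "user", "content": "Write a Python function that reverses a string."}]
input_ids = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
)

gen_tokens = model.generate(
    input_ids,
    max_new_tokens=256,
    do_sample=False,  # greedy decoding; or keep do_sample=True with a low temperature
)
print(tokenizer.decode(gen_tokens[0]))
```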

### Model Card Contact
For errors or additional questions about details in this model card, contact [[email protected]](mailto:[email protected]).

### Terms of Use:
We hope that the release of this model will make community-based research efforts more accessible by releasing the weights of a highly performant 104 billion parameter model to researchers all over the world. This model is governed by a [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license) License with an acceptable use addendum, and also requires adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).

### Try Chat:
You can try Command R+ chat in the playground [here](https://dashboard.cohere.com/playground/chat). You can also use it in our dedicated Hugging Face Space [here](https://huggingface.co/spaces/CohereForAI/c4ai-command-r-plus).
config.json
ADDED
@@ -0,0 +1,39 @@
```json
{
  "architectures": [
    "CohereForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 5,
  "eos_token_id": 255001,
  "hidden_act": "silu",
  "hidden_size": 12288,
  "initializer_range": 0.02,
  "intermediate_size": 33792,
  "layer_norm_eps": 1e-05,
  "logit_scale": 0.8333333333333334,
  "max_position_embeddings": 8192,
  "model_max_length": 131072,
  "model_type": "cohere",
  "num_attention_heads": 96,
  "num_hidden_layers": 64,
  "num_key_value_heads": 8,
  "pad_token_id": 0,
  "rope_theta": 75000000.0,
  "torch_dtype": "float16",
  "transformers_version": "4.40.0.dev0",
  "use_cache": true,
  "use_qk_norm": true,
  "vocab_size": 256000,
  "quantization_config": {
    "quant_method": "exl2",
    "version": "0.2.1",
    "bits": 3.2,
    "head_bits": 6,
    "calibration": {
      "rows": 115,
      "length": 2048,
      "dataset": "(default)"
    }
  }
}
```
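Note that the `quantization_config` above marks these weights as an ExLlamaV2 (exl2) quant at roughly 3.2 bits per weight with a 6-bit head, so the `output-*.safetensors` shards in this repo target the exllamav2 loader rather than plain `transformers`. A minimal loading sketch following exllamav2's documented example pattern; the local directory path is an assumption:

```python
# Minimal sketch of loading this exl2 quant with exllamav2, following the
# library's example pattern. The local model directory path is an assumption.
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer

config = ExLlamaV2Config()
config.model_dir = "models/command-r-plus-3.2bpw-h6-exl2"  # hypothetical local path
config.prepare()

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)  # split the weights across available GPUs
tokenizer = ExLlamaV2Tokenizer(config)
```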
config.yml
ADDED
@@ -0,0 +1,228 @@
```yaml
# Sample YAML file for configuration.
# Comment and uncomment values as needed. Every value has a default within the application.
# This file serves as a drop-in for config.yml

# Unless specified in the comments, DO NOT put these options in quotes!
# You can use https://www.yamllint.com/ if you want to check your YAML formatting.

# Options for networking
network:
  # The IP to host on (default: 127.0.0.1).
  # Use 0.0.0.0 to expose on all network adapters
  host: 0.0.0.0

  # The port to host on (default: 5000)
  port: 5000

  # Disable HTTP token authentication for requests
  # WARNING: This will make your instance vulnerable!
  # Turn on this option if you are ONLY connecting from localhost
  disable_auth: False

  # Send tracebacks over the API to clients (default: False)
  # NOTE: Only enable this for debug purposes
  send_tracebacks: False

  # Select API servers to enable (default: ["OAI"])
  # Possible values: OAI
  api_servers: ["OAI"]

# Options for logging
logging:
  # Enable prompt logging (default: False)
  prompt: False

  # Enable generation parameter logging (default: False)
  generation_params: False

  # Enable request logging (default: False)
  # NOTE: Only use this for debugging!
  requests: False

# Options for sampling
sampling:
  # Override preset name. Find this in the sampler-overrides folder (default: None)
  # This overrides default fallbacks for sampler values that are passed to the API
  # Server-side overrides are NOT needed by default
  # WARNING: Using this can result in a generation speed penalty
  #override_preset:

# Options for development and experimentation
developer:
  # Skips the exllamav2 version check (default: False)
  # It's highly recommended to update your dependencies rather than enabling this flag
  # WARNING: Don't set this unless you know what you're doing!
  #unsafe_launch: False

  # Disable all request streaming (default: False)
  # A kill switch for turning off SSE in the API server
  #disable_request_streaming: False

  # Enable the torch CUDA malloc backend (default: False)
  # This can save a few MBs of VRAM, but has a risk of errors. Use at your own risk.
  cuda_malloc_backend: True

  # Enable Uvloop or Winloop (default: False)
  # Makes the program utilize a faster async event loop, which can improve performance
  # NOTE: It's recommended to enable this, but if something breaks, turn this off.
  uvloop: True

  # Set the process to use a higher priority
  # For realtime process priority, run as administrator or sudo
  # Otherwise, the priority will be set to high
  realtime_process_priority: True

# Options for model overrides and loading
# Please read the comments to understand how arguments are handled between initial and API loads
model:
  # Overrides the directory to look for models (default: models)
  # Windows users, DO NOT put this path in quotes! The directory will be invalid otherwise.
  model_dir: models

  # Sends dummy model names when the models endpoint is queried
  # Enable this if the program is looking for a specific OAI model
  #use_dummy_models: False

  # An initial model to load. Make sure the model is located in the model directory!
  # A model can be loaded later via the API.
  # REQUIRED: This must be filled out to load a model on startup!
  model_name: command-r-plus-3.2bpw-h6-exl2

  # The below parameters only apply for initial loads
  # All API based loads do NOT inherit these settings unless specified in use_as_default

  # Names of args to use as a default fallback for API load requests (default: [])
  # For example, if you always want cache_mode to be Q4 (not just on the initial model load),
  # add "cache_mode" to this array
  # Ex. ["max_seq_len", "cache_mode"]
  #use_as_default: []

  # The below parameters apply only if model_name is set

  # Max sequence length (default: Empty)
  # Fetched from the model's base sequence length in config.json by default
  max_seq_len: 32768

  # Overrides the base model context length (default: Empty)
  # WARNING: Don't set this unless you know what you're doing!
  # Again, do NOT use this for configuring context length, use max_seq_len above ^
  # Only use this if the model's base sequence length in config.json is incorrect (ex. Mistral 7B)
  #override_base_seq_len:

  # Load the model with tensor parallelism
  # If a GPU split isn't provided, the TP loader will fall back to autosplit
  # Enabling this ignores the gpu_split_auto and autosplit_reserve values
  #tensor_parallel: True

  # Automatically allocate resources to GPUs (default: True)
  # NOTE: Not parsed for single GPU users
  gpu_split_auto: True

  # Reserve VRAM used for autosplit loading (default: 96 MB on GPU 0)
  # This is represented as an array of MB per GPU used
  autosplit_reserve: [0]

  # An integer array of GBs of VRAM to split between GPUs (default: [])
  # Used with tensor parallelism
  # NOTE: Not parsed for single GPU users
  #gpu_split: [20.6, 24]

  # Rope scale (default: 1.0)
  # Same thing as compress_pos_emb
  # Only use if your model was trained on long context with rope (check config.json)
  # Leave blank to pull the value from the model
  #rope_scale: 1.0

  # Rope alpha (default: 1.0)
  # Same thing as alpha_value
  # Leave blank to automatically calculate alpha
  #rope_alpha: 1.0

  # Enable different cache modes for VRAM savings (slight performance hit).
  # Possible values: FP16, Q8, Q6, Q4. (default: FP16)
  cache_mode: Q4

  # Size of the prompt cache to allocate (default: max_seq_len)
  # This must be a multiple of 256. A larger cache uses more VRAM, but allows for more prompts to be processed at once.
  # NOTE: Cache size should not be less than max_seq_len.
  # For CFG, set this to 2 * max_seq_len to make room for both positive and negative prompts.
  #cache_size:

  # Chunk size for prompt ingestion. A lower value reduces VRAM usage at the cost of ingestion speed (default: 2048)
  # NOTE: Effects vary depending on the model. An ideal value is between 512 and 4096
  chunk_size: 1536

  # Set the maximum number of prompts to process at one time (default: None/Automatic)
  # This will be automatically calculated if left blank.
  # A max batch size of 1 processes prompts one at a time.
  # NOTE: Only available for Nvidia Ampere (30 series) and above GPUs
  #max_batch_size:

  # Set the prompt template for this model. If empty, attempts to look for the model's chat template. (default: None)
  # If a model contains multiple templates in its tokenizer_config.json, set prompt_template to the name
  # of the template you want to use.
  # NOTE: Only works with chat completion message lists!
  #prompt_template:

  # Number of experts to use PER TOKEN. Fetched from the model's config.json if not specified (default: Empty)
  # WARNING: Don't set this unless you know what you're doing!
  # NOTE: For MoE models (ex. Mixtral) only!
  #num_experts_per_token:

  # Enables fasttensors to possibly increase model loading speeds (default: False)
  fasttensors: True

# Options for draft models (speculative decoding). This will use more VRAM!
#draft:
  # Overrides the directory to look for draft models (default: models)
  #draft_model_dir: models

  # An initial draft model to load. Make sure this model is located in the model directory!
  # A draft model can be loaded later via the API.
  #draft_model_name: A model name

  # The below parameters only apply for initial loads
  # All API based loads do NOT inherit these settings unless specified in use_as_default

  # Rope scale for draft models (default: 1.0)
  # Same thing as compress_pos_emb
  # Only use if your draft model was trained on long context with rope (check config.json)
  #draft_rope_scale: 1.0

  # Rope alpha for draft models (default: 1.0)
  # Same thing as alpha_value
  # Leave blank to automatically calculate the alpha value
  #draft_rope_alpha: 1.0

  # Enable different draft model cache modes for VRAM savings (slight performance hit).
  # Possible values: FP16, Q8, Q6, Q4. (default: FP16)
  #draft_cache_mode: FP16

# Options for loras
#lora:
  # Overrides the directory to look for loras (default: loras)
  #lora_dir: loras

  # List of loras to load and associated scaling factors (default: 1.0). Comment out unused entries or add more rows as needed.
  #loras:
  #- name: lora1
  #  scaling: 1.0

# Options for embedding models and loading.
# NOTE: Embeddings requires the "extras" feature to be installed
# Install it via "pip install .[extras]"
embeddings:
  # Overrides the directory to look for embedding models (default: models)
  embedding_model_dir: models

  # Device to load embedding models on (default: cpu)
  # Possible values: cpu, auto, cuda
  # NOTE: It's recommended to load embedding models on the CPU.
  # If you'd like to load on an AMD GPU, set this value to "cuda" as well.
  embeddings_device: cpu

  # The below parameters only apply for initial loads
  # All API based loads do NOT inherit these settings unless specified in use_as_default

  # An initial embedding model to load on the infinity backend (default: None)
  embedding_model_name:
```
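This appears to be a TabbyAPI-style serving configuration pointed at the exl2 quant above. With the `OAI` server enabled on port 5000, the endpoint speaks the OpenAI chat-completions wire format; below is a minimal request sketch, where the exact route and auth header are assumptions based on that compatibility and the API key is a placeholder:

```python
# Minimal sketch of querying the OAI-compatible server configured above.
# Assumes an OpenAI-style /v1/chat/completions route on port 5000; the
# api_key value is a placeholder for the server's generated token.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/v1/chat/completions",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    json={
        "model": "command-r-plus-3.2bpw-h6-exl2",
        "messages": [{"role": "user", "content": "Hello, how are you?"}],
        "max_tokens": 100,
        "temperature": 0.3,
    },
    timeout=120,
)
print(resp.json()["choices"][0]["message"]["content"])
```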
generation_config.json
ADDED
@@ -0,0 +1,7 @@
```json
{
  "_from_model_config": true,
  "bos_token_id": 5,
  "eos_token_id": 255001,
  "pad_token_id": 0,
  "transformers_version": "4.40.0.dev0"
}
```
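`transformers` picks these generation defaults up automatically at load time; they can also be inspected directly. A small sketch, using the upstream repo id as an assumption:

```python
# Sketch: inspect the generation defaults transformers reads from this file.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("CohereForAI/c4ai-command-r-plus")
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)
# expected per the file above: 5 255001 0
```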
measurement.json
ADDED
The diff for this file is too large to render.
model.safetensors.index.json
ADDED
@@ -0,0 +1,649 @@
```json
{
  "metadata": {
    "total_size": 207621349376
  },
  "weight_map": {
    "model.embed_tokens.weight": "model-00001-of-00044.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.self_attn.k_norm.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.self_attn.q_norm.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00003-of-00044.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00003-of-00044.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00003-of-00044.safetensors",
    "model.layers.1.self_attn.k_norm.weight": "model-00002-of-00044.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.1.self_attn.q_norm.weight": "model-00002-of-00044.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00002-of-00044.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00009-of-00044.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00008-of-00044.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.10.self_attn.k_norm.weight": "model-00008-of-00044.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00008-of-00044.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00008-of-00044.safetensors",
    "model.layers.10.self_attn.q_norm.weight": "model-00008-of-00044.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00008-of-00044.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00008-of-00044.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.self_attn.k_norm.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.self_attn.q_norm.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00009-of-00044.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00010-of-00044.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.12.self_attn.k_norm.weight": "model-00009-of-00044.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.12.self_attn.q_norm.weight": "model-00009-of-00044.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00011-of-00044.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00011-of-00044.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00011-of-00044.safetensors",
    "model.layers.13.self_attn.k_norm.weight": "model-00010-of-00044.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.13.self_attn.q_norm.weight": "model-00010-of-00044.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00010-of-00044.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00011-of-00044.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00011-of-00044.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00011-of-00044.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00011-of-00044.safetensors",
    "model.layers.14.self_attn.k_norm.weight": "model-00011-of-00044.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00011-of-00044.safetensors",
```
|
73 |
+
"model.layers.14.self_attn.o_proj.weight": "model-00011-of-00044.safetensors",
|
74 |
+
"model.layers.14.self_attn.q_norm.weight": "model-00011-of-00044.safetensors",
|
75 |
+
"model.layers.14.self_attn.q_proj.weight": "model-00011-of-00044.safetensors",
|
76 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00011-of-00044.safetensors",
|
77 |
+
"model.layers.15.input_layernorm.weight": "model-00012-of-00044.safetensors",
|
78 |
+
"model.layers.15.mlp.down_proj.weight": "model-00012-of-00044.safetensors",
|
79 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00012-of-00044.safetensors",
|
80 |
+
"model.layers.15.mlp.up_proj.weight": "model-00012-of-00044.safetensors",
|
81 |
+
"model.layers.15.self_attn.k_norm.weight": "model-00011-of-00044.safetensors",
|
82 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00012-of-00044.safetensors",
|
83 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00012-of-00044.safetensors",
|
84 |
+
"model.layers.15.self_attn.q_norm.weight": "model-00011-of-00044.safetensors",
|
85 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00012-of-00044.safetensors",
|
86 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00012-of-00044.safetensors",
|
87 |
+
"model.layers.16.input_layernorm.weight": "model-00013-of-00044.safetensors",
|
88 |
+
"model.layers.16.mlp.down_proj.weight": "model-00013-of-00044.safetensors",
|
89 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00012-of-00044.safetensors",
|
90 |
+
"model.layers.16.mlp.up_proj.weight": "model-00013-of-00044.safetensors",
|
91 |
+
"model.layers.16.self_attn.k_norm.weight": "model-00012-of-00044.safetensors",
|
92 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00012-of-00044.safetensors",
|
93 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00012-of-00044.safetensors",
|
94 |
+
"model.layers.16.self_attn.q_norm.weight": "model-00012-of-00044.safetensors",
|
95 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00012-of-00044.safetensors",
|
96 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00012-of-00044.safetensors",
|
97 |
+
"model.layers.17.input_layernorm.weight": "model-00013-of-00044.safetensors",
|
98 |
+
"model.layers.17.mlp.down_proj.weight": "model-00013-of-00044.safetensors",
|
99 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00013-of-00044.safetensors",
|
100 |
+
"model.layers.17.mlp.up_proj.weight": "model-00013-of-00044.safetensors",
|
101 |
+
"model.layers.17.self_attn.k_norm.weight": "model-00013-of-00044.safetensors",
|
102 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00013-of-00044.safetensors",
|
103 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00013-of-00044.safetensors",
|
104 |
+
"model.layers.17.self_attn.q_norm.weight": "model-00013-of-00044.safetensors",
|
105 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00013-of-00044.safetensors",
|
106 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00013-of-00044.safetensors",
|
107 |
+
"model.layers.18.input_layernorm.weight": "model-00014-of-00044.safetensors",
|
108 |
+
"model.layers.18.mlp.down_proj.weight": "model-00014-of-00044.safetensors",
|
109 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00014-of-00044.safetensors",
|
110 |
+
"model.layers.18.mlp.up_proj.weight": "model-00014-of-00044.safetensors",
|
111 |
+
"model.layers.18.self_attn.k_norm.weight": "model-00013-of-00044.safetensors",
|
112 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00014-of-00044.safetensors",
|
113 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00014-of-00044.safetensors",
|
114 |
+
"model.layers.18.self_attn.q_norm.weight": "model-00013-of-00044.safetensors",
|
115 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00014-of-00044.safetensors",
|
116 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00014-of-00044.safetensors",
|
117 |
+
"model.layers.19.input_layernorm.weight": "model-00015-of-00044.safetensors",
|
118 |
+
"model.layers.19.mlp.down_proj.weight": "model-00015-of-00044.safetensors",
|
119 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00014-of-00044.safetensors",
|
120 |
+
"model.layers.19.mlp.up_proj.weight": "model-00015-of-00044.safetensors",
|
121 |
+
"model.layers.19.self_attn.k_norm.weight": "model-00014-of-00044.safetensors",
|
122 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00014-of-00044.safetensors",
|
123 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00014-of-00044.safetensors",
|
124 |
+
"model.layers.19.self_attn.q_norm.weight": "model-00014-of-00044.safetensors",
|
125 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00014-of-00044.safetensors",
|
126 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00014-of-00044.safetensors",
|
127 |
+
"model.layers.2.input_layernorm.weight": "model-00003-of-00044.safetensors",
|
128 |
+
"model.layers.2.mlp.down_proj.weight": "model-00003-of-00044.safetensors",
|
129 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00003-of-00044.safetensors",
|
130 |
+
"model.layers.2.mlp.up_proj.weight": "model-00003-of-00044.safetensors",
|
131 |
+
"model.layers.2.self_attn.k_norm.weight": "model-00003-of-00044.safetensors",
|
132 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00003-of-00044.safetensors",
|
133 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00003-of-00044.safetensors",
|
134 |
+
"model.layers.2.self_attn.q_norm.weight": "model-00003-of-00044.safetensors",
|
135 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00003-of-00044.safetensors",
|
136 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00003-of-00044.safetensors",
|
137 |
+
"model.layers.20.input_layernorm.weight": "model-00015-of-00044.safetensors",
|
138 |
+
"model.layers.20.mlp.down_proj.weight": "model-00015-of-00044.safetensors",
|
139 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00015-of-00044.safetensors",
|
140 |
+
"model.layers.20.mlp.up_proj.weight": "model-00015-of-00044.safetensors",
|
141 |
+
"model.layers.20.self_attn.k_norm.weight": "model-00015-of-00044.safetensors",
|
142 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00015-of-00044.safetensors",
|
143 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00015-of-00044.safetensors",
|
144 |
+
"model.layers.20.self_attn.q_norm.weight": "model-00015-of-00044.safetensors",
|
145 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00015-of-00044.safetensors",
|
146 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00015-of-00044.safetensors",
|
147 |
+
"model.layers.21.input_layernorm.weight": "model-00016-of-00044.safetensors",
|
148 |
+
"model.layers.21.mlp.down_proj.weight": "model-00016-of-00044.safetensors",
|
149 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00016-of-00044.safetensors",
|
150 |
+
"model.layers.21.mlp.up_proj.weight": "model-00016-of-00044.safetensors",
|
151 |
+
"model.layers.21.self_attn.k_norm.weight": "model-00015-of-00044.safetensors",
|
152 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00016-of-00044.safetensors",
|
153 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00016-of-00044.safetensors",
|
154 |
+
"model.layers.21.self_attn.q_norm.weight": "model-00015-of-00044.safetensors",
|
155 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00016-of-00044.safetensors",
|
156 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00016-of-00044.safetensors",
|
157 |
+
"model.layers.22.input_layernorm.weight": "model-00017-of-00044.safetensors",
|
158 |
+
"model.layers.22.mlp.down_proj.weight": "model-00017-of-00044.safetensors",
|
159 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00016-of-00044.safetensors",
|
160 |
+
"model.layers.22.mlp.up_proj.weight": "model-00017-of-00044.safetensors",
|
161 |
+
"model.layers.22.self_attn.k_norm.weight": "model-00016-of-00044.safetensors",
|
162 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00016-of-00044.safetensors",
|
163 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00016-of-00044.safetensors",
|
164 |
+
"model.layers.22.self_attn.q_norm.weight": "model-00016-of-00044.safetensors",
|
165 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00016-of-00044.safetensors",
|
166 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00016-of-00044.safetensors",
|
167 |
+
"model.layers.23.input_layernorm.weight": "model-00017-of-00044.safetensors",
|
168 |
+
"model.layers.23.mlp.down_proj.weight": "model-00017-of-00044.safetensors",
|
169 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00017-of-00044.safetensors",
|
170 |
+
"model.layers.23.mlp.up_proj.weight": "model-00017-of-00044.safetensors",
|
171 |
+
"model.layers.23.self_attn.k_norm.weight": "model-00017-of-00044.safetensors",
|
172 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00017-of-00044.safetensors",
|
173 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00017-of-00044.safetensors",
|
174 |
+
"model.layers.23.self_attn.q_norm.weight": "model-00017-of-00044.safetensors",
|
175 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00017-of-00044.safetensors",
|
176 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00017-of-00044.safetensors",
|
177 |
+
"model.layers.24.input_layernorm.weight": "model-00018-of-00044.safetensors",
|
178 |
+
"model.layers.24.mlp.down_proj.weight": "model-00018-of-00044.safetensors",
|
179 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00018-of-00044.safetensors",
|
180 |
+
"model.layers.24.mlp.up_proj.weight": "model-00018-of-00044.safetensors",
|
181 |
+
"model.layers.24.self_attn.k_norm.weight": "model-00017-of-00044.safetensors",
|
182 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00018-of-00044.safetensors",
|
183 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00018-of-00044.safetensors",
|
184 |
+
"model.layers.24.self_attn.q_norm.weight": "model-00017-of-00044.safetensors",
|
185 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00018-of-00044.safetensors",
|
186 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00018-of-00044.safetensors",
|
187 |
+
"model.layers.25.input_layernorm.weight": "model-00019-of-00044.safetensors",
|
188 |
+
"model.layers.25.mlp.down_proj.weight": "model-00019-of-00044.safetensors",
|
189 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00018-of-00044.safetensors",
|
190 |
+
"model.layers.25.mlp.up_proj.weight": "model-00019-of-00044.safetensors",
|
191 |
+
"model.layers.25.self_attn.k_norm.weight": "model-00018-of-00044.safetensors",
|
192 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00018-of-00044.safetensors",
|
193 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00018-of-00044.safetensors",
|
194 |
+
"model.layers.25.self_attn.q_norm.weight": "model-00018-of-00044.safetensors",
|
195 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00018-of-00044.safetensors",
|
196 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00018-of-00044.safetensors",
|
197 |
+
"model.layers.26.input_layernorm.weight": "model-00019-of-00044.safetensors",
|
198 |
+
"model.layers.26.mlp.down_proj.weight": "model-00019-of-00044.safetensors",
|
199 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00019-of-00044.safetensors",
|
200 |
+
"model.layers.26.mlp.up_proj.weight": "model-00019-of-00044.safetensors",
|
201 |
+
"model.layers.26.self_attn.k_norm.weight": "model-00019-of-00044.safetensors",
|
202 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00019-of-00044.safetensors",
|
203 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00019-of-00044.safetensors",
|
204 |
+
"model.layers.26.self_attn.q_norm.weight": "model-00019-of-00044.safetensors",
|
205 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00019-of-00044.safetensors",
|
206 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00019-of-00044.safetensors",
|
207 |
+
"model.layers.27.input_layernorm.weight": "model-00020-of-00044.safetensors",
|
208 |
+
"model.layers.27.mlp.down_proj.weight": "model-00020-of-00044.safetensors",
|
209 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00020-of-00044.safetensors",
|
210 |
+
"model.layers.27.mlp.up_proj.weight": "model-00020-of-00044.safetensors",
|
211 |
+
"model.layers.27.self_attn.k_norm.weight": "model-00019-of-00044.safetensors",
|
212 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00020-of-00044.safetensors",
|
213 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00020-of-00044.safetensors",
|
214 |
+
"model.layers.27.self_attn.q_norm.weight": "model-00019-of-00044.safetensors",
|
215 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00020-of-00044.safetensors",
|
216 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00020-of-00044.safetensors",
|
217 |
+
"model.layers.28.input_layernorm.weight": "model-00021-of-00044.safetensors",
|
218 |
+
"model.layers.28.mlp.down_proj.weight": "model-00021-of-00044.safetensors",
|
219 |
+
"model.layers.28.mlp.gate_proj.weight": "model-00020-of-00044.safetensors",
|
220 |
+
"model.layers.28.mlp.up_proj.weight": "model-00021-of-00044.safetensors",
|
221 |
+
"model.layers.28.self_attn.k_norm.weight": "model-00020-of-00044.safetensors",
|
222 |
+
"model.layers.28.self_attn.k_proj.weight": "model-00020-of-00044.safetensors",
|
223 |
+
"model.layers.28.self_attn.o_proj.weight": "model-00020-of-00044.safetensors",
|
224 |
+
"model.layers.28.self_attn.q_norm.weight": "model-00020-of-00044.safetensors",
|
225 |
+
"model.layers.28.self_attn.q_proj.weight": "model-00020-of-00044.safetensors",
|
226 |
+
"model.layers.28.self_attn.v_proj.weight": "model-00020-of-00044.safetensors",
|
227 |
+
"model.layers.29.input_layernorm.weight": "model-00021-of-00044.safetensors",
|
228 |
+
"model.layers.29.mlp.down_proj.weight": "model-00021-of-00044.safetensors",
|
229 |
+
"model.layers.29.mlp.gate_proj.weight": "model-00021-of-00044.safetensors",
|
230 |
+
"model.layers.29.mlp.up_proj.weight": "model-00021-of-00044.safetensors",
|
231 |
+
"model.layers.29.self_attn.k_norm.weight": "model-00021-of-00044.safetensors",
|
232 |
+
"model.layers.29.self_attn.k_proj.weight": "model-00021-of-00044.safetensors",
|
233 |
+
"model.layers.29.self_attn.o_proj.weight": "model-00021-of-00044.safetensors",
|
234 |
+
"model.layers.29.self_attn.q_norm.weight": "model-00021-of-00044.safetensors",
|
235 |
+
"model.layers.29.self_attn.q_proj.weight": "model-00021-of-00044.safetensors",
|
236 |
+
"model.layers.29.self_attn.v_proj.weight": "model-00021-of-00044.safetensors",
|
237 |
+
"model.layers.3.input_layernorm.weight": "model-00004-of-00044.safetensors",
|
238 |
+
"model.layers.3.mlp.down_proj.weight": "model-00004-of-00044.safetensors",
|
239 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00004-of-00044.safetensors",
|
240 |
+
"model.layers.3.mlp.up_proj.weight": "model-00004-of-00044.safetensors",
|
241 |
+
"model.layers.3.self_attn.k_norm.weight": "model-00003-of-00044.safetensors",
|
242 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00004-of-00044.safetensors",
|
243 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00004-of-00044.safetensors",
|
244 |
+
"model.layers.3.self_attn.q_norm.weight": "model-00003-of-00044.safetensors",
|
245 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00004-of-00044.safetensors",
|
246 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00004-of-00044.safetensors",
|
247 |
+
"model.layers.30.input_layernorm.weight": "model-00022-of-00044.safetensors",
|
248 |
+
"model.layers.30.mlp.down_proj.weight": "model-00022-of-00044.safetensors",
|
249 |
+
"model.layers.30.mlp.gate_proj.weight": "model-00022-of-00044.safetensors",
|
250 |
+
"model.layers.30.mlp.up_proj.weight": "model-00022-of-00044.safetensors",
|
251 |
+
"model.layers.30.self_attn.k_norm.weight": "model-00021-of-00044.safetensors",
|
252 |
+
"model.layers.30.self_attn.k_proj.weight": "model-00022-of-00044.safetensors",
|
253 |
+
"model.layers.30.self_attn.o_proj.weight": "model-00022-of-00044.safetensors",
|
254 |
+
"model.layers.30.self_attn.q_norm.weight": "model-00021-of-00044.safetensors",
|
255 |
+
"model.layers.30.self_attn.q_proj.weight": "model-00022-of-00044.safetensors",
|
256 |
+
"model.layers.30.self_attn.v_proj.weight": "model-00022-of-00044.safetensors",
|
257 |
+
"model.layers.31.input_layernorm.weight": "model-00023-of-00044.safetensors",
|
258 |
+
"model.layers.31.mlp.down_proj.weight": "model-00023-of-00044.safetensors",
|
259 |
+
"model.layers.31.mlp.gate_proj.weight": "model-00022-of-00044.safetensors",
|
260 |
+
"model.layers.31.mlp.up_proj.weight": "model-00023-of-00044.safetensors",
|
261 |
+
"model.layers.31.self_attn.k_norm.weight": "model-00022-of-00044.safetensors",
|
262 |
+
"model.layers.31.self_attn.k_proj.weight": "model-00022-of-00044.safetensors",
|
263 |
+
"model.layers.31.self_attn.o_proj.weight": "model-00022-of-00044.safetensors",
|
264 |
+
"model.layers.31.self_attn.q_norm.weight": "model-00022-of-00044.safetensors",
|
265 |
+
"model.layers.31.self_attn.q_proj.weight": "model-00022-of-00044.safetensors",
|
266 |
+
"model.layers.31.self_attn.v_proj.weight": "model-00022-of-00044.safetensors",
|
267 |
+
"model.layers.32.input_layernorm.weight": "model-00023-of-00044.safetensors",
|
268 |
+
"model.layers.32.mlp.down_proj.weight": "model-00023-of-00044.safetensors",
|
269 |
+
"model.layers.32.mlp.gate_proj.weight": "model-00023-of-00044.safetensors",
|
270 |
+
"model.layers.32.mlp.up_proj.weight": "model-00023-of-00044.safetensors",
|
271 |
+
"model.layers.32.self_attn.k_norm.weight": "model-00023-of-00044.safetensors",
|
272 |
+
"model.layers.32.self_attn.k_proj.weight": "model-00023-of-00044.safetensors",
|
273 |
+
"model.layers.32.self_attn.o_proj.weight": "model-00023-of-00044.safetensors",
|
274 |
+
"model.layers.32.self_attn.q_norm.weight": "model-00023-of-00044.safetensors",
|
275 |
+
"model.layers.32.self_attn.q_proj.weight": "model-00023-of-00044.safetensors",
|
276 |
+
"model.layers.32.self_attn.v_proj.weight": "model-00023-of-00044.safetensors",
|
277 |
+
"model.layers.33.input_layernorm.weight": "model-00024-of-00044.safetensors",
|
278 |
+
"model.layers.33.mlp.down_proj.weight": "model-00024-of-00044.safetensors",
|
279 |
+
"model.layers.33.mlp.gate_proj.weight": "model-00024-of-00044.safetensors",
|
280 |
+
"model.layers.33.mlp.up_proj.weight": "model-00024-of-00044.safetensors",
|
281 |
+
"model.layers.33.self_attn.k_norm.weight": "model-00023-of-00044.safetensors",
|
282 |
+
"model.layers.33.self_attn.k_proj.weight": "model-00024-of-00044.safetensors",
|
283 |
+
"model.layers.33.self_attn.o_proj.weight": "model-00024-of-00044.safetensors",
|
284 |
+
"model.layers.33.self_attn.q_norm.weight": "model-00023-of-00044.safetensors",
|
285 |
+
"model.layers.33.self_attn.q_proj.weight": "model-00024-of-00044.safetensors",
|
286 |
+
"model.layers.33.self_attn.v_proj.weight": "model-00024-of-00044.safetensors",
|
287 |
+
"model.layers.34.input_layernorm.weight": "model-00025-of-00044.safetensors",
|
288 |
+
"model.layers.34.mlp.down_proj.weight": "model-00025-of-00044.safetensors",
|
289 |
+
"model.layers.34.mlp.gate_proj.weight": "model-00024-of-00044.safetensors",
|
290 |
+
"model.layers.34.mlp.up_proj.weight": "model-00025-of-00044.safetensors",
|
291 |
+
"model.layers.34.self_attn.k_norm.weight": "model-00024-of-00044.safetensors",
|
292 |
+
"model.layers.34.self_attn.k_proj.weight": "model-00024-of-00044.safetensors",
|
293 |
+
"model.layers.34.self_attn.o_proj.weight": "model-00024-of-00044.safetensors",
|
294 |
+
"model.layers.34.self_attn.q_norm.weight": "model-00024-of-00044.safetensors",
|
295 |
+
"model.layers.34.self_attn.q_proj.weight": "model-00024-of-00044.safetensors",
|
296 |
+
"model.layers.34.self_attn.v_proj.weight": "model-00024-of-00044.safetensors",
|
297 |
+
"model.layers.35.input_layernorm.weight": "model-00025-of-00044.safetensors",
|
298 |
+
"model.layers.35.mlp.down_proj.weight": "model-00025-of-00044.safetensors",
|
299 |
+
"model.layers.35.mlp.gate_proj.weight": "model-00025-of-00044.safetensors",
|
300 |
+
"model.layers.35.mlp.up_proj.weight": "model-00025-of-00044.safetensors",
|
301 |
+
"model.layers.35.self_attn.k_norm.weight": "model-00025-of-00044.safetensors",
|
302 |
+
"model.layers.35.self_attn.k_proj.weight": "model-00025-of-00044.safetensors",
|
303 |
+
"model.layers.35.self_attn.o_proj.weight": "model-00025-of-00044.safetensors",
|
304 |
+
"model.layers.35.self_attn.q_norm.weight": "model-00025-of-00044.safetensors",
|
305 |
+
"model.layers.35.self_attn.q_proj.weight": "model-00025-of-00044.safetensors",
|
306 |
+
"model.layers.35.self_attn.v_proj.weight": "model-00025-of-00044.safetensors",
|
307 |
+
"model.layers.36.input_layernorm.weight": "model-00026-of-00044.safetensors",
|
308 |
+
"model.layers.36.mlp.down_proj.weight": "model-00026-of-00044.safetensors",
|
309 |
+
"model.layers.36.mlp.gate_proj.weight": "model-00026-of-00044.safetensors",
|
310 |
+
"model.layers.36.mlp.up_proj.weight": "model-00026-of-00044.safetensors",
|
311 |
+
"model.layers.36.self_attn.k_norm.weight": "model-00025-of-00044.safetensors",
|
312 |
+
"model.layers.36.self_attn.k_proj.weight": "model-00026-of-00044.safetensors",
|
313 |
+
"model.layers.36.self_attn.o_proj.weight": "model-00026-of-00044.safetensors",
|
314 |
+
"model.layers.36.self_attn.q_norm.weight": "model-00025-of-00044.safetensors",
|
315 |
+
"model.layers.36.self_attn.q_proj.weight": "model-00026-of-00044.safetensors",
|
316 |
+
"model.layers.36.self_attn.v_proj.weight": "model-00026-of-00044.safetensors",
|
317 |
+
"model.layers.37.input_layernorm.weight": "model-00027-of-00044.safetensors",
|
318 |
+
"model.layers.37.mlp.down_proj.weight": "model-00027-of-00044.safetensors",
|
319 |
+
"model.layers.37.mlp.gate_proj.weight": "model-00026-of-00044.safetensors",
|
320 |
+
"model.layers.37.mlp.up_proj.weight": "model-00027-of-00044.safetensors",
|
321 |
+
"model.layers.37.self_attn.k_norm.weight": "model-00026-of-00044.safetensors",
|
322 |
+
"model.layers.37.self_attn.k_proj.weight": "model-00026-of-00044.safetensors",
|
323 |
+
"model.layers.37.self_attn.o_proj.weight": "model-00026-of-00044.safetensors",
|
324 |
+
"model.layers.37.self_attn.q_norm.weight": "model-00026-of-00044.safetensors",
|
325 |
+
"model.layers.37.self_attn.q_proj.weight": "model-00026-of-00044.safetensors",
|
326 |
+
"model.layers.37.self_attn.v_proj.weight": "model-00026-of-00044.safetensors",
|
327 |
+
"model.layers.38.input_layernorm.weight": "model-00027-of-00044.safetensors",
|
328 |
+
"model.layers.38.mlp.down_proj.weight": "model-00027-of-00044.safetensors",
|
329 |
+
"model.layers.38.mlp.gate_proj.weight": "model-00027-of-00044.safetensors",
|
330 |
+
"model.layers.38.mlp.up_proj.weight": "model-00027-of-00044.safetensors",
|
331 |
+
"model.layers.38.self_attn.k_norm.weight": "model-00027-of-00044.safetensors",
|
332 |
+
"model.layers.38.self_attn.k_proj.weight": "model-00027-of-00044.safetensors",
|
333 |
+
"model.layers.38.self_attn.o_proj.weight": "model-00027-of-00044.safetensors",
|
334 |
+
"model.layers.38.self_attn.q_norm.weight": "model-00027-of-00044.safetensors",
|
335 |
+
"model.layers.38.self_attn.q_proj.weight": "model-00027-of-00044.safetensors",
|
336 |
+
"model.layers.38.self_attn.v_proj.weight": "model-00027-of-00044.safetensors",
|
337 |
+
"model.layers.39.input_layernorm.weight": "model-00028-of-00044.safetensors",
|
338 |
+
"model.layers.39.mlp.down_proj.weight": "model-00028-of-00044.safetensors",
|
339 |
+
"model.layers.39.mlp.gate_proj.weight": "model-00028-of-00044.safetensors",
|
340 |
+
"model.layers.39.mlp.up_proj.weight": "model-00028-of-00044.safetensors",
|
341 |
+
"model.layers.39.self_attn.k_norm.weight": "model-00027-of-00044.safetensors",
|
342 |
+
"model.layers.39.self_attn.k_proj.weight": "model-00028-of-00044.safetensors",
|
343 |
+
"model.layers.39.self_attn.o_proj.weight": "model-00028-of-00044.safetensors",
|
344 |
+
"model.layers.39.self_attn.q_norm.weight": "model-00027-of-00044.safetensors",
|
345 |
+
"model.layers.39.self_attn.q_proj.weight": "model-00028-of-00044.safetensors",
|
346 |
+
"model.layers.39.self_attn.v_proj.weight": "model-00028-of-00044.safetensors",
|
347 |
+
"model.layers.4.input_layernorm.weight": "model-00005-of-00044.safetensors",
|
348 |
+
"model.layers.4.mlp.down_proj.weight": "model-00005-of-00044.safetensors",
|
349 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00004-of-00044.safetensors",
|
350 |
+
"model.layers.4.mlp.up_proj.weight": "model-00005-of-00044.safetensors",
|
351 |
+
"model.layers.4.self_attn.k_norm.weight": "model-00004-of-00044.safetensors",
|
352 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00004-of-00044.safetensors",
|
353 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00004-of-00044.safetensors",
|
354 |
+
"model.layers.4.self_attn.q_norm.weight": "model-00004-of-00044.safetensors",
|
355 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00004-of-00044.safetensors",
|
356 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00004-of-00044.safetensors",
|
357 |
+
"model.layers.40.input_layernorm.weight": "model-00029-of-00044.safetensors",
|
358 |
+
"model.layers.40.mlp.down_proj.weight": "model-00029-of-00044.safetensors",
|
359 |
+
"model.layers.40.mlp.gate_proj.weight": "model-00028-of-00044.safetensors",
|
360 |
+
"model.layers.40.mlp.up_proj.weight": "model-00029-of-00044.safetensors",
|
361 |
+
"model.layers.40.self_attn.k_norm.weight": "model-00028-of-00044.safetensors",
|
362 |
+
"model.layers.40.self_attn.k_proj.weight": "model-00028-of-00044.safetensors",
|
363 |
+
"model.layers.40.self_attn.o_proj.weight": "model-00028-of-00044.safetensors",
|
364 |
+
"model.layers.40.self_attn.q_norm.weight": "model-00028-of-00044.safetensors",
|
365 |
+
"model.layers.40.self_attn.q_proj.weight": "model-00028-of-00044.safetensors",
|
366 |
+
"model.layers.40.self_attn.v_proj.weight": "model-00028-of-00044.safetensors",
|
367 |
+
"model.layers.41.input_layernorm.weight": "model-00029-of-00044.safetensors",
|
368 |
+
"model.layers.41.mlp.down_proj.weight": "model-00029-of-00044.safetensors",
|
369 |
+
"model.layers.41.mlp.gate_proj.weight": "model-00029-of-00044.safetensors",
|
370 |
+
"model.layers.41.mlp.up_proj.weight": "model-00029-of-00044.safetensors",
|
371 |
+
"model.layers.41.self_attn.k_norm.weight": "model-00029-of-00044.safetensors",
|
372 |
+
"model.layers.41.self_attn.k_proj.weight": "model-00029-of-00044.safetensors",
|
373 |
+
"model.layers.41.self_attn.o_proj.weight": "model-00029-of-00044.safetensors",
|
374 |
+
"model.layers.41.self_attn.q_norm.weight": "model-00029-of-00044.safetensors",
|
375 |
+
"model.layers.41.self_attn.q_proj.weight": "model-00029-of-00044.safetensors",
|
376 |
+
"model.layers.41.self_attn.v_proj.weight": "model-00029-of-00044.safetensors",
|
377 |
+
"model.layers.42.input_layernorm.weight": "model-00030-of-00044.safetensors",
|
378 |
+
"model.layers.42.mlp.down_proj.weight": "model-00030-of-00044.safetensors",
|
379 |
+
"model.layers.42.mlp.gate_proj.weight": "model-00030-of-00044.safetensors",
|
380 |
+
"model.layers.42.mlp.up_proj.weight": "model-00030-of-00044.safetensors",
|
381 |
+
"model.layers.42.self_attn.k_norm.weight": "model-00029-of-00044.safetensors",
|
382 |
+
"model.layers.42.self_attn.k_proj.weight": "model-00030-of-00044.safetensors",
|
383 |
+
"model.layers.42.self_attn.o_proj.weight": "model-00030-of-00044.safetensors",
|
384 |
+
"model.layers.42.self_attn.q_norm.weight": "model-00029-of-00044.safetensors",
|
385 |
+
"model.layers.42.self_attn.q_proj.weight": "model-00030-of-00044.safetensors",
|
386 |
+
"model.layers.42.self_attn.v_proj.weight": "model-00030-of-00044.safetensors",
|
387 |
+
"model.layers.43.input_layernorm.weight": "model-00031-of-00044.safetensors",
|
388 |
+
"model.layers.43.mlp.down_proj.weight": "model-00031-of-00044.safetensors",
|
389 |
+
"model.layers.43.mlp.gate_proj.weight": "model-00030-of-00044.safetensors",
|
390 |
+
"model.layers.43.mlp.up_proj.weight": "model-00031-of-00044.safetensors",
|
391 |
+
"model.layers.43.self_attn.k_norm.weight": "model-00030-of-00044.safetensors",
|
392 |
+
"model.layers.43.self_attn.k_proj.weight": "model-00030-of-00044.safetensors",
|
393 |
+
"model.layers.43.self_attn.o_proj.weight": "model-00030-of-00044.safetensors",
|
394 |
+
"model.layers.43.self_attn.q_norm.weight": "model-00030-of-00044.safetensors",
|
395 |
+
"model.layers.43.self_attn.q_proj.weight": "model-00030-of-00044.safetensors",
|
396 |
+
"model.layers.43.self_attn.v_proj.weight": "model-00030-of-00044.safetensors",
|
397 |
+
"model.layers.44.input_layernorm.weight": "model-00031-of-00044.safetensors",
|
398 |
+
"model.layers.44.mlp.down_proj.weight": "model-00031-of-00044.safetensors",
|
399 |
+
"model.layers.44.mlp.gate_proj.weight": "model-00031-of-00044.safetensors",
|
400 |
+
"model.layers.44.mlp.up_proj.weight": "model-00031-of-00044.safetensors",
|
401 |
+
"model.layers.44.self_attn.k_norm.weight": "model-00031-of-00044.safetensors",
|
402 |
+
"model.layers.44.self_attn.k_proj.weight": "model-00031-of-00044.safetensors",
|
403 |
+
"model.layers.44.self_attn.o_proj.weight": "model-00031-of-00044.safetensors",
|
404 |
+
"model.layers.44.self_attn.q_norm.weight": "model-00031-of-00044.safetensors",
|
405 |
+
"model.layers.44.self_attn.q_proj.weight": "model-00031-of-00044.safetensors",
|
406 |
+
"model.layers.44.self_attn.v_proj.weight": "model-00031-of-00044.safetensors",
|
407 |
+
"model.layers.45.input_layernorm.weight": "model-00032-of-00044.safetensors",
|
408 |
+
"model.layers.45.mlp.down_proj.weight": "model-00032-of-00044.safetensors",
|
409 |
+
"model.layers.45.mlp.gate_proj.weight": "model-00032-of-00044.safetensors",
|
410 |
+
"model.layers.45.mlp.up_proj.weight": "model-00032-of-00044.safetensors",
|
411 |
+
"model.layers.45.self_attn.k_norm.weight": "model-00031-of-00044.safetensors",
|
412 |
+
"model.layers.45.self_attn.k_proj.weight": "model-00032-of-00044.safetensors",
|
413 |
+
"model.layers.45.self_attn.o_proj.weight": "model-00032-of-00044.safetensors",
|
414 |
+
"model.layers.45.self_attn.q_norm.weight": "model-00031-of-00044.safetensors",
|
415 |
+
"model.layers.45.self_attn.q_proj.weight": "model-00032-of-00044.safetensors",
|
416 |
+
"model.layers.45.self_attn.v_proj.weight": "model-00032-of-00044.safetensors",
|
417 |
+
"model.layers.46.input_layernorm.weight": "model-00033-of-00044.safetensors",
|
418 |
+
"model.layers.46.mlp.down_proj.weight": "model-00033-of-00044.safetensors",
|
419 |
+
"model.layers.46.mlp.gate_proj.weight": "model-00032-of-00044.safetensors",
|
420 |
+
"model.layers.46.mlp.up_proj.weight": "model-00033-of-00044.safetensors",
|
421 |
+
"model.layers.46.self_attn.k_norm.weight": "model-00032-of-00044.safetensors",
|
422 |
+
"model.layers.46.self_attn.k_proj.weight": "model-00032-of-00044.safetensors",
|
423 |
+
"model.layers.46.self_attn.o_proj.weight": "model-00032-of-00044.safetensors",
|
424 |
+
"model.layers.46.self_attn.q_norm.weight": "model-00032-of-00044.safetensors",
|
425 |
+
"model.layers.46.self_attn.q_proj.weight": "model-00032-of-00044.safetensors",
|
426 |
+
"model.layers.46.self_attn.v_proj.weight": "model-00032-of-00044.safetensors",
|
427 |
+
"model.layers.47.input_layernorm.weight": "model-00033-of-00044.safetensors",
|
428 |
+
"model.layers.47.mlp.down_proj.weight": "model-00033-of-00044.safetensors",
|
429 |
+
"model.layers.47.mlp.gate_proj.weight": "model-00033-of-00044.safetensors",
|
430 |
+
"model.layers.47.mlp.up_proj.weight": "model-00033-of-00044.safetensors",
|
431 |
+
"model.layers.47.self_attn.k_norm.weight": "model-00033-of-00044.safetensors",
|
432 |
+
"model.layers.47.self_attn.k_proj.weight": "model-00033-of-00044.safetensors",
|
433 |
+
"model.layers.47.self_attn.o_proj.weight": "model-00033-of-00044.safetensors",
|
434 |
+
"model.layers.47.self_attn.q_norm.weight": "model-00033-of-00044.safetensors",
|
435 |
+
"model.layers.47.self_attn.q_proj.weight": "model-00033-of-00044.safetensors",
|
436 |
+
"model.layers.47.self_attn.v_proj.weight": "model-00033-of-00044.safetensors",
|
437 |
+
"model.layers.48.input_layernorm.weight": "model-00034-of-00044.safetensors",
|
438 |
+
"model.layers.48.mlp.down_proj.weight": "model-00034-of-00044.safetensors",
|
439 |
+
"model.layers.48.mlp.gate_proj.weight": "model-00034-of-00044.safetensors",
|
440 |
+
"model.layers.48.mlp.up_proj.weight": "model-00034-of-00044.safetensors",
|
441 |
+
"model.layers.48.self_attn.k_norm.weight": "model-00033-of-00044.safetensors",
|
442 |
+
"model.layers.48.self_attn.k_proj.weight": "model-00034-of-00044.safetensors",
|
443 |
+
"model.layers.48.self_attn.o_proj.weight": "model-00034-of-00044.safetensors",
|
444 |
+
"model.layers.48.self_attn.q_norm.weight": "model-00033-of-00044.safetensors",
|
445 |
+
"model.layers.48.self_attn.q_proj.weight": "model-00034-of-00044.safetensors",
|
446 |
+
"model.layers.48.self_attn.v_proj.weight": "model-00034-of-00044.safetensors",
|
447 |
+
"model.layers.49.input_layernorm.weight": "model-00035-of-00044.safetensors",
|
448 |
+
"model.layers.49.mlp.down_proj.weight": "model-00035-of-00044.safetensors",
|
449 |
+
"model.layers.49.mlp.gate_proj.weight": "model-00034-of-00044.safetensors",
|
450 |
+
"model.layers.49.mlp.up_proj.weight": "model-00035-of-00044.safetensors",
|
451 |
+
"model.layers.49.self_attn.k_norm.weight": "model-00034-of-00044.safetensors",
|
452 |
+
"model.layers.49.self_attn.k_proj.weight": "model-00034-of-00044.safetensors",
|
453 |
+
"model.layers.49.self_attn.o_proj.weight": "model-00034-of-00044.safetensors",
|
454 |
+
"model.layers.49.self_attn.q_norm.weight": "model-00034-of-00044.safetensors",
|
455 |
+
"model.layers.49.self_attn.q_proj.weight": "model-00034-of-00044.safetensors",
|
456 |
+
"model.layers.49.self_attn.v_proj.weight": "model-00034-of-00044.safetensors",
|
457 |
+
"model.layers.5.input_layernorm.weight": "model-00005-of-00044.safetensors",
|
458 |
+
"model.layers.5.mlp.down_proj.weight": "model-00005-of-00044.safetensors",
|
459 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00005-of-00044.safetensors",
|
460 |
+
"model.layers.5.mlp.up_proj.weight": "model-00005-of-00044.safetensors",
|
461 |
+
"model.layers.5.self_attn.k_norm.weight": "model-00005-of-00044.safetensors",
|
462 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00005-of-00044.safetensors",
|
463 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00005-of-00044.safetensors",
|
464 |
+
"model.layers.5.self_attn.q_norm.weight": "model-00005-of-00044.safetensors",
|
465 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00005-of-00044.safetensors",
|
466 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00005-of-00044.safetensors",
|
467 |
+
"model.layers.50.input_layernorm.weight": "model-00035-of-00044.safetensors",
|
468 |
+
"model.layers.50.mlp.down_proj.weight": "model-00035-of-00044.safetensors",
|
469 |
+
"model.layers.50.mlp.gate_proj.weight": "model-00035-of-00044.safetensors",
|
470 |
+
"model.layers.50.mlp.up_proj.weight": "model-00035-of-00044.safetensors",
|
471 |
+
"model.layers.50.self_attn.k_norm.weight": "model-00035-of-00044.safetensors",
|
472 |
+
"model.layers.50.self_attn.k_proj.weight": "model-00035-of-00044.safetensors",
|
473 |
+
"model.layers.50.self_attn.o_proj.weight": "model-00035-of-00044.safetensors",
|
474 |
+
"model.layers.50.self_attn.q_norm.weight": "model-00035-of-00044.safetensors",
|
475 |
+
"model.layers.50.self_attn.q_proj.weight": "model-00035-of-00044.safetensors",
|
476 |
+
"model.layers.50.self_attn.v_proj.weight": "model-00035-of-00044.safetensors",
|
477 |
+
"model.layers.51.input_layernorm.weight": "model-00036-of-00044.safetensors",
|
478 |
+
"model.layers.51.mlp.down_proj.weight": "model-00036-of-00044.safetensors",
|
479 |
+
"model.layers.51.mlp.gate_proj.weight": "model-00036-of-00044.safetensors",
|
480 |
+
"model.layers.51.mlp.up_proj.weight": "model-00036-of-00044.safetensors",
|
481 |
+
"model.layers.51.self_attn.k_norm.weight": "model-00035-of-00044.safetensors",
|
482 |
+
"model.layers.51.self_attn.k_proj.weight": "model-00036-of-00044.safetensors",
|
483 |
+
"model.layers.51.self_attn.o_proj.weight": "model-00036-of-00044.safetensors",
|
484 |
+
"model.layers.51.self_attn.q_norm.weight": "model-00035-of-00044.safetensors",
|
485 |
+
"model.layers.51.self_attn.q_proj.weight": "model-00036-of-00044.safetensors",
|
486 |
+
"model.layers.51.self_attn.v_proj.weight": "model-00036-of-00044.safetensors",
|
487 |
+
"model.layers.52.input_layernorm.weight": "model-00037-of-00044.safetensors",
|
488 |
+
"model.layers.52.mlp.down_proj.weight": "model-00037-of-00044.safetensors",
|
489 |
+
"model.layers.52.mlp.gate_proj.weight": "model-00036-of-00044.safetensors",
|
490 |
+
"model.layers.52.mlp.up_proj.weight": "model-00037-of-00044.safetensors",
|
491 |
+
"model.layers.52.self_attn.k_norm.weight": "model-00036-of-00044.safetensors",
|
492 |
+
"model.layers.52.self_attn.k_proj.weight": "model-00036-of-00044.safetensors",
|
493 |
+
"model.layers.52.self_attn.o_proj.weight": "model-00036-of-00044.safetensors",
|
494 |
+
"model.layers.52.self_attn.q_norm.weight": "model-00036-of-00044.safetensors",
|
495 |
+
"model.layers.52.self_attn.q_proj.weight": "model-00036-of-00044.safetensors",
|
496 |
+
"model.layers.52.self_attn.v_proj.weight": "model-00036-of-00044.safetensors",
|
497 |
+
"model.layers.53.input_layernorm.weight": "model-00037-of-00044.safetensors",
|
498 |
+
"model.layers.53.mlp.down_proj.weight": "model-00037-of-00044.safetensors",
|
499 |
+
"model.layers.53.mlp.gate_proj.weight": "model-00037-of-00044.safetensors",
|
500 |
+
"model.layers.53.mlp.up_proj.weight": "model-00037-of-00044.safetensors",
|
501 |
+
"model.layers.53.self_attn.k_norm.weight": "model-00037-of-00044.safetensors",
|
502 |
+
"model.layers.53.self_attn.k_proj.weight": "model-00037-of-00044.safetensors",
|
503 |
+
"model.layers.53.self_attn.o_proj.weight": "model-00037-of-00044.safetensors",
|
504 |
+
"model.layers.53.self_attn.q_norm.weight": "model-00037-of-00044.safetensors",
|
505 |
+
"model.layers.53.self_attn.q_proj.weight": "model-00037-of-00044.safetensors",
|
506 |
+
"model.layers.53.self_attn.v_proj.weight": "model-00037-of-00044.safetensors",
|
507 |
+
"model.layers.54.input_layernorm.weight": "model-00038-of-00044.safetensors",
|
508 |
+
"model.layers.54.mlp.down_proj.weight": "model-00038-of-00044.safetensors",
|
509 |
+
"model.layers.54.mlp.gate_proj.weight": "model-00038-of-00044.safetensors",
|
510 |
+
"model.layers.54.mlp.up_proj.weight": "model-00038-of-00044.safetensors",
|
511 |
+
"model.layers.54.self_attn.k_norm.weight": "model-00037-of-00044.safetensors",
|
512 |
+
"model.layers.54.self_attn.k_proj.weight": "model-00038-of-00044.safetensors",
|
513 |
+
"model.layers.54.self_attn.o_proj.weight": "model-00038-of-00044.safetensors",
|
514 |
+
"model.layers.54.self_attn.q_norm.weight": "model-00037-of-00044.safetensors",
|
515 |
+
"model.layers.54.self_attn.q_proj.weight": "model-00038-of-00044.safetensors",
|
516 |
+
"model.layers.54.self_attn.v_proj.weight": "model-00038-of-00044.safetensors",
|
517 |
+
"model.layers.55.input_layernorm.weight": "model-00039-of-00044.safetensors",
|
518 |
+
"model.layers.55.mlp.down_proj.weight": "model-00039-of-00044.safetensors",
|
519 |
+
"model.layers.55.mlp.gate_proj.weight": "model-00038-of-00044.safetensors",
|
520 |
+
"model.layers.55.mlp.up_proj.weight": "model-00039-of-00044.safetensors",
|
521 |
+
"model.layers.55.self_attn.k_norm.weight": "model-00038-of-00044.safetensors",
|
522 |
+
"model.layers.55.self_attn.k_proj.weight": "model-00038-of-00044.safetensors",
|
523 |
+
"model.layers.55.self_attn.o_proj.weight": "model-00038-of-00044.safetensors",
|
524 |
+
"model.layers.55.self_attn.q_norm.weight": "model-00038-of-00044.safetensors",
|
525 |
+
"model.layers.55.self_attn.q_proj.weight": "model-00038-of-00044.safetensors",
|
526 |
+
"model.layers.55.self_attn.v_proj.weight": "model-00038-of-00044.safetensors",
|
527 |
+
"model.layers.56.input_layernorm.weight": "model-00039-of-00044.safetensors",
|
528 |
+
"model.layers.56.mlp.down_proj.weight": "model-00039-of-00044.safetensors",
|
529 |
+
"model.layers.56.mlp.gate_proj.weight": "model-00039-of-00044.safetensors",
|
530 |
+
"model.layers.56.mlp.up_proj.weight": "model-00039-of-00044.safetensors",
|
531 |
+
"model.layers.56.self_attn.k_norm.weight": "model-00039-of-00044.safetensors",
|
532 |
+
"model.layers.56.self_attn.k_proj.weight": "model-00039-of-00044.safetensors",
|
533 |
+
"model.layers.56.self_attn.o_proj.weight": "model-00039-of-00044.safetensors",
|
534 |
+
"model.layers.56.self_attn.q_norm.weight": "model-00039-of-00044.safetensors",
|
535 |
+
"model.layers.56.self_attn.q_proj.weight": "model-00039-of-00044.safetensors",
|
536 |
+
"model.layers.56.self_attn.v_proj.weight": "model-00039-of-00044.safetensors",
|
537 |
+
"model.layers.57.input_layernorm.weight": "model-00040-of-00044.safetensors",
|
538 |
+
"model.layers.57.mlp.down_proj.weight": "model-00040-of-00044.safetensors",
|
539 |
+
"model.layers.57.mlp.gate_proj.weight": "model-00040-of-00044.safetensors",
|
540 |
+
"model.layers.57.mlp.up_proj.weight": "model-00040-of-00044.safetensors",
|
541 |
+
"model.layers.57.self_attn.k_norm.weight": "model-00039-of-00044.safetensors",
|
542 |
+
"model.layers.57.self_attn.k_proj.weight": "model-00040-of-00044.safetensors",
|
543 |
+
"model.layers.57.self_attn.o_proj.weight": "model-00040-of-00044.safetensors",
|
544 |
+
"model.layers.57.self_attn.q_norm.weight": "model-00039-of-00044.safetensors",
|
545 |
+
"model.layers.57.self_attn.q_proj.weight": "model-00040-of-00044.safetensors",
|
546 |
+
"model.layers.57.self_attn.v_proj.weight": "model-00040-of-00044.safetensors",
|
547 |
+
"model.layers.58.input_layernorm.weight": "model-00041-of-00044.safetensors",
|
548 |
+
"model.layers.58.mlp.down_proj.weight": "model-00041-of-00044.safetensors",
|
549 |
+
"model.layers.58.mlp.gate_proj.weight": "model-00040-of-00044.safetensors",
|
550 |
+
"model.layers.58.mlp.up_proj.weight": "model-00041-of-00044.safetensors",
|
551 |
+
"model.layers.58.self_attn.k_norm.weight": "model-00040-of-00044.safetensors",
|
552 |
+
"model.layers.58.self_attn.k_proj.weight": "model-00040-of-00044.safetensors",
|
553 |
+
"model.layers.58.self_attn.o_proj.weight": "model-00040-of-00044.safetensors",
|
554 |
+
"model.layers.58.self_attn.q_norm.weight": "model-00040-of-00044.safetensors",
|
555 |
+
"model.layers.58.self_attn.q_proj.weight": "model-00040-of-00044.safetensors",
|
556 |
+
"model.layers.58.self_attn.v_proj.weight": "model-00040-of-00044.safetensors",
|
557 |
+
"model.layers.59.input_layernorm.weight": "model-00041-of-00044.safetensors",
|
558 |
+
"model.layers.59.mlp.down_proj.weight": "model-00041-of-00044.safetensors",
|
559 |
+
"model.layers.59.mlp.gate_proj.weight": "model-00041-of-00044.safetensors",
|
560 |
+
"model.layers.59.mlp.up_proj.weight": "model-00041-of-00044.safetensors",
|
561 |
+
"model.layers.59.self_attn.k_norm.weight": "model-00041-of-00044.safetensors",
|
562 |
+
"model.layers.59.self_attn.k_proj.weight": "model-00041-of-00044.safetensors",
|
563 |
+
"model.layers.59.self_attn.o_proj.weight": "model-00041-of-00044.safetensors",
|
564 |
+
"model.layers.59.self_attn.q_norm.weight": "model-00041-of-00044.safetensors",
|
565 |
+
"model.layers.59.self_attn.q_proj.weight": "model-00041-of-00044.safetensors",
|
566 |
+
"model.layers.59.self_attn.v_proj.weight": "model-00041-of-00044.safetensors",
|
567 |
+
"model.layers.6.input_layernorm.weight": "model-00006-of-00044.safetensors",
|
568 |
+
"model.layers.6.mlp.down_proj.weight": "model-00006-of-00044.safetensors",
|
569 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00006-of-00044.safetensors",
|
570 |
+
"model.layers.6.mlp.up_proj.weight": "model-00006-of-00044.safetensors",
|
571 |
+
"model.layers.6.self_attn.k_norm.weight": "model-00005-of-00044.safetensors",
|
572 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00006-of-00044.safetensors",
|
573 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00006-of-00044.safetensors",
|
574 |
+
"model.layers.6.self_attn.q_norm.weight": "model-00005-of-00044.safetensors",
|
575 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00006-of-00044.safetensors",
|
576 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00006-of-00044.safetensors",
|
577 |
+
"model.layers.60.input_layernorm.weight": "model-00042-of-00044.safetensors",
|
578 |
+
"model.layers.60.mlp.down_proj.weight": "model-00042-of-00044.safetensors",
|
579 |
+
"model.layers.60.mlp.gate_proj.weight": "model-00042-of-00044.safetensors",
|
580 |
+
"model.layers.60.mlp.up_proj.weight": "model-00042-of-00044.safetensors",
|
581 |
+
"model.layers.60.self_attn.k_norm.weight": "model-00041-of-00044.safetensors",
|
582 |
+
"model.layers.60.self_attn.k_proj.weight": "model-00042-of-00044.safetensors",
|
583 |
+
"model.layers.60.self_attn.o_proj.weight": "model-00042-of-00044.safetensors",
|
584 |
+
"model.layers.60.self_attn.q_norm.weight": "model-00041-of-00044.safetensors",
|
585 |
+
"model.layers.60.self_attn.q_proj.weight": "model-00042-of-00044.safetensors",
|
586 |
+
"model.layers.60.self_attn.v_proj.weight": "model-00042-of-00044.safetensors",
|
587 |
+
"model.layers.61.input_layernorm.weight": "model-00043-of-00044.safetensors",
|
588 |
+
"model.layers.61.mlp.down_proj.weight": "model-00043-of-00044.safetensors",
|
589 |
+
"model.layers.61.mlp.gate_proj.weight": "model-00042-of-00044.safetensors",
|
590 |
+
"model.layers.61.mlp.up_proj.weight": "model-00043-of-00044.safetensors",
|
591 |
+
"model.layers.61.self_attn.k_norm.weight": "model-00042-of-00044.safetensors",
|
592 |
+
"model.layers.61.self_attn.k_proj.weight": "model-00042-of-00044.safetensors",
|
593 |
+
"model.layers.61.self_attn.o_proj.weight": "model-00042-of-00044.safetensors",
|
594 |
+
"model.layers.61.self_attn.q_norm.weight": "model-00042-of-00044.safetensors",
|
595 |
+
"model.layers.61.self_attn.q_proj.weight": "model-00042-of-00044.safetensors",
|
596 |
+
"model.layers.61.self_attn.v_proj.weight": "model-00042-of-00044.safetensors",
|
597 |
+
"model.layers.62.input_layernorm.weight": "model-00043-of-00044.safetensors",
|
598 |
+
"model.layers.62.mlp.down_proj.weight": "model-00043-of-00044.safetensors",
|
599 |
+
"model.layers.62.mlp.gate_proj.weight": "model-00043-of-00044.safetensors",
|
600 |
+
"model.layers.62.mlp.up_proj.weight": "model-00043-of-00044.safetensors",
|
601 |
+
"model.layers.62.self_attn.k_norm.weight": "model-00043-of-00044.safetensors",
|
602 |
+
"model.layers.62.self_attn.k_proj.weight": "model-00043-of-00044.safetensors",
|
603 |
+
"model.layers.62.self_attn.o_proj.weight": "model-00043-of-00044.safetensors",
|
604 |
+
"model.layers.62.self_attn.q_norm.weight": "model-00043-of-00044.safetensors",
|
605 |
+
"model.layers.62.self_attn.q_proj.weight": "model-00043-of-00044.safetensors",
|
606 |
+
"model.layers.62.self_attn.v_proj.weight": "model-00043-of-00044.safetensors",
|
607 |
+
"model.layers.63.input_layernorm.weight": "model-00044-of-00044.safetensors",
|
608 |
+
"model.layers.63.mlp.down_proj.weight": "model-00044-of-00044.safetensors",
|
609 |
+
"model.layers.63.mlp.gate_proj.weight": "model-00044-of-00044.safetensors",
|
610 |
+
"model.layers.63.mlp.up_proj.weight": "model-00044-of-00044.safetensors",
|
611 |
+
"model.layers.63.self_attn.k_norm.weight": "model-00043-of-00044.safetensors",
|
612 |
+
"model.layers.63.self_attn.k_proj.weight": "model-00044-of-00044.safetensors",
|
613 |
+
"model.layers.63.self_attn.o_proj.weight": "model-00044-of-00044.safetensors",
|
614 |
+
"model.layers.63.self_attn.q_norm.weight": "model-00043-of-00044.safetensors",
|
615 |
+
"model.layers.63.self_attn.q_proj.weight": "model-00044-of-00044.safetensors",
|
616 |
+
"model.layers.63.self_attn.v_proj.weight": "model-00044-of-00044.safetensors",
|
617 |
+
"model.layers.7.input_layernorm.weight": "model-00007-of-00044.safetensors",
|
618 |
+
"model.layers.7.mlp.down_proj.weight": "model-00007-of-00044.safetensors",
|
619 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00006-of-00044.safetensors",
|
620 |
+
"model.layers.7.mlp.up_proj.weight": "model-00007-of-00044.safetensors",
|
621 |
+
"model.layers.7.self_attn.k_norm.weight": "model-00006-of-00044.safetensors",
|
622 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00006-of-00044.safetensors",
|
623 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00006-of-00044.safetensors",
|
624 |
+
"model.layers.7.self_attn.q_norm.weight": "model-00006-of-00044.safetensors",
|
625 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00006-of-00044.safetensors",
|
626 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00006-of-00044.safetensors",
|
627 |
+
"model.layers.8.input_layernorm.weight": "model-00007-of-00044.safetensors",
|
628 |
+
"model.layers.8.mlp.down_proj.weight": "model-00007-of-00044.safetensors",
|
629 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00007-of-00044.safetensors",
|
630 |
+
"model.layers.8.mlp.up_proj.weight": "model-00007-of-00044.safetensors",
|
631 |
+
"model.layers.8.self_attn.k_norm.weight": "model-00007-of-00044.safetensors",
|
632 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00007-of-00044.safetensors",
|
633 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00007-of-00044.safetensors",
|
634 |
+
"model.layers.8.self_attn.q_norm.weight": "model-00007-of-00044.safetensors",
|
635 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00007-of-00044.safetensors",
|
636 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00007-of-00044.safetensors",
|
637 |
+
"model.layers.9.input_layernorm.weight": "model-00008-of-00044.safetensors",
|
638 |
+
"model.layers.9.mlp.down_proj.weight": "model-00008-of-00044.safetensors",
|
639 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00008-of-00044.safetensors",
|
640 |
+
"model.layers.9.mlp.up_proj.weight": "model-00008-of-00044.safetensors",
|
641 |
+
"model.layers.9.self_attn.k_norm.weight": "model-00007-of-00044.safetensors",
|
642 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00008-of-00044.safetensors",
|
643 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00008-of-00044.safetensors",
|
644 |
+
"model.layers.9.self_attn.q_norm.weight": "model-00007-of-00044.safetensors",
|
645 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00008-of-00044.safetensors",
|
646 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00008-of-00044.safetensors",
|
647 |
+
"model.norm.weight": "model-00044-of-00044.safetensors"
|
648 |
+
}
|
649 |
+
}
|
output-00001-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ef213bc4cbdc8e8f08b394be8967de3c8d49d7ca41f1d83b64e332ecb7b9e590
|
3 |
+
size 8516577682
|
output-00002-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:435d14f92644bce341a08eebc829010b5f2df0132c72d7f654db6bfb7743e49a
|
3 |
+
size 8534832652
|
output-00003-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ba9bec35d943efa1e178cadf4f16730d8804eb7f125a36594fd1981f4225187f
|
3 |
+
size 8491177090
|
output-00004-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8110704f280646f5a37b671cd0fef6bf5c5a582717d69ef5db25cceb580d3c0f
|
3 |
+
size 8484950312
|
output-00005-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9c8a094ae3cf45f2474d64ab5423ee08bc8b60d59c3ee8ac9104cdd073c1cee9
|
3 |
+
size 8580884000
|
output-00006-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:15e16def1325e46c1074e12c93c9d938239253a4f96d65f017ed56f8195b346a
|
3 |
+
size 7703357354
|
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"bos_token": {
|
3 |
+
"content": "<BOS_TOKEN>",
|
4 |
+
"lstrip": false,
|
5 |
+
"normalized": false,
|
6 |
+
"rstrip": false,
|
7 |
+
"single_word": false
|
8 |
+
},
|
9 |
+
"eos_token": {
|
10 |
+
"content": "<|END_OF_TURN_TOKEN|>",
|
11 |
+
"lstrip": false,
|
12 |
+
"normalized": false,
|
13 |
+
"rstrip": false,
|
14 |
+
"single_word": false
|
15 |
+
},
|
16 |
+
"pad_token": {
|
17 |
+
"content": "<PAD>",
|
18 |
+
"lstrip": false,
|
19 |
+
"normalized": false,
|
20 |
+
"rstrip": false,
|
21 |
+
"single_word": false
|
22 |
+
}
|
23 |
+
}
|
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c69a7ea6c0927dfac8c349186ebcf0466a4723c21cbdb2e850cf559f0bee92b8
|
3 |
+
size 12777433
|
tokenizer_config.json
ADDED
@@ -0,0 +1,330 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<PAD>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<UNK>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<CLS>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<SEP>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "<MASK_TOKEN>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "<BOS_TOKEN>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": "<EOS_TOKEN>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "7": {
+      "content": "<EOP_TOKEN>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "255000": {
+      "content": "<|START_OF_TURN_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255001": {
+      "content": "<|END_OF_TURN_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "255002": {
+      "content": "<|YES_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255003": {
+      "content": "<|NO_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255004": {
+      "content": "<|GOOD_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255005": {
+      "content": "<|BAD_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255006": {
+      "content": "<|USER_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255007": {
+      "content": "<|CHATBOT_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255008": {
+      "content": "<|SYSTEM_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255009": {
+      "content": "<|USER_0_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255010": {
+      "content": "<|USER_1_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255011": {
+      "content": "<|USER_2_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255012": {
+      "content": "<|USER_3_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255013": {
+      "content": "<|USER_4_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255014": {
+      "content": "<|USER_5_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255015": {
+      "content": "<|USER_6_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255016": {
+      "content": "<|USER_7_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255017": {
+      "content": "<|USER_8_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255018": {
+      "content": "<|USER_9_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255019": {
+      "content": "<|EXTRA_0_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255020": {
+      "content": "<|EXTRA_1_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255021": {
+      "content": "<|EXTRA_2_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255022": {
+      "content": "<|EXTRA_3_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255023": {
+      "content": "<|EXTRA_4_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255024": {
+      "content": "<|EXTRA_5_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255025": {
+      "content": "<|EXTRA_6_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255026": {
+      "content": "<|EXTRA_7_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255027": {
+      "content": "<|EXTRA_8_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "255028": {
+      "content": "<|EXTRA_9_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "bos_token": "<BOS_TOKEN>",
+  "chat_template": [
+    {
+      "name": "default",
+      "template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true %}{% set loop_messages = messages %}{% set system_message = 'You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'assistant' %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}{% endif %}"
+    },
+    {
+      "name": "tool_use",
+      "template": "\n{%- macro json_to_python_type(json_spec) %}\n{%- set basic_type_map = {\n \"string\": \"str\",\n \"number\": \"float\",\n \"integer\": \"int\",\n \"boolean\": \"bool\"\n} %}\n\n{%- if basic_type_map[json_spec.type] is defined %}\n {{- basic_type_map[json_spec.type] }}\n{%- elif json_spec.type == \"array\" %}\n {{- \"List[\" + json_to_python_type(json_spec.items) + \"]\"}}\n{%- elif json_spec.type == \"object\" %}\n {{- \"Dict[str, \" + json_to_python_type(json_spec.additionalProperties) + ']'}}\n{%- elif json_spec.type is iterable %}\n {{- \"Union[\" }}\n {%- for t in json_spec.type %}\n {{- json_to_python_type({\"type\": t}) }}\n {%- if not loop.last %}\n {{- \",\" }} \n {%- endif %}\n {%- endfor %}\n {{- \"]\" }}\n{%- else %}\n {{- \"Any\" }}\n{%- endif %}\n{%- endmacro %}\n\n{%- macro old_tool_parser(tools) %}\n{%- for tool in tools %}\n {%- if loop.index0 != 0 %}\n {{- '\\n\\n' }}\n {%- endif %}\n {{- '```python\\ndef ' + tool.name + '(' }}\n {%- for param_name, param_fields in tool.parameter_definitions|items %}\n {%- if loop.index0 != 0 %}\n {{- ', '}}\n {%- endif %}\n {{- param_name + ': ' }}\n {%- if not param_fields.required %}\n {{- 'Optional[' + param_fields.type + '] = None'}}\n {%- else %}\n {{- param_fields.type }}\n {%- endif %}\n {%- endfor %}\n {{- ') -> List[Dict]:\\n \"\"\"'}}\n {{- tool.description }}\n {%- if tool.parameter_definitions|length != 0 %}\n {{- '\\n\\n Args:\\n '}}\n {%- for param_name, param_fields in tool.parameter_definitions|items %}\n {%- if loop.index0 != 0 %}\n {{- '\\n ' }}\n {%- endif %}\n {{- param_name + ' ('}}\n {%- if not param_fields.required %}\n {{- 'Optional[' + param_fields.type + ']'}}\n {%- else %}\n {{- param_fields.type }}\n {%- endif %}\n {{- '): ' + param_fields.description }}\n {%- endfor %}\n {%- endif %}\n {{- '\\n \"\"\"\\n pass\\n```' }}\n{%- endfor %}\n{%- endmacro %}\n\n{%- macro new_tool_parser(tools) %}\n{%- for tool in tools %}\n {%- if loop.index0 != 0 %}\n {{- '\\n\\n'}}\n {%- endif %}\n {%- if tool.function is defined %}\n {%- set tool = tool.function %}\n {%- endif %}\n {{-'```python\ndef ' + tool.name + '('}}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {%- if loop.index0 != 0 %}\n {{- ', '}}\n {%- endif %}\n {{-param_name + \": \"}} \n {%- if not param_name in tool.parameters.required %}\n {{-'Optional[' + json_to_python_type(param_fields) + '] = None'}}\n {%- else %}\n {{- json_to_python_type(param_fields) }}\n {%- endif %}\n {%- endfor %}\n {{- ') -> List[Dict]:\n \"\"\"'}}\n {{- tool.description }}\n {%- if tool.parameters.properties|length != 0 %}\n {{- '\\n\\n Args:\\n '}}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {%- if loop.index0 != 0 %}\n {{- '\\n ' }}\n {%- endif %}\n {{- param_name + ' ('}}\n {%- if not param_name in tool.parameters.required %}\n {{-'Optional[' + json_to_python_type(param_fields) + ']'}}\n {%- else %}\n {{- json_to_python_type(param_fields) }}\n {%- endif %}\n {{- '): ' + param_fields.description }}\n {%- endfor %}\n {%- endif %}\n {{- '\\n \"\"\"\\n pass\\n```' }}\n{%- endfor %}\n{%- endmacro %}\n\n{{- bos_token }}\n{%- if messages[0]['role'] == 'system' %}\n {%- set loop_messages = messages[1:] %}\n {%- set system_message = messages[0]['content'] %}\n{%- else %}\n {%- set loop_messages = messages %}\n {%- set system_message = '## Task and Context\\nYou help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user\\'s needs as best you can, which will be wide-ranging.\\n\\n## Style Guide\\nUnless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.' %}\n{%- endif %}\n{{- '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}\n{{- '# Safety Preamble' }}\n{{- '\nThe instructions in this section override those in the task description and style guide sections. Don\\'t answer questions that are harmful or immoral.' }}\n{{- '\n\n# System Preamble' }}\n{{- '\n## Basic Rules' }}\n{{- '\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user\\'s requests, you cite your sources in your answers, according to those instructions.' }}\n{{- '\n\n# User Preamble' }}\n{{- '\n' + system_message }}\n{{-'\n\n## Available Tools\nHere is a list of tools that you have available to you:\n\n'}}\n{%- set ns = namespace(new_tools=true) %}\n{%- for tool in tools %}\n {%- if tool.parameter_definitions is defined %}\n {%- set ns.new_tools = false %}\n {%- endif %}\n{%- endfor %}\n{%- if ns.new_tools %}\n {{- new_tool_parser(tools) }}\n{%- else %}\n {{- old_tool_parser(tools) }}\n{%- endif %}\n{{- '<|END_OF_TURN_TOKEN|>'}}\n{%- for message in loop_messages %}\n {%- set content = message['content'] %}\n {%- if message.role == 'user' %}\n {{- '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content|trim + '<|END_OF_TURN_TOKEN|>' }}\n {%- elif message.role == 'system' %}\n {{- '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + content|trim + '<|END_OF_TURN_TOKEN|>' }}\n {%- elif message.role == 'assistant' and message.tool_calls is defined %}\n {{- '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}\n {%- if message.content is defined %}\n {{- message.content|trim }}\n {%- endif %}\n {{- '\\nAction:\\n```json\\n[\\n' }}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '{\\n'|indent(4, first=true) }}\n {{- '\"tool_name\": \"'|indent(8, first=true) + tool_call.name + '\",\\n' }}\n {{- '\"parameters\": '|indent(8, first=true) }}\n {%- if tool_call.arguments is defined and tool_call.arguments|length > 0 %} \n {{- tool_call.arguments|tojson(indent=4)|indent(8) }}\n {{- '\\n' }}\n {%- else %}\n {{- '{}\\n' }}\n {%- endif %}\n {{- '}'|indent(4, first=true) }}\n {%- if not loop.last %}\n {{- ',\\n' }}\n {%- endif %}\n {%- endfor %}\n {{- \"\\n]```\\n\" }}\n {%- elif message.role == 'assistant' %}\n {{- '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content|trim + '<|END_OF_TURN_TOKEN|>' }}\n {%- elif message.role == 'tool' %}\n {{- '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results>\\n' }}\n {{- message.content|trim }}\n {{- '</results><|END_OF_TURN_TOKEN|>' }}\n {%- endif %}\n{%- endfor %}\n{{-'<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write \\'Action:\\' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user\\'s last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:\n```json\n[\n {\n \"tool_name\": title of the tool in the specification,\n \"parameters\": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters\n }\n]```<|END_OF_TURN_TOKEN|>'}}\n{%- if add_generation_prompt %}\n {{- '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}\n{%- endif %}\n"
+    },
+    {
+      "name": "rag",
+      "template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = '## Task and Context\\nYou help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user\\'s needs as best you can, which will be wide-ranging.\\n\\n## Style Guide\\nUnless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.' %}{% endif %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}{{ '# Safety Preamble' }}{{ '\nThe instructions in this section override those in the task description and style guide sections. Don\\'t answer questions that are harmful or immoral.' }}{{ '\n\n# System Preamble' }}{{ '\n## Basic Rules' }}{{ '\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user\\'s requests, you cite your sources in your answers, according to those instructions.' }}{{ '\n\n# User Preamble' }}{{ '\n' + system_message }}{{ '<|END_OF_TURN_TOKEN|>'}}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'system' %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'assistant' %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% endfor %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>'}}{{ '<results>' }}{% for document in documents %}{{ '\nDocument: ' }}{{ loop.index0 }}\n{% for key, value in document.items() %}{{ key }}: {{value}}\n{% endfor %}{% endfor %}{{ '</results>'}}{{ '<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}{{ 'Carefully perform the following instructions, in order, starting each with a new line.\n' }}{{ 'Firstly, Decide which of the retrieved documents are relevant to the user\\'s last input by writing \\'Relevant Documents:\\' followed by comma-separated list of document numbers. If none are relevant, you should instead write \\'None\\'.\n' }}{{ 'Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user\\'s last input by writing \\'Cited Documents:\\' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write \\'None\\'.\n' }}{% if citation_mode=='accurate' %}{{ 'Thirdly, Write \\'Answer:\\' followed by a response to the user\\'s last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup.\n' }}{% endif %}{{ 'Finally, Write \\'Grounded answer:\\' followed by a response to the user\\'s last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.' }}{{ '<|END_OF_TURN_TOKEN|>' }}{% if add_generation_prompt %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}{% endif %}"
+    }
+  ],
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|END_OF_TURN_TOKEN|>",
+  "legacy": true,
+  "merges_file": null,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<PAD>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "CohereTokenizer",
+  "unk_token": null,
+  "use_default_system_prompt": false,
+  "vocab_file": null
+}
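The config registers three named chat templates (default, tool_use, rag), so one tokenizer can render plain chat, tool-calling prompts, and grounded-RAG prompts. A minimal sketch of rendering the default template, with the repo id taken from upload.py below and assuming a transformers release with Cohere support:

```python
from transformers import AutoTokenizer

# Render the "default" chat template defined in tokenizer_config.json.
tok = AutoTokenizer.from_pretrained("DBMe/command-r-plus-3.2bpw-h6-exl2")
messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# -> <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello!<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
```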
upload.py
ADDED
@@ -0,0 +1,45 @@
+from huggingface_hub import HfApi
+from pathlib import Path
+
+# Define the parameters for uploading
+repo_id = "DBMe/command-r-plus-3.2bpw-h6-exl2"  # Replace with your actual repo ID
+folder_path = "/home/asusws-x570-ace/programs/tabbyAPI-new/models/command-r-plus-3.2bpw-h6-exl2/"  # Replace with your folder path
+repo_type = "model"  # Change to "dataset" or "space" if applicable
+revision = "main"  # Optional: specify a branch, or use "main"
+private = False  # Set to True if the repository should be private
+allow_patterns = None  # Optional: specify patterns of files to include
+ignore_patterns = None  # Optional: specify patterns of files to exclude
+num_workers = 4  # Set based on your system; lower if your internet is unstable
+print_report = True  # Enable progress reporting
+print_report_every = 60  # Report frequency in seconds
+
+# Initialize the Hugging Face API client
+api = HfApi()
+
+# Function to upload the folder in a resumable manner
+def upload_resumable():
+    try:
+        print("Starting upload process...")
+
+        # Perform the upload with the provided parameters
+        api.upload_large_folder(
+            repo_id=repo_id,
+            folder_path=Path(folder_path),
+            repo_type=repo_type,
+            revision=revision,
+            private=private,
+            allow_patterns=allow_patterns,
+            ignore_patterns=ignore_patterns,
+            num_workers=num_workers,
+            print_report=print_report,
+            print_report_every=print_report_every,
+        )
+
+        print("Upload completed successfully!")
+
+    except Exception as e:
+        print(f"Upload interrupted due to error: {e}")
+        print("You can resume the upload by running the script again.")
+
+# Call the function to start the upload
+upload_resumable()
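A note on the resumability the script relies on: `upload_large_folder` splits the job into per-file hash, pre-upload, and commit tasks, runs them across `num_workers` in parallel, and caches progress metadata locally (in a `.cache/huggingface/` subfolder of the upload directory, per the huggingface_hub documentation), so rerunning `python upload.py` after a dropped connection should pick up from the already-committed shards rather than re-hashing and re-uploading everything.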