Add files using upload-large-folder tool
- .gitattributes +1 -0
- added_tokens.json +0 -1
- config.json +3 -3
- generation_config.json +2 -2
- model-00001-of-00002.safetensors +2 -2
- model-00002-of-00002.safetensors +2 -2
- model.safetensors.index.json +82 -82
- special_tokens_map.json +1 -1
- tokenizer.json +0 -0
- tokenizer_config.json +3 -10
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
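With this rule, tokenizer.json is stored as a Git LFS pointer rather than a plain blob. A minimal sketch (local path assumed) to tell the two apart after a clone made without LFS:

```python
# Sketch: detect whether tokenizer.json is an LFS pointer or the real file.
# Without `git lfs`, a clone leaves only a short text pointer behind.
with open("tokenizer.json", "rb") as f:
    head = f.read(64)
print("LFS pointer" if head.startswith(b"version https://git-lfs") else "real file")
```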
added_tokens.json
CHANGED
@@ -1,7 +1,6 @@
 {
   "</tool_call>": 151658,
   "<tool_call>": 151657,
-  "<|PAD_TOKEN|>": 151665,
   "<|box_end|>": 151649,
   "<|box_start|>": 151648,
   "<|endoftext|>": 151643,
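The ad-hoc `<|PAD_TOKEN|>` (id 151665) is dropped here; padding now reuses an existing special token (see the tokenizer files below). A quick sanity check, assuming the updated added_tokens.json is in the working directory:

```python
import json

# Sketch: confirm the extra padding token is gone from added_tokens.json.
with open("added_tokens.json") as f:
    added = json.load(f)

assert "<|PAD_TOKEN|>" not in added
assert max(added.values()) < 151665  # no id beyond the removed token remains
```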
config.json
CHANGED
@@ -4,7 +4,6 @@
     "Qwen2ForCausalLM"
   ],
   "attention_dropout": 0.0,
-  "bos_token_id": 151643,
   "eos_token_id": 151645,
   "hidden_act": "silu",
   "hidden_size": 2048,
@@ -16,13 +15,14 @@
   "num_attention_heads": 16,
   "num_hidden_layers": 36,
   "num_key_value_heads": 2,
-  "pad_token_id":
+  "pad_token_id": 151654,
   "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
   "rope_theta": 1000000.0,
   "sliding_window": null,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.49.0.dev0",
   "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,
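The notable changes here: `bos_token_id` is removed, `pad_token_id` is set to 151654 (the id the tokenizer files below bind to `<|vision_pad|>`), and `rope_scaling` is pinned to null. A small consistency probe, assuming this commit's config.json is local:

```python
import json

# Sketch: verify the updated config.json fields from this commit.
with open("config.json") as f:
    cfg = json.load(f)

assert cfg["pad_token_id"] == 151654   # matches <|vision_pad|> below
assert cfg["rope_scaling"] is None
assert "bos_token_id" not in cfg       # removed in this commit
```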
generation_config.json
CHANGED
@@ -6,10 +6,10 @@
     151643
   ],
   "max_length": 32768,
-  "pad_token_id":
+  "pad_token_id": 151654,
   "repetition_penalty": 1.05,
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
-  "transformers_version": "4.
+  "transformers_version": "4.49.0.dev0"
 }
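generation_config.json receives the same pad_token_id, so the model config and the generation defaults agree. A minimal cross-check under the same local-files assumption:

```python
import json

# Sketch: config.json and generation_config.json must agree on padding.
with open("config.json") as f:
    model_cfg = json.load(f)
with open("generation_config.json") as f:
    gen_cfg = json.load(f)

assert model_cfg["pad_token_id"] == gen_cfg["pad_token_id"] == 151654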
model-00001-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1c309ac1c08153adb2846748be27a184646297975607ac809a5bff7aa36e2822
+size 4957560304
model-00002-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a22e9460cf702833fbe02be74b40815d19f1347b5093c67fac48e87ecbbe02d7
+size 1214366696
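Each pointer records the shard's sha256 and byte size, which makes an integrity check straightforward. A sketch, assuming the first shard has been fully downloaded next to the script (values taken from the pointer diff above):

```python
import hashlib
import os

# Sketch: verify a downloaded shard against its LFS pointer.
def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

path = "model-00001-of-00002.safetensors"
assert os.path.getsize(path) == 4957560304
assert sha256_of(path) == "1c309ac1c08153adb2846748be27a184646297975607ac809a5bff7aa36e2822"
```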
model.safetensors.index.json
CHANGED
@@ -172,11 +172,11 @@
     "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
     "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.21.input_layernorm.weight": "model-
-    "model.layers.21.mlp.down_proj.weight": "model-
+    "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.21.post_attention_layernorm.weight": "model-
+    "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
     "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
     "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
@@ -184,90 +184,90 @@
     "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
     "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.22.input_layernorm.weight": "model-
-    "model.layers.22.mlp.down_proj.weight": "model-
-    "model.layers.22.mlp.gate_proj.weight": "model-
-    "model.layers.22.mlp.up_proj.weight": "model-
-    "model.layers.22.post_attention_layernorm.weight": "model-
-    "model.layers.22.self_attn.k_proj.bias": "model-
-    "model.layers.22.self_attn.k_proj.weight": "model-
-    "model.layers.22.self_attn.o_proj.weight": "model-
-    "model.layers.22.self_attn.q_proj.bias": "model-
-    "model.layers.22.self_attn.q_proj.weight": "model-
-    "model.layers.22.self_attn.v_proj.bias": "model-
-    "model.layers.22.self_attn.v_proj.weight": "model-
-    "model.layers.23.input_layernorm.weight": "model-
-    "model.layers.23.mlp.down_proj.weight": "model-
-    "model.layers.23.mlp.gate_proj.weight": "model-
-    "model.layers.23.mlp.up_proj.weight": "model-
-    "model.layers.23.post_attention_layernorm.weight": "model-
-    "model.layers.23.self_attn.k_proj.bias": "model-
-    "model.layers.23.self_attn.k_proj.weight": "model-
-    "model.layers.23.self_attn.o_proj.weight": "model-
-    "model.layers.23.self_attn.q_proj.bias": "model-
-    "model.layers.23.self_attn.q_proj.weight": "model-
-    "model.layers.23.self_attn.v_proj.bias": "model-
-    "model.layers.23.self_attn.v_proj.weight": "model-
-    "model.layers.24.input_layernorm.weight": "model-
-    "model.layers.24.mlp.down_proj.weight": "model-
-    "model.layers.24.mlp.gate_proj.weight": "model-
-    "model.layers.24.mlp.up_proj.weight": "model-
-    "model.layers.24.post_attention_layernorm.weight": "model-
-    "model.layers.24.self_attn.k_proj.bias": "model-
-    "model.layers.24.self_attn.k_proj.weight": "model-
-    "model.layers.24.self_attn.o_proj.weight": "model-
-    "model.layers.24.self_attn.q_proj.bias": "model-
-    "model.layers.24.self_attn.q_proj.weight": "model-
-    "model.layers.24.self_attn.v_proj.bias": "model-
-    "model.layers.24.self_attn.v_proj.weight": "model-
-    "model.layers.25.input_layernorm.weight": "model-
-    "model.layers.25.mlp.down_proj.weight": "model-
-    "model.layers.25.mlp.gate_proj.weight": "model-
-    "model.layers.25.mlp.up_proj.weight": "model-
-    "model.layers.25.post_attention_layernorm.weight": "model-
-    "model.layers.25.self_attn.k_proj.bias": "model-
-    "model.layers.25.self_attn.k_proj.weight": "model-
-    "model.layers.25.self_attn.o_proj.weight": "model-
-    "model.layers.25.self_attn.q_proj.bias": "model-
-    "model.layers.25.self_attn.q_proj.weight": "model-
-    "model.layers.25.self_attn.v_proj.bias": "model-
-    "model.layers.25.self_attn.v_proj.weight": "model-
-    "model.layers.26.input_layernorm.weight": "model-
-    "model.layers.26.mlp.down_proj.weight": "model-
-    "model.layers.26.mlp.gate_proj.weight": "model-
-    "model.layers.26.mlp.up_proj.weight": "model-
-    "model.layers.26.post_attention_layernorm.weight": "model-
-    "model.layers.26.self_attn.k_proj.bias": "model-
-    "model.layers.26.self_attn.k_proj.weight": "model-
-    "model.layers.26.self_attn.o_proj.weight": "model-
-    "model.layers.26.self_attn.q_proj.bias": "model-
-    "model.layers.26.self_attn.q_proj.weight": "model-
-    "model.layers.26.self_attn.v_proj.bias": "model-
-    "model.layers.26.self_attn.v_proj.weight": "model-
-    "model.layers.27.input_layernorm.weight": "model-
-    "model.layers.27.mlp.down_proj.weight": "model-
-    "model.layers.27.mlp.gate_proj.weight": "model-
-    "model.layers.27.mlp.up_proj.weight": "model-
-    "model.layers.27.post_attention_layernorm.weight": "model-
-    "model.layers.27.self_attn.k_proj.bias": "model-
-    "model.layers.27.self_attn.k_proj.weight": "model-
-    "model.layers.27.self_attn.o_proj.weight": "model-
-    "model.layers.27.self_attn.q_proj.bias": "model-
-    "model.layers.27.self_attn.q_proj.weight": "model-
-    "model.layers.27.self_attn.v_proj.bias": "model-
-    "model.layers.27.self_attn.v_proj.weight": "model-
+    "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.27.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.28.self_attn.k_proj.bias": "model-
-    "model.layers.28.self_attn.k_proj.weight": "model-
-    "model.layers.28.self_attn.o_proj.weight": "model-
-    "model.layers.28.self_attn.q_proj.bias": "model-
-    "model.layers.28.self_attn.q_proj.weight": "model-
-    "model.layers.28.self_attn.v_proj.bias": "model-
-    "model.layers.28.self_attn.v_proj.weight": "model-
+    "model.layers.28.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.28.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.28.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.28.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.28.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.28.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+    "model.layers.28.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
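The `weight_map` changes re-home the layer 21-28 tensors shown above onto the first shard, matching the new shard sizes. A short sketch (index file assumed local) to inspect the resulting assignment:

```python
import json
from collections import Counter

# Sketch: summarize which shard each tensor lives on after this commit.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(Counter(weight_map.values()))  # tensor count per shard

# Per the diff, layer 28's attention weights now sit on the first shard
# while its layernorm/MLP weights stay on the second:
print(weight_map["model.layers.28.self_attn.q_proj.weight"])
print(weight_map["model.layers.28.mlp.up_proj.weight"])
```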
special_tokens_map.json
CHANGED
@@ -22,7 +22,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "<|
+    "content": "<|vision_pad|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json
CHANGED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
CHANGED
@@ -177,14 +177,6 @@
       "rstrip": false,
       "single_word": false,
       "special": false
-    },
-    "151665": {
-      "content": "<|PAD_TOKEN|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "additional_special_tokens": [
@@ -207,8 +199,9 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
-  "
-  "
+  "extra_special_tokens": {},
+  "model_max_length": 32768,
+  "pad_token": "<|vision_pad|>",
   "padding_side": "left",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
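Taken together, the tokenizer files now pad with `<|vision_pad|>` on the left and cap `model_max_length` at 32768. An end-to-end check, assuming this commit's files sit in a local directory named ./model (the path is an assumption):

```python
from transformers import AutoTokenizer

# Sketch: load the tokenizer from this commit's files and confirm the
# padding setup introduced above.
tok = AutoTokenizer.from_pretrained("./model")
assert tok.pad_token == "<|vision_pad|>"
assert tok.pad_token_id == 151654
assert tok.padding_side == "left"
assert tok.model_max_length == 32768
```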