Daniel Steinigen
committed on
Commit
·
11ff4bc
1
Parent(s):
548ab9a
update safetensors
Browse files- README.md +1 -1
- config.json +1 -1
- generation_config.json +1 -1
- gptx_tokenizer.py +1 -1
- model-00001-of-00004.safetensors +1 -1
- model-00002-of-00004.safetensors +1 -1
- model-00003-of-00004.safetensors +1 -1
- model-00004-of-00004.safetensors +1 -1
README.md
CHANGED
@@ -91,7 +91,7 @@ system_messages={
|
|
91 |
" Der Assistent gibt hilfreiche und höfliche Antworten auf die Fragen des Menschen.",
|
92 |
}
|
93 |
|
94 |
-
prompt = f"System: {system_messages[lang_code]}\nUser: {user}\nAssistant
|
95 |
```
|
96 |
|
97 |
The prompt template is also directly integrated in the Tokenizer and can be used as follows:
|
|
|
91 |
" Der Assistent gibt hilfreiche und höfliche Antworten auf die Fragen des Menschen.",
|
92 |
}
|
93 |
|
94 |
+
prompt = f"System: {system_messages[lang_code]}\nUser: {user}\nAssistant:"
|
95 |
```
|
96 |
|
97 |
The prompt template is also directly integrated in the Tokenizer and can be used as follows:
|
config.json
CHANGED
@@ -31,7 +31,7 @@
|
|
31 |
"tie_word_embeddings": true,
|
32 |
"tokenizer_class": "SPTokenizer",
|
33 |
"torch_dtype": "bfloat16",
|
34 |
-
"transformers_version": "4.
|
35 |
"use_cache": false,
|
36 |
"vocab_size": 250680
|
37 |
}
|
|
|
31 |
"tie_word_embeddings": true,
|
32 |
"tokenizer_class": "SPTokenizer",
|
33 |
"torch_dtype": "bfloat16",
|
34 |
+
"transformers_version": "4.43.2",
|
35 |
"use_cache": false,
|
36 |
"vocab_size": 250680
|
37 |
}
|
generation_config.json
CHANGED
@@ -3,6 +3,6 @@
|
|
3 |
"bos_token_id": 1,
|
4 |
"eos_token_id": 2,
|
5 |
"pad_token_id": 3,
|
6 |
-
"transformers_version": "4.
|
7 |
"use_cache": false
|
8 |
}
|
|
|
3 |
"bos_token_id": 1,
|
4 |
"eos_token_id": 2,
|
5 |
"pad_token_id": 3,
|
6 |
+
"transformers_version": "4.43.2",
|
7 |
"use_cache": false
|
8 |
}
|
gptx_tokenizer.py
CHANGED
@@ -436,7 +436,7 @@ class SPTokenizer(HFGPTXTokenizer):
|
|
436 |
"SL": "Pogovor med človekom in pomočnikom z umetno inteligenco. Pomočnik človeku prijazno in vljudno odgovarja na njegova vprašanja.", # noqa
|
437 |
"SV": "En chatt mellan en människa och en assistent med artificiell intelligens. Assistenten ger hjälpsamma och artiga svar på människans frågor.", # noqa
|
438 |
}
|
439 |
-
chat_template = "{%- for message in messages %}\n{%- if (message['role']|lower == 'user') != (loop.index0 % 2 == 0) %}\n{{- raise_exception('Roles must alternate User/Assistant/User/Assistant/...') }}\n{%- endif %}\n{%-if message['role']|lower == 'user' %}\n{{- message['role']|capitalize + ': ' + message['content'] + '\\n' }}\n{%- elif message['role']|lower == 'assistant' %}\n{{- message['role']|capitalize + ': ' + message['content'] + eos_token + '\\n' }}\n{%- else %}\n{{- raise_exception('Only user and assistant roles are supported!') }}\n {%- endif %}\n{%- endfor %}{%-if add_generation_prompt %}\n{{- 'Assistant
|
440 |
self.chat_template = {
|
441 |
lang: f"System: {sys_msg}" + "{{- '\\n'}}\n" + chat_template
|
442 |
for lang, sys_msg in self.system_messages_by_lang.items()
|
|
|
436 |
"SL": "Pogovor med človekom in pomočnikom z umetno inteligenco. Pomočnik človeku prijazno in vljudno odgovarja na njegova vprašanja.", # noqa
|
437 |
"SV": "En chatt mellan en människa och en assistent med artificiell intelligens. Assistenten ger hjälpsamma och artiga svar på människans frågor.", # noqa
|
438 |
}
|
439 |
+
chat_template = "{%- for message in messages %}\n{%- if (message['role']|lower == 'user') != (loop.index0 % 2 == 0) %}\n{{- raise_exception('Roles must alternate User/Assistant/User/Assistant/...') }}\n{%- endif %}\n{%-if message['role']|lower == 'user' %}\n{{- message['role']|capitalize + ': ' + message['content'] + '\\n' }}\n{%- elif message['role']|lower == 'assistant' %}\n{{- message['role']|capitalize + ': ' + message['content'] + eos_token + '\\n' }}\n{%- else %}\n{{- raise_exception('Only user and assistant roles are supported!') }}\n {%- endif %}\n{%- endfor %}{%-if add_generation_prompt %}\n{{- 'Assistant: '}}\n{%- endif %}\n"
|
440 |
self.chat_template = {
|
441 |
lang: f"System: {sys_msg}" + "{{- '\\n'}}\n" + chat_template
|
442 |
for lang, sys_msg in self.system_messages_by_lang.items()
|
model-00001-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 4936228560
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1cfaf20eadf3845f489ea5067f4e6de1a681da576c19938a9ec0cfe242df5201
|
3 |
size 4936228560
|
model-00002-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 4929565048
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3b9950b06babbe7a3724b4d2be33cd97a24cc65fdcda04274d9db837f5151886
|
3 |
size 4929565048
|
model-00003-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 4929565072
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b5b1d86912a6116f6120b1b4660acedcb969039d610b0a2c64b02f960c137553
|
3 |
size 4929565072
|
model-00004-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 110125512
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e6d7af70db15816fdc38705f0e7dd9aa6420945eecfbe6870fa0ddce63adf2e3
|
3 |
size 110125512
|