Upload 2 files
Browse files
- README.md +22 -3
- tokenizer_config.json +1 -1
README.md
CHANGED
@@ -185,6 +185,25 @@ extra_gated_fields:
   By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox
 extra_gated_description: The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).
 extra_gated_button_content: Submit
+widget:
+- example_title: Winter holidays
+  messages:
+  - role: system
+    content: You are a helpful and honest assistant. Please, respond concisely and truthfully.
+  - role: user
+    content: Can you recommend a good destination for Winter holidays?
+- example_title: Programming assistant
+  messages:
+  - role: system
+    content: You are a helpful and honest code and programming assistant. Please, respond concisely and truthfully.
+  - role: user
+    content: Write a function that computes the nth fibonacci number.
+inference:
+  parameters:
+    max_new_tokens: 300
+    stop:
+    - <|end_of_text|>
+    - <|eot_id|>
 ---
 
 ## Model Details
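The new `widget:` block supplies the example conversations shown in the Hub's hosted inference widget, and `inference:` pins its generation parameters (a token budget plus the two Llama 3 stop strings). As a rough sketch of how those same parameters map onto a client call; the repo id below is an assumption, since the diff never names the repository:

```python
from huggingface_hub import InferenceClient

# Assumed repo id; the diff itself does not name the repository.
client = InferenceClient(model="meta-llama/Meta-Llama-3-8B-Instruct")

output = client.text_generation(
    "Can you recommend a good destination for Winter holidays?",
    max_new_tokens=300,                                # inference.parameters.max_new_tokens
    stop_sequences=["<|end_of_text|>", "<|eot_id|>"],  # inference.parameters.stop
)
print(output)
```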
@@ -285,7 +304,7 @@ pipeline = transformers.pipeline(
     "text-generation",
     model=model_id,
     model_kwargs={"torch_dtype": torch.bfloat16},
-    device="cuda",
+    device="auto",
 )
 
 messages = [
@@ -300,8 +319,8 @@ prompt = pipeline.tokenizer.apply_chat_template(
 )
 
 terminators = [
-    tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+    pipeline.tokenizer.eos_token_id,
+    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
 ]
 
 outputs = pipeline(
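Taken together, the two hunks above leave the README's quickstart in roughly the following state. This is a sketch assembled from the diff context only; the model id, the sampling arguments, and the lines the diff does not show are assumptions based on the usual transformers chat-pipeline pattern:

```python
import torch
import transformers

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumed; not named in the diff

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="auto",  # as in the updated card; some transformers versions expect device_map="auto" instead
)

messages = [
    {"role": "system", "content": "You are a helpful and honest assistant."},
    {"role": "user", "content": "Can you recommend a good destination for Winter holidays?"},
]

prompt = pipeline.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# The second hunk's fix: the snippet never defines a bare `tokenizer`,
# so the terminators must be read off the pipeline's own tokenizer.
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

outputs = pipeline(prompt, max_new_tokens=256, eos_token_id=terminators)
print(outputs[0]["generated_text"][len(prompt):])
```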
tokenizer_config.json
CHANGED
@@ -2050,7 +2050,7 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
-  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}",
+  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|end_of_text|>",
   "model_input_names": [
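The only change to `tokenizer_config.json` wraps the template's trailing assistant header in an `{% if add_generation_prompt %}` guard, so the generation prompt is appended only when the caller asks for it. A small sketch of the resulting behavior, again with the repo id assumed:

```python
from transformers import AutoTokenizer

# Assumed repo id; the diff itself does not name the repository.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

messages = [{"role": "user", "content": "Hi!"}]

# With the guard, the assistant header appears only on request, so formatting
# finished conversations (e.g. training data) no longer ends with a dangling
# generation prompt.
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# ...Hi!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n

print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False))
# ...Hi!<|eot_id|>   (no trailing assistant header)
```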