Update README.md
README.md
CHANGED
@@ -25,8 +25,6 @@ llm = vllm.LLM(model=model_name,max_model_len=4096)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 messages = [
-    {"role": "system",
-     "content": "You first thinks about the reasoning process in the mind and then provides the user with the answer while reasoning step by step, and putting the final answer within \\boxed{}.The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think><answer> answer here </answer>."},
     {"role": "user", "content": f"早餐喝早餐店的奶茶會導致烙賽為什麼?"},
 ]
 prompts = tokenizer.apply_chat_template(
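For context, a minimal end-to-end sketch of how the updated, system-prompt-free `messages` list might be used with vLLM. The `model_name` placeholder, the `SamplingParams` values, and the `llm.generate` call below are illustrative assumptions, not part of the diff:

```python
import vllm
from transformers import AutoTokenizer

model_name = "your-model-checkpoint"  # assumption: placeholder; the README specifies the actual model
llm = vllm.LLM(model=model_name, max_model_len=4096)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# After this change, the chat contains only the user turn; no system prompt is prepended.
messages = [
    {"role": "user", "content": "早餐喝早餐店的奶茶會導致烙賽為什麼?"},
]

# Render the chat into a single prompt string using the model's chat template.
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)

# Illustrative sampling settings (assumed, not taken from the diff).
outputs = llm.generate([prompt], vllm.SamplingParams(temperature=0.6, max_tokens=1024))
print(outputs[0].outputs[0].text)
```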