Update README.md
README.md CHANGED
@@ -34,7 +34,7 @@ def generate_answer(prompt, response_type="generativo"):
     model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
     generated_ids = model.generate(model_inputs, max_new_tokens=200, do_sample=True,
                                    temperature=0.001, eos_token_id=tokenizer.eos_token_id)
-    decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=
+    decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
     return decoded[0].split("[/INST]", 1)[1].strip() if "[/INST]" in decoded[0] else "Errore nella generazione della risposta"
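For context, the snippet being fixed belongs to a `generate_answer` helper built on Hugging Face `transformers`: the commit completes the truncated `skip_special_tokens=` argument so the decode call is valid Python. Below is a minimal, self-contained sketch of how the corrected line fits into that function; the model checkpoint, device handling, and message construction are illustrative assumptions and not part of the diff itself.

```python
# A minimal sketch, assuming an [INST]-style instruct model; the checkpoint name,
# device selection, and single-turn message layout are assumptions for illustration.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "mistralai/Mistral-7B-Instruct-v0.2"  # assumed checkpoint

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

def generate_answer(prompt, response_type="generativo"):
    # Build a single-turn chat and tokenize it with the model's chat template.
    messages = [{"role": "user", "content": prompt}]
    model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(device)
    # A temperature close to 0 with do_sample=True behaves almost like greedy decoding.
    generated_ids = model.generate(model_inputs, max_new_tokens=200, do_sample=True,
                                   temperature=0.001, eos_token_id=tokenizer.eos_token_id)
    # The line fixed by this commit: skip_special_tokens=True strips markers such as <s> and </s>.
    decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Everything after "[/INST]" is treated as the model's answer; otherwise report a generation error.
    return decoded[0].split("[/INST]", 1)[1].strip() if "[/INST]" in decoded[0] else "Errore nella generazione della risposta"
```

With `skip_special_tokens=True`, the decoded string loses the special tokens but keeps the `[/INST]` marker (assuming the chat template renders it as plain text), so the final `split` returns only the portion the model generated after the prompt.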