Update README.md
README.md CHANGED
````diff
@@ -22,11 +22,20 @@ model = "recogna-nlp/phibode-3-mini-4k-ultraalpaca"
 tokenizer = AutoTokenizer.from_pretrained(model)
 
 # Example prompt
-
+messages = [
+    {"role": "system", "content": "Você é assistente de IA chamado PhiBode. O PhiBode é um modelo de língua conversacional projetado para ser prestativo, honesto e inofensivo."},
+    {"role": "user", "content": "<Insira seu prompt aqui>"},
+]
 
 # Generate a response
-model = AutoModelForCausalLM.from_pretrained(model)
+model = AutoModelForCausalLM.from_pretrained(model, trust_remote_code=True)
 pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
-
+generation_args = {
+    "max_new_tokens": 500,
+    "return_full_text": False,
+    "temperature": 0.0,
+    "do_sample": False,
+}
+outputs = pipeline(messages, **generation_args)
 print(outputs[0]["generated_text"])
 ```
````
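For reference, here is the updated example assembled as a minimal self-contained sketch. The `transformers` import line is an assumption, since the hunk starts at line 22 and the file's imports are not shown; everything else is taken from the new side of the diff. The Portuguese system prompt reads, in English: "You are an AI assistant named PhiBode. PhiBode is a conversational language model designed to be helpful, honest, and harmless."

```python
# Assumed imports; the diff hunk begins at line 22 and does not show them.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model = "recogna-nlp/phibode-3-mini-4k-ultraalpaca"
tokenizer = AutoTokenizer.from_pretrained(model)

# Example prompt (system message kept in Portuguese, as committed).
messages = [
    {"role": "system", "content": "Você é assistente de IA chamado PhiBode. O PhiBode é um modelo de língua conversacional projetado para ser prestativo, honesto e inofensivo."},
    {"role": "user", "content": "<Insira seu prompt aqui>"},
]

# Generate a response; `model` is rebound from the checkpoint name to the
# loaded model object, exactly as in the README.
model = AutoModelForCausalLM.from_pretrained(model, trust_remote_code=True)
pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,  # return only the newly generated text
    "temperature": 0.0,
    "do_sample": False,         # greedy decoding; temperature is then ignored
}
outputs = pipeline(messages, **generation_args)
print(outputs[0]["generated_text"])
```

Passing the `messages` list directly to the pipeline relies on its chat support, which applies the model's chat template before generation.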