Add "Usage Example" section
README.md CHANGED
@@ -110,3 +110,36 @@ penalty_alpha: 0.5
 top_k: 4
 repetition_penalty: 1.01
 ```
+
+## Usage Example
+
+```python
+from transformers import pipeline
+
+generate = pipeline("text-generation", "Felladrin/Llama-160M-Chat-v1")
+
+messages = [
+    {
+        "role": "system",
+        "content": "You are a helpful assistant who answers user's questions with details and curiosity.",
+    },
+    {
+        "role": "user",
+        "content": "What are some potential applications for quantum computing?",
+    },
+]
+
+prompt = generate.tokenizer.apply_chat_template(
+    messages, tokenize=False, add_generation_prompt=True
+)
+
+output = generate(
+    prompt,
+    max_new_tokens=1024,
+    penalty_alpha=0.5,
+    top_k=4,
+    repetition_penalty=1.01,
+)
+
+print(output[0]["generated_text"])
+```
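
Setting `penalty_alpha` greater than 0 together with `top_k` greater than 1, as in the added example, selects contrastive-search decoding in transformers. For readers who prefer the lower-level API, here is a minimal sketch of an equivalent call via `AutoModelForCausalLM`; this variant is illustrative only and is not part of the diff above:

```python
# Illustrative equivalent of the pipeline example, using the lower-level
# transformers API; not part of the model card diff itself.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Felladrin/Llama-160M-Chat-v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What are some potential applications for quantum computing?"},
]

# Build the prompt string with the model's chat template.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

inputs = tokenizer(prompt, return_tensors="pt")

# penalty_alpha > 0 with top_k > 1 triggers contrastive search.
outputs = model.generate(
    **inputs,
    max_new_tokens=1024,
    penalty_alpha=0.5,
    top_k=4,
    repetition_penalty=1.01,
)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```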