Update README.md
README.md CHANGED
@@ -2,4 +2,37 @@
datasets:
- ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered
license: llama2
---

## llama2-7b-WizardLM-alpaca-evol-instruct-35k-mini

This model is a 4-bit QLoRA refinement of llama-v2-guanaco, fine-tuned on 15k rows of the Hermes dataset.
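
The training recipe itself is not reproduced on this card, but a 4-bit QLoRA refinement of a Llama-2 base is typically assembled from a bitsandbytes 4-bit quantization config plus a PEFT LoRA adapter. The sketch below is illustrative only: the base checkpoint id and every hyperparameter are assumptions, not the settings actually used for this model.

```python
# Illustrative QLoRA setup (assumed values, not this model's exact recipe).
import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Placeholder base checkpoint; the card names llama-v2-guanaco as the starting point.
base_model = "meta-llama/Llama-2-7b-hf"

# 4-bit NF4 quantization keeps the frozen base weights small during training.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    device_map="auto",
)

# LoRA adapter trained on top of the quantized base; rank/alpha are assumed values.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```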

## Example:

```python
from transformers import AutoTokenizer
import transformers
import torch

model = "aloobun/llama2-7b-WizardLM-alpaca-evol-instruct-35k-mini"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

prompt = "Tell me about gravity."  # example prompt; substitute your own instruction

sequences = pipeline(
    f'[INST] {prompt} [/INST]',
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=200,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```
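
The example above loads the weights in float16. Since the card describes a 4-bit QLoRA refinement, the checkpoint can also be loaded with 4-bit quantization at inference time to reduce memory use. This variant is a sketch rather than part of the original card, and it assumes the bitsandbytes package is installed.

```python
# Optional variant: 4-bit inference (requires the bitsandbytes package).
import transformers
from transformers import AutoTokenizer, BitsAndBytesConfig

model = "aloobun/llama2-7b-WizardLM-alpaca-evol-instruct-35k-mini"
tokenizer = AutoTokenizer.from_pretrained(model)

pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    device_map="auto",
    model_kwargs={"quantization_config": BitsAndBytesConfig(load_in_4bit=True)},
)

prompt = "Explain what QLoRA is in two sentences."  # example prompt
sequences = pipeline(
    f'[INST] {prompt} [/INST]',
    max_length=200,
    eos_token_id=tokenizer.eos_token_id,
)
print(sequences[0]["generated_text"])
```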
|