updated usage card
This is Google's Gemma 2B model, fine-tuned on a mental health conversations dataset.
README.md (CHANGED)
@@ -4,6 +4,9 @@ tags:
 - text-generation-inference
 - text-generation
 - peft
+- chatbot
+- depression
+- therapy
 library_name: transformers
 widget:
 - messages:
@@ -18,28 +21,29 @@ This model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain).
 
 # Usage
 
-```python
-
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-model_path = "PATH_TO_THIS_REPO"
-
-tokenizer = AutoTokenizer.from_pretrained(model_path)
-model = AutoModelForCausalLM.from_pretrained(
-    model_path,
-    device_map="auto",
-    torch_dtype='auto'
-).eval()
-
-# Prompt content: "hi"
+from transformers import AutoTokenizer, pipeline
+import torch
+
+model = "Rhaps360/gemma-dep-ins-ft"
+
+tokenizer = AutoTokenizer.from_pretrained(model)
+pipeline = pipeline(
+    "text-generation",
+    model=model,
+    model_kwargs={"torch_dtype": torch.bfloat16},
+    device="cuda" if torch.cuda.is_available() else "cpu",
+)
+
 messages = [
-    {"role": "user", "content": "hi"}
+    {"role": "user", "content": "### Context: the input message goes here. ### Response: "}
 ]
-
-input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
-output_ids = model.generate(input_ids.to('cuda'))
-response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
-
-# Model response: "Hello! How can I assist you today?"
-print(response)
-```
+prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+outputs = pipeline(
+    prompt,
+    max_new_tokens=300,
+    do_sample=True,
+    temperature=0.2,
+    top_k=50,
+    top_p=0.95
+)
+print(outputs[0]["generated_text"][len(prompt):])