umarmajeedofficial committed on
Commit
73949c8
1 Parent(s): c95ea90

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -14
README.md CHANGED
@@ -56,27 +56,21 @@ How to Get Started with the Model
56
 
57
  To get started with the MyFriend model, use the code snippet below
58
 
 
 
59
 
 
60
 
61
-
62
- import torch
63
- from transformers import pipeline
64
-
65
- pipe = pipeline("text-generation", model="umarmajeedofficial/MyFriend", torch_dtype=torch.bfloat16, device_map="auto")
66
-
67
- messages = [
68
  {
69
  "role": "system",
70
  "content": "You are an emergency response assistant with expertise in environmental issues.",
71
  },
72
  {"role": "user", "content": "What should I do during a heat wave?"},
73
- ]
74
- prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
75
- outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
76
- print(outputs[0]["generated_text"])
77
-
78
-
79
-
80
 
81
 
82
 
 
56
 
57
  To get started with the MyFriend model, use the code snippet below
58
 
59
+ import torch
60
+ from transformers import pipeline
61
 
62
+ pipe = pipeline("text-generation", model="umarmajeedofficial/MyFriend", torch_dtype=torch.bfloat16, device_map="auto")
63
 
64
+ messages = [
 
 
 
 
 
 
65
  {
66
  "role": "system",
67
  "content": "You are an emergency response assistant with expertise in environmental issues.",
68
  },
69
  {"role": "user", "content": "What should I do during a heat wave?"},
70
+ ]
71
+ prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
72
+ outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
73
+ print(outputs[0]["generated_text"])
 
 
 
74
 
75
 
76