xmanii committed on
Commit
00f4a22
1 Parent(s): 6107dec

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -27,7 +27,7 @@ model-index:
27
  from unsloth import FastLanguageModel
28
  from unsloth.chat_templates import get_chat_template
29
 
30
- model_save_path = "path to the download folder" # Adjust this path as needed
31
 
32
  model, tokenizer = FastLanguageModel.from_pretrained(
33
  model_name=model_save_path,
@@ -38,11 +38,11 @@ model-index:
38
 
39
  tokenizer = get_chat_template(
40
  tokenizer,
41
- chat_template="llama-3", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth
42
- mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"}, # ShareGPT style
43
  )
44
 
45
- messages = [{"from": "human", "value": "your prompt"}]
46
  inputs = tokenizer.apply_chat_template(
47
  messages,
48
  tokenize=True,
 
27
  from unsloth import FastLanguageModel
28
  from unsloth.chat_templates import get_chat_template
29
 
30
+ model_save_path = "path to the download folder" # the Hugging Face model folder you downloaded.
31
 
32
  model, tokenizer = FastLanguageModel.from_pretrained(
33
  model_name=model_save_path,
 
38
 
39
  tokenizer = get_chat_template(
40
  tokenizer,
41
+ chat_template="llama-3", # use the llama-3 template
42
+ mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"}, # map message fields to the ShareGPT-style keys.
43
  )
44
 
45
+ messages = [{"from": "human", "value": "your prompt"}] # add your prompt here under the "human" role
46
  inputs = tokenizer.apply_chat_template(
47
  messages,
48
  tokenize=True,