Update README.md
Browse files
README.md
CHANGED
@@ -27,7 +27,7 @@ model-index:
|
|
27 |
from unsloth import FastLanguageModel
|
28 |
from unsloth.chat_templates import get_chat_template
|
29 |
|
30 |
-
model_save_path = "path to the download folder" #
|
31 |
|
32 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
33 |
model_name=model_save_path,
|
@@ -38,11 +38,11 @@ model-index:
|
|
38 |
|
39 |
tokenizer = get_chat_template(
|
40 |
tokenizer,
|
41 |
-
chat_template="llama-3", #
|
42 |
-
mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"}, #
|
43 |
)
|
44 |
|
45 |
-
messages = [{"from": "human", "value": "your prompt"}]
|
46 |
inputs = tokenizer.apply_chat_template(
|
47 |
messages,
|
48 |
tokenize=True,
|
|
|
27 |
from unsloth import FastLanguageModel
|
28 |
from unsloth.chat_templates import get_chat_template
|
29 |
|
30 |
+
model_save_path = "path to the download folder" # path to the downloaded Hugging Face model folder
|
31 |
|
32 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
33 |
model_name=model_save_path,
|
|
|
38 |
|
39 |
tokenizer = get_chat_template(
|
40 |
tokenizer,
|
41 |
+
chat_template="llama-3", # use the llama-3 template
|
42 |
+
mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"}, # remap the message keys and role names
|
43 |
)
|
44 |
|
45 |
+
messages = [{"from": "human", "value": "your prompt"}]  # add your prompt here as the human turn
|
46 |
inputs = tokenizer.apply_chat_template(
|
47 |
messages,
|
48 |
tokenize=True,
|