Update README.md

README.md
@@ -34,7 +34,7 @@ You can run conversational inference using the Transformers pipeline abstraction
 import transformers
 import torch
 
-model_id = "
+model_id = "haqishen/Llama-3-8B-Japanese-Instruct"
 
 pipeline = transformers.pipeline(
     "text-generation",
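For context, this hunk edits the README's Transformers pipeline example. A minimal runnable sketch of that snippet with the new `model_id` follows; the dtype, device placement, and generation settings mirror the stock Llama-3 instruct model card and are assumptions, not part of this diff:

```python
import transformers
import torch

model_id = "haqishen/Llama-3-8B-Japanese-Instruct"

# Build a text-generation pipeline; bfloat16 and device_map="auto" are
# assumed settings, not taken from the diff itself.
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Format a single-turn chat prompt with the model's chat template.
messages = [{"role": "user", "content": "Introduce yourself."}]
prompt = pipeline.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

outputs = pipeline(
    prompt,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
# Print only the completion, as in the README's own
# print(outputs[0]["generated_text"][len(prompt):]) context line below.
print(outputs[0]["generated_text"][len(prompt):])
```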
@@ -76,7 +76,7 @@ print(outputs[0]["generated_text"][len(prompt):])
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-model_id = "
+model_id = "haqishen/Llama-3-8B-Japanese-Instruct"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
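For context, this second hunk edits the README's direct AutoTokenizer/AutoModelForCausalLM example. A minimal runnable sketch with the new `model_id`; the dtype, device placement, and generation settings are again assumptions, not part of this diff:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "haqishen/Llama-3-8B-Japanese-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype
    device_map="auto",           # assumed device placement
)

# Encode a chat-formatted prompt and generate a reply.
messages = [{"role": "user", "content": "Introduce yourself."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```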