Tags: Text Generation · Transformers · Safetensors · Korean · llama · text-generation-inference · Inference Endpoints
wkshin89 committed
Commit
cb396dd
1 Parent(s): 1909454

Update README.md

Files changed (1)
  1. README.md +3 -3
README.md CHANGED
@@ -9,7 +9,7 @@ language:
 base_model: beomi/Yi-Ko-6B
 ---

-# Yi-Ko-6B-Instruct-v1.1_
+# Yi-Ko-6B-Instruct-v1.1

 ## Model Details

@@ -36,9 +36,9 @@ base_model: beomi/Yi-Ko-6B
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer

-tokenizer = AutoTokenizer.from_pretrained("wkshin89/Yi-Ko-6B-Instruct-v1.1_")
+tokenizer = AutoTokenizer.from_pretrained("wkshin89/Yi-Ko-6B-Instruct-v1.1")
 model = AutoModelForCausalLM.from_pretrained(
-    "wkshin89/Yi-Ko-6B-Instruct-v1.1_",
+    "wkshin89/Yi-Ko-6B-Instruct-v1.1",
     device_map="auto",
     torch_dtype=torch.bfloat16,
 )
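For context, the sketch below shows how the corrected repo ID would be used end to end. The loading code mirrors the README snippet touched by this commit; the prompt text and generation settings (the example prompt, max_new_tokens) are illustrative assumptions and are not taken from the model card.

```python
# Minimal sketch: load the model with the corrected repo ID and run one generation.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "wkshin89/Yi-Ko-6B-Instruct-v1.1"  # corrected ID, without the trailing underscore

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",           # spread layers across available devices
    torch_dtype=torch.bfloat16,  # bf16 weights, as in the README snippet
)

# Hypothetical prompt ("What is the capital of Korea?"); the card's actual
# prompt/chat template is not shown in this diff.
prompt = "한국의 수도는 어디인가요?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```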