mabaochang committed on
Commit
f9a0f52
1 Parent(s): 39a408a

Update README.md

Files changed (1): README.md (+1 -1)
README.md CHANGED
@@ -85,7 +85,7 @@ After you decrypt the files, BELLE-LLAMA-13B-2M can be easily loaded with LlamaForCausalLM.
 from transformers import LlamaForCausalLM, AutoTokenizer
 import torch
 
-ckpt = './result/BELLE-LLAMA-13B-2M'
+ckpt = './result/'
 device = torch.device('cuda')
 model = LlamaForCausalLM.from_pretrained(ckpt, device_map='auto', low_cpu_mem_usage=True)
 tokenizer = AutoTokenizer.from_pretrained(ckpt)
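
For reference, a minimal end-to-end generation sketch using the updated checkpoint path is shown below. It assumes the decrypted weights sit under `./result/` as in the diff; the `Human:`/`Assistant:` prompt template and the sampling settings are illustrative assumptions, not something specified by this commit.

```python
# Minimal sketch: load the decrypted BELLE-LLAMA-13B-2M weights and generate a reply.
# Assumes the files were decrypted into ./result/ (see the diff above).
from transformers import LlamaForCausalLM, AutoTokenizer
import torch

ckpt = './result/'
device = torch.device('cuda')
model = LlamaForCausalLM.from_pretrained(ckpt, device_map='auto', low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained(ckpt)

# The Human:/Assistant: template below is an assumption for illustration.
prompt = "Human: Write a short poem about spring.\n\nAssistant:"
inputs = tokenizer(prompt, return_tensors='pt').to(device)
with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=128,   # illustrative generation length
        do_sample=True,
        top_p=0.85,
        temperature=0.35,
    )
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```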