Spaces:
Runtime error
Runtime error
Vision-CAIR
committed on
Commit
•
2de32bf
1
Parent(s):
c1ebdb7
Update minigpt4/models/base_model.py
Browse files
minigpt4/models/base_model.py
CHANGED
@@ -171,20 +171,22 @@ class BaseModel(nn.Module):
|
|
171 |
def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
|
172 |
lora_target_modules=["q_proj","v_proj"], **lora_kargs):
|
173 |
logging.info('Loading LLAMA')
|
174 |
-
llama_tokenizer = LlamaTokenizer.from_pretrained(
|
175 |
llama_tokenizer.pad_token = "$$"
|
176 |
|
177 |
if low_resource:
|
178 |
llama_model = LlamaForCausalLM.from_pretrained(
|
179 |
-
|
180 |
torch_dtype=torch.float16,
|
181 |
load_in_8bit=True,
|
182 |
-
device_map={'': low_res_device}
|
|
|
183 |
)
|
184 |
else:
|
185 |
llama_model = LlamaForCausalLM.from_pretrained(
|
186 |
-
|
187 |
torch_dtype=torch.float16,
|
|
|
188 |
)
|
189 |
|
190 |
if lora_r > 0:
|
|
|
171 |
def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
|
172 |
lora_target_modules=["q_proj","v_proj"], **lora_kargs):
|
173 |
logging.info('Loading LLAMA')
|
174 |
+
llama_tokenizer = LlamaTokenizer.from_pretrained("Vision-CAIR/llama-2-7b-chat-pytorch", use_fast=False, use_auth_token=True)
|
175 |
llama_tokenizer.pad_token = "$$"
|
176 |
|
177 |
if low_resource:
|
178 |
llama_model = LlamaForCausalLM.from_pretrained(
|
179 |
+
"Vision-CAIR/llama-2-7b-chat-pytorch",
|
180 |
torch_dtype=torch.float16,
|
181 |
load_in_8bit=True,
|
182 |
+
device_map={'': low_res_device},
|
183 |
+
use_auth_token=True
|
184 |
)
|
185 |
else:
|
186 |
llama_model = LlamaForCausalLM.from_pretrained(
|
187 |
+
"Vision-CAIR/llama-2-7b-chat-pytorch",
|
188 |
torch_dtype=torch.float16,
|
189 |
+
use_auth_token=True
|
190 |
)
|
191 |
|
192 |
if lora_r > 0:
|