Update app.py
app.py
CHANGED
@@ -46,7 +46,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # model_id = "mistralai/Mistral-7B-v0.3"
 
-model_id = "CohereForAI/c4ai-command-r-
+model_id = "CohereForAI/c4ai-command-r-v01"
 
 
 tokenizer = AutoTokenizer.from_pretrained(
@@ -55,15 +55,15 @@ tokenizer = AutoTokenizer.from_pretrained(
 , token= token,)
 
 
-
+accelerator = Accelerator()
 
 model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
 # torch_dtype= torch.uint8,
-
-
+torch_dtype=torch.float16,
+load_in_4bit=True,
 # # # torch_dtype=torch.fl,
-
-
+attn_implementation="flash_attention_2",
+low_cpu_mem_usage=True,
 # device_map='cuda',
 # device_map=accelerator.device_map,
 
@@ -71,7 +71,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
 
 
 #
-
+model = accelerator.prepare(model)
 
 
 # device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })
@@ -102,7 +102,7 @@ def respond(
 
 messages= json_obj
 
-input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(
+input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(accelerator.device)
 input_ids2 = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, return_tensors="pt") #.to('cuda')
 print(f"Converted input_ids dtype: {input_ids.dtype}")
 input_str= str(input_ids2)
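Taken together, the added lines switch the Space to Command R v01, loaded in 4-bit with flash attention and wrapped by Accelerate. For readers following along, here is a minimal, self-contained sketch of that loading path. It is not the commit itself: it assumes a recent transformers with bitsandbytes and flash-attn installed, swaps the bare load_in_4bit=True kwarg for the BitsAndBytesConfig object that newer transformers releases prefer, and uses an environment-variable lookup as a placeholder for whatever defines `token` earlier in app.py.

import os

import torch
from accelerate import Accelerator
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "CohereForAI/c4ai-command-r-v01"
token = os.environ.get("HF_TOKEN")  # placeholder; app.py defines `token` elsewhere

accelerator = Accelerator()

tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)

# 4-bit quantization via bitsandbytes; newer transformers prefers this
# config object over passing load_in_4bit=True straight to from_pretrained().
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # mirrors torch_dtype=torch.float16 in the diff
)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=token,
    quantization_config=quant_config,
    attn_implementation="flash_attention_2",  # needs the flash-attn package and fp16/bf16
    low_cpu_mem_usage=True,
)

# As in the commit: let Accelerate wrap the model.
model = accelerator.prepare(model)

One caveat worth flagging: with 4-bit weights the model is already placed on the GPU at load time, so accelerator.prepare() here mostly serves to keep accelerator.device consistent with the inputs built later in respond().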
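And a hypothetical end-to-end view of the respond() side of the change, assuming the incoming json_obj holds messages in the usual role/content chat format; the example message and the generate() call are illustrative additions, not part of the commit.

# `model`, `tokenizer`, and `accelerator` as in the loading sketch above.
messages = [{"role": "user", "content": "Hello!"}]  # stands in for json_obj

# tokenize=True returns token ids as a tensor; .to(accelerator.device)
# keeps the inputs on the same device Accelerate chose for the model.
input_ids = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(accelerator.device)

# tokenize=False returns the rendered prompt string (the diff keeps this
# around as input_ids2/input_str, apparently for logging).
prompt_str = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt_str)

output_ids = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))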