rphrp1985 committed
Commit ccb1159 · verified · 1 parent: 6426790

Update app.py

Files changed (1): app.py (+3 -3)
app.py CHANGED
@@ -47,7 +47,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 # model_id = "mistralai/Mistral-7B-v0.3"
 
 model_id = "openchat/openchat-3.6-8b-20240522"
-model_id = "Qwen/Qwen2-7B-Instruct"
+model_id = "01-ai/Yi-1.5-34B-Chat"
 
 
 tokenizer = AutoTokenizer.from_pretrained(
@@ -61,10 +61,10 @@ accelerator = Accelerator()
 model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
 # torch_dtype= torch.uint8,
 torch_dtype=torch.bfloat16,
-# load_in_8bit=True,
+load_in_8bit=True,
 # # # torch_dtype=torch.fl,
 attn_implementation="flash_attention_2",
-# low_cpu_mem_usage=True,
+low_cpu_mem_usage=True,
 # device_map='cuda',
 # device_map=accelerator.device_map,
 
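For context, a minimal sketch of how the model-loading block in app.py reads after this commit. This is a reconstruction, not the file itself: it assumes the from_pretrained call closes on a line outside the hunk, that the Hugging Face access token `token` (defined elsewhere in app.py) can be read from a hypothetical HF_TOKEN environment variable, and it omits the arguments that remain commented out in the diff.

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "01-ai/Yi-1.5-34B-Chat"

# Assumption: app.py defines `token` elsewhere; here it is read from the environment.
token = os.environ.get("HF_TOKEN")

tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=token,
    torch_dtype=torch.bfloat16,               # compute dtype for non-quantized modules
    load_in_8bit=True,                        # enabled by this commit: 8-bit weights via bitsandbytes
    low_cpu_mem_usage=True,                   # enabled by this commit: lower peak host RAM while loading
    attn_implementation="flash_attention_2",  # requires the flash-attn package
)

Note that load_in_8bit relies on the bitsandbytes package and a CUDA GPU, and attn_implementation="flash_attention_2" requires flash-attn to be installed; newer transformers releases prefer passing a BitsAndBytesConfig via quantization_config instead of the load_in_8bit flag.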