FlawedLLM committed on
Commit
6ff4d75
1 Parent(s): 696557c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -12,7 +12,7 @@ quantization_config = BitsAndBytesConfig(
12
  bnb_4bit_use_double_quant=True,
13
  bnb_4bit_quant_type="nf4",
14
  bnb_4bit_compute_dtype=torch.float16)
15
- config=AutoConfig("FlawedLLM/Bhashini_00")
16
  model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
17
  device_map="auto",
18
  quantization_config=quantization_config,
 
12
  bnb_4bit_use_double_quant=True,
13
  bnb_4bit_quant_type="nf4",
14
  bnb_4bit_compute_dtype=torch.float16)
15
+ config=AutoConfig.from_pretrained("FlawedLLM/Bhashini_00")
16
  model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
17
  device_map="auto",
18
  quantization_config=quantization_config,