FlawedLLM committed on
Commit
99c292a
1 Parent(s): 4e539da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -46,13 +46,15 @@ import torch
46
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
47
 
48
  tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_9")
49
- quantization_config = BitsAndBytesConfig(
50
- load_in_4bit=True,
51
- bnb_4bit_use_double_quant=True,
52
- bnb_4bit_quant_type="nf4",
53
- bnb_4bit_compute_dtype=torch.float16)
54
- torch_dtype =torch.float16
55
- model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9",quantization_config=quantization_config ,)
 
 
56
 
57
  @spaces.GPU(duration=300)
58
  def chunk_it(input_command):
 
46
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
47
 
48
  tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_9")
49
+ # quantization_config = BitsAndBytesConfig(
50
+ # load_in_4bit=True,
51
+ # bnb_4bit_use_double_quant=True,
52
+ # bnb_4bit_quant_type="nf4",
53
+ # bnb_4bit_compute_dtype=torch.float16
54
+ # )
55
+
56
+ # torch_dtype =torch.float16
57
+ model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9",use_safetensors= True ,)
58
 
59
  @spaces.GPU(duration=300)
60
  def chunk_it(input_command):