FlawedLLM committed on
Commit
04ab46f
1 Parent(s): 322f74c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -64,13 +64,13 @@ from huggingface_hub import login, HfFolder
64
  # model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9",config=config, ignore_mismatched_sizes=True).to('cuda')
65
  # Load model directly
66
 
67
- tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00", trust_remote_code=True)
68
  quantization_config = BitsAndBytesConfig(
69
  load_in_4bit=True,
70
  bnb_4bit_use_double_quant=True,
71
  bnb_4bit_quant_type="nf4",
72
  bnb_4bit_compute_dtype=torch.float16)
73
- model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
74
  device_map="auto",
75
  quantization_config=quantization_config,
76
  torch_dtype =torch.float16,
 
64
  # model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9",config=config, ignore_mismatched_sizes=True).to('cuda')
65
  # Load model directly
66
 
67
+ tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini89", trust_remote_code=True)
68
  quantization_config = BitsAndBytesConfig(
69
  load_in_4bit=True,
70
  bnb_4bit_use_double_quant=True,
71
  bnb_4bit_quant_type="nf4",
72
  bnb_4bit_compute_dtype=torch.float16)
73
+ model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini89",
74
  device_map="auto",
75
  quantization_config=quantization_config,
76
  torch_dtype =torch.float16,