FlawedLLM committed on
Commit
7d60ecc
1 Parent(s): 52c65e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -61,8 +61,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
61
  # model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9",config=config, ignore_mismatched_sizes=True).to('cuda')
62
  # Load model directly
63
 
64
- tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00")
65
- model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00", load_in_4bit=True)
66
 
67
  @spaces.GPU(duration=300)
68
  def chunk_it(input_command):
 
61
  # model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9",config=config, ignore_mismatched_sizes=True).to('cuda')
62
  # Load model directly
63
 
64
+ tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma16")
65
+ model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_gemma16", load_in_4bit=True)
66
 
67
  @spaces.GPU(duration=300)
68
  def chunk_it(input_command):