FlawedLLM committed on
Commit
b4bedb5
1 Parent(s): 695a939

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -8
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import re
2
  import spaces
3
  import gradio as gr
4
- from transformers import AutoTokenizer, AutoModel, BitsAndBytesConfig
5
  import torch
6
  # from peft import PeftModel, PeftConfig
7
 
@@ -43,13 +42,11 @@ import torch
43
  # load_in_4bit = True,
44
  # )
45
  # tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini")
46
- from unsloth import FastLanguageModel
47
- model, tokenizer = FastLanguageModel.from_pretrained(
48
- model_name = "FlawedLLM/Bhashini_2", # YOUR MODEL YOU USED FOR TRAINING
49
- max_seq_length = 2048,
50
- dtype = torch.float16,
51
- load_in_4bit = True,)
52
- FastLanguageModel.for_inference(model)
53
 
54
  @spaces.GPU(duration=300)
55
  def chunk_it(input_command):
 
1
  import re
2
  import spaces
3
  import gradio as gr
 
4
  import torch
5
  # from peft import PeftModel, PeftConfig
6
 
 
42
  # load_in_4bit = True,
43
  # )
44
  # tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini")
45
+ # Load model directly
46
+ from transformers import AutoTokenizer, AutoModelForCausalLM
47
+
48
+ tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_9")
49
+ model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9", device_map='auto')
 
 
50
 
51
  @spaces.GPU(duration=300)
52
  def chunk_it(input_command):