FlawedLLM committed on
Commit
bc3fc88
1 Parent(s): 05ddcdc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -2
app.py CHANGED
@@ -1,12 +1,10 @@
1
  import re
2
- from unsloth import FastLanguageModel
3
  from peft import PeftModel, PeftConfig
4
  from transformers import AutoModelForCausalLM
5
 
6
  config = PeftConfig.from_pretrained("FlawedLLM/BhashiniLLM")
7
  base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
8
  model = PeftModel.from_pretrained(base_model, "FlawedLLM/BhashiniLLM")
9
- FastLanguageModel.for_inference(model) # Enable native 2x faster inference
10
  tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
11
  @spaces.GPU
12
  def chunk_it(input_command):
 
import re

from peft import PeftModel, PeftConfig
# BUG FIX: AutoTokenizer is used below but was never imported — the diff removed
# the unsloth import without adding AutoTokenizer to the transformers import,
# so this script would crash with NameError at the tokenizer line.
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the LoRA/PEFT adapter config, the 4-bit quantized base model,
# and attach the adapter weights on top of it.
config = PeftConfig.from_pretrained("FlawedLLM/BhashiniLLM")
base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
model = PeftModel.from_pretrained(base_model, "FlawedLLM/BhashiniLLM")

# Tokenizer is loaded from the adapter repo so it matches any added tokens.
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
9
  @spaces.GPU
10
  def chunk_it(input_command):