FlawedLLM committed
Commit 18aeeba
1 Parent(s): 3c99a91

Update app.py

Files changed (1)
  1. app.py +8 -6
app.py CHANGED
@@ -1,13 +1,15 @@
 import re
 from peft import PeftModel, PeftConfig
-from transformers import AutoModelForCausalLM
-
+from peft import AutoPeftModelForCausalLM
+from transformers import AutoTokenizer
 
 @spaces.GPU(duration=300)
-config = PeftConfig.from_pretrained("FlawedLLM/BhashiniLLM")
-base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
-model = PeftModel.from_pretrained(base_model, "FlawedLLM/BhashiniLLM")
-tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
+
+model = AutoPeftModelForCausalLM.from_pretrained(
+    "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
+    load_in_4bit = load_in_4bit,
+)
+tokenizer = AutoTokenizer.from_pretrained("lora_model")
 def chunk_it(input_command):
     inputs = tokenizer(
     [
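As committed, the new hunk still has loose ends: load_in_4bit is passed but never defined, spaces is used without being imported, the @spaces.GPU decorator sits above module-level code instead of a function, and the tokenizer is loaded from a local "lora_model" path rather than the Hub repo. A minimal runnable sketch of the same loading flow, under the assumptions that 4-bit loading is intended, the tokenizer lives in the FlawedLLM/BhashiniLLM repo, and chunk_it (whose body the diff truncates) does a plain generate-and-decode:

import spaces
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Load the LoRA adapter together with its 4-bit base model in one call.
# Assumptions: 4-bit loading is intended (the diff passes an undefined
# `load_in_4bit` variable) and the tokenizer is stored in the same Hub
# repo as the adapter rather than a local "lora_model" folder.
model = AutoPeftModelForCausalLM.from_pretrained(
    "FlawedLLM/BhashiniLLM",
    load_in_4bit=True,
)
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")

@spaces.GPU(duration=300)  # the ZeroGPU decorator must wrap a function
def chunk_it(input_command):
    # Hypothetical body: the diff cuts off after `inputs = tokenizer([`.
    inputs = tokenizer([input_command], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)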