FlawedLLM committed on
Commit
fe1b079
1 Parent(s): f823e77

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -6
app.py CHANGED
@@ -4,14 +4,24 @@ import gradio as gr
4
  from peft import PeftModel, PeftConfig
5
  from peft import AutoPeftModelForCausalLM
6
  from transformers import AutoTokenizer
7
-
8
- @spaces.GPU(duration=300)
9
- def chunk_it(input_command):
10
- model = AutoPeftModelForCausalLM.from_pretrained(
11
  "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
12
  load_in_4bit = True,
13
- )
14
- tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  inputs = tokenizer(
16
  [
17
  alpaca_prompt.format(
 
4
  from peft import PeftModel, PeftConfig
5
  from peft import AutoPeftModelForCausalLM
6
  from transformers import AutoTokenizer
7
+ model = AutoPeftModelForCausalLM.from_pretrained(
 
 
 
8
  "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
9
  load_in_4bit = True,
10
+ )
11
+ tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
12
+ @spaces.GPU(duration=300)
13
+ def chunk_it(input_command):
14
+
15
+ alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
16
+
17
+ ### Instruction:
18
+ {}
19
+
20
+ ### Input:
21
+ {}
22
+
23
+ ### Response:
24
+ {}"""
25
  inputs = tokenizer(
26
  [
27
  alpaca_prompt.format(