FlawedLLM committed on
Commit
3d57546
1 Parent(s): fb46545

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -10,16 +10,17 @@ from transformers import AutoTokenizer
10
  # )
11
  # tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
12
  from unsloth import FastLanguageModel
13
- model, tokenizer = FastLanguageModel.from_pretrained(
14
- model_name = "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
15
- max_seq_length = 2048,
16
- dtype = None,
17
- load_in_4bit = True,
18
- )
19
- FastLanguageModel.for_inference(model)
20
  @spaces.GPU(duration=300)
21
  def chunk_it(input_command):
22
-
 
 
 
 
 
 
23
  alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
24
 
25
  ### Instruction:
 
10
  # )
11
  # tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
12
  from unsloth import FastLanguageModel
13
+
14
+
 
 
 
 
 
15
  @spaces.GPU(duration=300)
16
  def chunk_it(input_command):
17
+ model, tokenizer = FastLanguageModel.from_pretrained(
18
+ model_name = "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
19
+ max_seq_length = 2048,
20
+ dtype = None,
21
+ load_in_4bit = True,
22
+ )
23
+ FastLanguageModel.for_inference(model)
24
  alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
25
 
26
  ### Instruction: