FlawedLLM committed
Commit d2a5fcd
1 Parent(s): 6ff4d75

Update app.py

Files changed (1)
  1. app.py +17 -17
app.py CHANGED
@@ -6,20 +6,20 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 # from peft import PeftModel, PeftConfig
 
 
-tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00")
-quantization_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_use_double_quant=True,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.float16)
-config = AutoConfig.from_pretrained("FlawedLLM/Bhashini_00")
-model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
-    device_map="auto",
-    quantization_config=quantization_config,
-    torch_dtype=torch.float16,
-    low_cpu_mem_usage=True,
-    use_safetensors=True,
-)
+# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00")
+# quantization_config = BitsAndBytesConfig(
+#     load_in_4bit=True,
+#     bnb_4bit_use_double_quant=True,
+#     bnb_4bit_quant_type="nf4",
+#     bnb_4bit_compute_dtype=torch.float16)
+# config = AutoConfig.from_pretrained("FlawedLLM/Bhashini_00")
+# model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
+#     device_map="auto",
+#     quantization_config=quantization_config,
+#     torch_dtype=torch.float16,
+#     low_cpu_mem_usage=True,
+#     use_safetensors=True,
+#     )
 
 # # Assuming you have your HF repository in this format: "your_username/your_model_name"
 # model_id = "FlawedLLM/BhashiniLLM"
@@ -60,12 +60,12 @@ model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
 # # torch_dtype =torch.float16
 # model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9", config=config, ignore_mismatched_sizes=True).to('cuda')
 # Load model directly
-
-
-# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00")
-# model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00").to('cuda')
-
-@spaces.GPU(duration=300)
+@spaces.GPU(duration=300)
+
+tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00")
+model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00", load_in_4bit=True).to('cuda')
+
+
 def chunk_it(input_command):
     alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
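For reference, `load_in_4bit=True` is shorthand for passing a `BitsAndBytesConfig`; the block this commit comments out spelled that out with nf4 quantization, double quantization, and fp16 compute. A minimal sketch of the equivalent explicit load (parameter choices copied from the removed block, not verified against the Space's pinned transformers/bitsandbytes versions):

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# Explicit 4-bit config mirroring the block commented out in this commit.
# The bare load_in_4bit=True shorthand falls back to library defaults instead.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00")
model = AutoModelForCausalLM.from_pretrained(
    "FlawedLLM/Bhashini_00",
    quantization_config=quantization_config,
    device_map="auto",  # lets accelerate place the quantized weights on the GPU
)

With a quantized load and device_map="auto", the trailing .to('cuda') in the new code is normally unnecessary; depending on the installed transformers version it is either ignored or rejected for bitsandbytes-quantized models.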