FlawedLLM committed
Commit 6875a6e
1 Parent(s): 13df409

Update app.py

Files changed (1): app.py (+3, -0)
app.py CHANGED
@@ -25,9 +25,12 @@ model_id = "FlawedLLM/BhashiniLLM"
 
 # Load the base model (the one you fine-tuned with LoRA)
 base_model = AutoModelForCausalLM.from_pretrained(model_id, device_map='auto') # Load in 8-bit for efficiency
+for param in base_model.parameters():
+    param.data = param.data.to(torch.float16) # or torch.float32
 
 # Load the LoRA adapter weights
 model = PeftModel.from_pretrained(base_model, model_id)
+
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 
 
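
For context, below is a minimal sketch of how this section of app.py reads after the commit. The import lines are not part of the diff and are assumed to be the standard transformers/peft/torch imports; the model_id assignment comes from the hunk header, and everything else mirrors the lines shown above.

# Assumed imports (not shown in this hunk); names follow the standard transformers and peft APIs.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

model_id = "FlawedLLM/BhashiniLLM"

# Load the base model (the one fine-tuned with LoRA); device_map='auto' lets accelerate place the weights.
base_model = AutoModelForCausalLM.from_pretrained(model_id, device_map='auto')

# Cast the base weights to half precision before attaching the adapter,
# matching the loop added in this commit.
for param in base_model.parameters():
    param.data = param.data.to(torch.float16)  # or torch.float32

# Attach the LoRA adapter weights, then load the matching tokenizer.
model = PeftModel.from_pretrained(base_model, model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

The added loop appears to be a memory optimization: since from_pretrained without a torch_dtype argument typically materializes weights in float32, casting them to float16 after loading roughly halves the base model's memory footprint before the LoRA adapter is applied.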