import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
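# Assumed dependencies (not stated in the original script): loading the
# bnb-4bit base model requires bitsandbytes and accelerate in addition to
# torch, transformers, and peft, e.g.
#   pip install torch transformers peft bitsandbytes accelerate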

# Load the 4-bit quantized base model and attach the fine-tuned LoRA adapter
base_model = AutoModelForCausalLM.from_pretrained(
    "unsloth/llama-3.2-3b-instruct-bnb-4bit",
    device_map="auto",  # place the quantized weights on the available device(s)
)
model = PeftModel.from_pretrained(base_model, "ayush0504/Fine-Tunned-GPT")
model.eval()

# Load the tokenizer from the adapter repo (assumed to ship tokenizer files)
tokenizer = AutoTokenizer.from_pretrained("ayush0504/Fine-Tunned-GPT")

def generate_response(question):
    messages = [{"role": "user", "content": question}]
    # Build the prompt with the model's chat template; return_dict=True yields
    # both input_ids and attention_mask, which generate() expects
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
    
    # Stream tokens to stdout as they are generated, skipping the prompt
    text_streamer = TextStreamer(tokenizer, skip_prompt=True)
    output = model.generate(
        **inputs,
        streamer=text_streamer,
        max_new_tokens=1048,
        use_cache=True,
        do_sample=True,  # sampling must be enabled for temperature/min_p to take effect
        temperature=0.7,
        min_p=0.1,
    )

    # Decode only the newly generated tokens, dropping the echoed prompt
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# Example usage
if __name__ == "__main__":
    question = input("Ask a legal question: ")
    if question.strip():
        answer = generate_response(question)
        print("\nAnswer:", answer)
    else:
        print("Please enter a valid question.")
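
# Optional sketch (not part of the original script): merge the LoRA adapter
# into the base weights for standalone deployment. Note that merge_and_unload()
# dequantizes a bnb-4bit base, so this needs memory for the full-size model.
# The output directory name below is hypothetical.
#
#   merged = model.merge_and_unload()
#   merged.save_pretrained("Fine-Tunned-GPT-merged")
#   tokenizer.save_pretrained("Fine-Tunned-GPT-merged")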