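"""Gradio demo for a Bhagavad Gita LLaMA model.

Loads a LoRA adapter (DarkAngel/gitallama) on top of a 4-bit Llama 3.1 8B
Instruct base model and returns English, Hindi, and word-by-word meanings
for a given shloka.
"""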
import gradio as gr
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

# Load the 4-bit base model, attach the fine-tuned LoRA adapter, and load the
# matching tokenizer. (With Unsloth installed, the adapter could instead be
# loaded directly via FastLanguageModel.from_pretrained("lora_model").)
base_model = AutoModelForCausalLM.from_pretrained("unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit")
model = PeftModel.from_pretrained(base_model, "DarkAngel/gitallama")
tokenizer = AutoTokenizer.from_pretrained("unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit")

def generate_response(shloka, transliteration):
    """
    Generates the response using the fine-tuned LLaMA model.
    """
    input_message = [
        {
            "role": "user",
            "content": f"Shloka: {shloka} Transliteration: {transliteration}"
        }
    ]
    inputs = tokenizer.apply_chat_template(
        input_message,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt"
    ).to(model.device)  # keep the inputs on the same device as the model

    # Generate response
    text_streamer = TextStreamer(tokenizer, skip_prompt=True)
    generated_tokens = model.generate(
        input_ids=inputs,
        streamer=text_streamer,
        max_new_tokens=512,
        use_cache=True,
        temperature=1.5,
        min_p=0.1
    )

    # generate() returns the prompt followed by the completion, so decode only
    # the newly generated tokens.
    raw_response = tokenizer.decode(
        generated_tokens[0][inputs.shape[-1]:], skip_special_tokens=True
    )
    try:
        # The model is expected to emit three labelled sections: the English
        # meaning, then "Hindi Meaning:", then "Word Meaning:".
        sections = raw_response.split("Hindi Meaning:")
        english_meaning = sections[0].strip()
        hindi_and_word = sections[1].split("Word Meaning:")
        hindi_meaning = hindi_and_word[0].strip()
        word_meaning = hindi_and_word[1].strip()

        formatted_response = (
            f"English Meaning:\n{english_meaning}\n\n"
            f"Hindi Meaning:\n{hindi_meaning}\n\n"
            f"Word Meaning:\n{word_meaning}"
        )
    except IndexError:
        # Fall back to the raw text if the expected section markers are missing.
        formatted_response = raw_response

    return formatted_response


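# Build the Gradio UI: two text inputs (the shloka and its transliteration)
# and a single text output with the formatted meanings.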
interface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Enter Shloka", placeholder="Type or paste a Shloka here"),
        gr.Textbox(label="Enter Transliteration", placeholder="Type or paste the transliteration here")
    ],
    outputs=gr.Textbox(label="Generated Response"),
    title="Bhagavad Gita LLaMA Model",
    description="Input a Shloka with its transliteration, and this model will provide meanings in English and Hindi along with word meanings."
)

# Launch the interface
if __name__ == "__main__":
    interface.launch()