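"""Multi-modal document/image Q&A demo.

Gemini (via LangChain and google-generativeai) answers a question about an
uploaded PDF or image, then a locally hosted OpenELM-270M model generates a
follow-up question. Served as a Gradio app.
"""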
import os
from threading import Thread

import gradio as gr
import google.generativeai as genai
import torch
from langchain.chains.question_answering import load_qa_chain
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Configure the Gemini API (expects GOOGLE_API_KEY in the environment)
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Load the OpenELM model. OpenELM ships without a tokenizer of its own; its
# model card points to the Llama-2 tokenizer (a gated checkpoint, so Hugging
# Face access must already be granted).
checkpoint = "apple/OpenELM-270M"
checkpoint_tok = "meta-llama/Llama-2-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(checkpoint_tok)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
low_cpu_mem_usage = torch.cuda.is_available()

model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    torch_dtype=torch_dtype,
    trust_remote_code=True,  # OpenELM uses custom modeling code
    low_cpu_mem_usage=low_cpu_mem_usage,
)
model.to(device)
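# At 270M parameters OpenELM fits comfortably on CPU; bfloat16 roughly halves
# its memory footprint on GPU.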

# The Llama-2 tokenizer defines no pad token; reuse EOS so padding during
# generation works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id

# Generation settings for the OpenELM follow-up step
max_new_tokens = 250
repetition_penalty = 1.4

# Answer a question about a PDF with Gemini via a LangChain "stuff" QA chain
def process_pdf(file_path, question):
    llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt_template = """Answer the question as precisely as possible using the provided context. If the answer is not contained in the context, say "answer not available in context".

Context:
{context}

Question:
{question}

Answer:"""
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

    pdf_loader = PyPDFLoader(file_path)
    pages = pdf_loader.load_and_split()
    # The "stuff" chain fills the {context} prompt variable from input_documents
    # itself, so pass the (truncated) pages directly rather than a hand-built string.
    stuff_chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
    stuff_answer = stuff_chain(
        {"input_documents": pages[:200], "question": question},
        return_only_outputs=True,
    )
    return stuff_answer["output_text"]

# Answer a question about an image with the Gemini vision model
def process_image(image, question):
    vision_model = genai.GenerativeModel("gemini-pro-vision")
    response = vision_model.generate_content([image, question])
    return response.text

# Generate a follow-up question with OpenELM, streaming tokens as they arrive
def generate_openelm_followup(answer):
    prompt = f"Based on this answer: {answer}\nGenerate a follow-up question:"
    inputs = tokenizer([prompt], return_tensors="pt").input_ids.to(model.device)

    # TextIteratorStreamer takes decode kwargs directly (not as a dict), and
    # skip_prompt=True keeps the prompt itself out of the streamed output.
    streamer = TextIteratorStreamer(
        tokenizer,
        skip_prompt=True,
        timeout=5.0,
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )

    # model.generate blocks, so run it on a worker thread and consume the
    # stream on this one.
    generation_kwargs = dict(
        input_ids=inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
        pad_token_id=tokenizer.pad_token_id,
    )
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    followup = ""
    for new_text in streamer:
        followup += new_text  # special tokens are already stripped by the streamer
    thread.join()

    return followup

# Route the upload to the matching Gemini handler, then ask OpenELM for a follow-up
def process_input(file, image, question):
    try:
        if file is not None:
            gemini_answer = process_pdf(file.name, question)
        elif image is not None:
            gemini_answer = process_image(image, question)
        else:
            return "Please upload a PDF file or an image."

        openelm_followup = generate_openelm_followup(gemini_answer)
        combined_output = f"Gemini Answer: {gemini_answer}\n\nOpenELM Follow-up: {openelm_followup}"
        return combined_output
    except Exception as e:
        return f"An error occurred: {str(e)}"

# Define Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# Multi-modal RAG Knowledge Retrieval using Gemini API and OpenELM Model")
    
    with gr.Row():
        with gr.Column():
            input_file = gr.File(label="Upload PDF File")
            input_image = gr.Image(type="pil", label="Upload Image")
        input_question = gr.Textbox(label="Ask about the document or image")
    
    output_text = gr.Textbox(label="Answer - Combined Gemini and OpenELM")
    
    submit_button = gr.Button("Submit")
    submit_button.click(fn=process_input, inputs=[input_file, input_image, input_question], outputs=output_text)

demo.launch()
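# launch() with no arguments serves locally on http://127.0.0.1:7860; on
# Hugging Face Spaces this module-level call is what starts the app.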