Initial Draft

app.py CHANGED
```diff
@@ -13,6 +13,13 @@ from langchain.prompts.prompt import PromptTemplate
 from langchain_community.llms import LlamaCpp
 from langchain.chains import RetrievalQA
 
+from dotenv import load_dotenv
+import google.generativeai as genai
+
+# Loading Google Gemini
+load_dotenv()
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
 # Upload pdf file into 'pdf-data' folder if it does not exist
 def fn_upload_pdf(mv_pdf_input_file, mv_processing_message):
     """Upload pdf file into 'pdf-data' folder if it does not exist"""
```
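The hunk above wires Gemini in at import time; note that it relies on an `os` import that must already exist earlier in app.py, since the diff does not add one. For context, a minimal sketch of the same configuration path end to end, assuming a `.env` file containing `GOOGLE_API_KEY=<key>` next to app.py and the `gemini-pro` model name used later in this commit:

```python
import os

from dotenv import load_dotenv
import google.generativeai as genai

# Load GOOGLE_API_KEY from .env into the process environment
load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# One-off smoke test of the configured client
lv_model = genai.GenerativeModel("gemini-pro")
lv_response = lv_model.generate_content("Say hello in one word.")
print(lv_response.text)
```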
```diff
@@ -229,6 +236,57 @@ def fn_generate_QnA_response(mv_selected_model, mv_user_question, lv_vector_store, mv_processing_message):
 
     return lv_llm_response
 
+# Function returns API based QA Response using Vector Store
+def fn_generate_API_QnA_response(mv_selected_model, mv_user_question, lv_vector_store, mv_processing_message):
+    """Returns QA Response using Vector Store"""
+
+    lv_template = """Instruction:
+    You are an AI assistant for answering questions about the provided context.
+    You are given the following extracted parts of a long document and a question. Provide a detailed answer.
+    If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
+    =======
+    {context}
+    =======
+    Question: {question}
+    Output:\n"""
+    lv_qa_prompt = PromptTemplate(
+        template=lv_template,
+        input_variables=["question", "context"]
+    )
+
+    lv_vector_search_result = lv_vector_store.similarity_search(mv_user_question, k=2)
+    # print("Vector Search Result - ")
+    # print(lv_vector_search_result)
+
+    # -- Creating formatted document result
+    lv_document_context = ""
+    lv_count = 0
+    for lv_result in lv_vector_search_result:
+        # print("Concatenating Result of page - " + str(lv_count) + " with content of document page no - " + str(lv_result.metadata["page"]))
+        lv_document_context += lv_result.page_content
+        lv_count += 1
+
+    # print("Formatted Document Search Result - ")
+    # print(lv_document_context)
+
+    lv_qa_formatted_prompt = lv_qa_prompt.format(
+        question=mv_user_question,
+        context=lv_document_context
+    )
+
+    if mv_selected_model == 'Google Gemini-pro':
+        lv_model = genai.GenerativeModel('gemini-pro')
+
+        print("Step4: Generating LLM response")
+        fn_display_user_messages("Step4: Generating LLM response", "Info", mv_processing_message)
+
+        lv_llm_response = lv_model.generate_content(lv_qa_formatted_prompt).text  # send the formatted prompt string, not the PromptTemplate
+
+        print("Step5: LLM response generated")
+        fn_display_user_messages("Step5: LLM response generated", "Info", mv_processing_message)
+
+    return lv_llm_response
+
 # Main Function
 def main():
 
```
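The new function stuffs the top-k retrieved chunks into a single prompt instead of going through a LangChain chain. A self-contained sketch of that retrieve-then-prompt flow, with a stub standing in for the real vector store the app builds from the uploaded PDF (the stub and its contents are illustrative only):

```python
from dataclasses import dataclass

from langchain.prompts.prompt import PromptTemplate

@dataclass
class StubDocument:
    """Minimal stand-in for a LangChain Document."""
    page_content: str

class StubVectorStore:
    """Illustrative stand-in for the store built from the uploaded PDF."""
    def similarity_search(self, query, k=2):
        return [StubDocument("First relevant chunk."),
                StubDocument("Second relevant chunk.")][:k]

lv_qa_prompt = PromptTemplate(
    template="Context:\n{context}\n\nQuestion: {question}\nAnswer:",
    input_variables=["question", "context"],
)

lv_store = StubVectorStore()
lv_question = "What is the document about?"

# Same flow as fn_generate_API_QnA_response: retrieve, concatenate, format
lv_docs = lv_store.similarity_search(lv_question, k=2)
lv_context = "".join(lv_doc.page_content for lv_doc in lv_docs)
print(lv_qa_prompt.format(question=lv_question, context=lv_context))
```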
```diff
@@ -248,7 +306,8 @@ def main():
         [
             'microsoft/phi-2',
             'google/gemma-2b',
-            'mistralai/Mistral-7B-Instruct-v0.2'
+            'mistralai/Mistral-7B-Instruct-v0.2',
+            'Google Gemini-pro'
         ]
     )
 
```
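The selectbox call that consumes this option list sits outside the hunk; for orientation, a plausible shape for it (only the options are taken from the diff, the label and placement are assumptions):

```python
import streamlit as st

# Hypothetical reconstruction of the surrounding widget
mv_selected_model = st.selectbox(
    "Select Model",
    [
        'microsoft/phi-2',
        'google/gemma-2b',
        'mistralai/Mistral-7B-Instruct-v0.2',
        'Google Gemini-pro',
    ],
)
```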
```diff
@@ -268,7 +327,10 @@ def main():
     st.text("")
 
     # -- Downloading Model Files
-    fn_download_llm_models(mv_selected_model, mv_processing_message)
+    if mv_selected_model != "Google Gemini-pro":
+        fn_download_llm_models(mv_selected_model, mv_processing_message)
+    else:
+        print("Call Google API")
 
     # -- Processing PDF
     if (mv_pdf_input_file is not None):
```
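`fn_download_llm_models` is defined elsewhere in app.py and is now skipped for the Gemini path, which needs no local weights. A hypothetical sketch of what such a download step typically looks like with `huggingface_hub` (the repo and file names below are assumptions, not taken from this commit):

```python
from huggingface_hub import hf_hub_download

def fn_download_llm_models_sketch(mv_selected_model, mv_processing_message):
    """Hypothetical: fetch a local GGUF build of the selected model."""
    # Mapping is illustrative; the real function may use different repos/files.
    lv_model_files = {
        'microsoft/phi-2': ('TheBloke/phi-2-GGUF', 'phi-2.Q4_K_M.gguf'),
    }
    lv_repo_id, lv_filename = lv_model_files[mv_selected_model]
    # hf_hub_download caches the file locally and returns its path
    return hf_hub_download(repo_id=lv_repo_id, filename=lv_filename)
```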
```diff
@@ -290,8 +352,11 @@ def main():
         st.session_state.messages.append({"role": "user", "content": mv_user_question})
 
         # -- Generating LLM response
-        lv_response = fn_generate_QnA_response(mv_selected_model, mv_user_question, lv_vector_store, mv_processing_message)
-
+        if mv_selected_model != "Google Gemini-pro":
+            lv_response = fn_generate_QnA_response(mv_selected_model, mv_user_question, lv_vector_store, mv_processing_message)
+        else:
+            lv_response = fn_generate_API_QnA_response(mv_selected_model, mv_user_question, lv_vector_store, mv_processing_message)
+
         # -- Adding assistant response to chat history
         st.session_state.messages.append({"role": "assistant", "content": lv_response})
 
```
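The two `append` calls in this hunk follow Streamlit's standard chat pattern. A minimal, self-contained sketch of that loop, with a canned reply standing in for the two generator functions:

```python
import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the chat history on every rerun
for lv_message in st.session_state.messages:
    with st.chat_message(lv_message["role"]):
        st.markdown(lv_message["content"])

if mv_user_question := st.chat_input("Ask a question about the PDF"):
    st.session_state.messages.append({"role": "user", "content": mv_user_question})

    # Stand-in for fn_generate_QnA_response / fn_generate_API_QnA_response
    lv_response = "Hmm, I'm not sure."
    with st.chat_message("assistant"):
        st.markdown(lv_response)
    st.session_state.messages.append({"role": "assistant", "content": lv_response})
```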