|
import os

import streamlit as st
from langchain_chroma import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import GoogleGenerativeAI
from langchain_huggingface import HuggingFaceEmbeddings
|
|
|
|
|
# SECURITY: never commit real API keys to source control. The key that was
# previously hardcoded here should be considered leaked and revoked.
# Read it from the environment (or use st.secrets in a deployed app).
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")

# Directory where the persisted Chroma vector store lives.
CHROMA_DB_DIR = "./chroma_db_"

# Sentence-transformer model used to embed both documents and queries.
MODEL_NAME = "flax-sentence-embeddings/all_datasets_v4_MiniLM-L6"
|
|
|
|
|
# --- Pipeline wiring -------------------------------------------------------
# Embedding model: turns text into vectors for similarity search.
embeddings_model = HuggingFaceEmbeddings(model_name=MODEL_NAME)

# Vector store: persisted Chroma collection queried at answer time.
# NOTE(review): assumes the collection was populated by a separate
# ingestion script — nothing in this file writes to it.
db = Chroma(
    collection_name="vector_database",
    embedding_function=embeddings_model,
    persist_directory=CHROMA_DB_DIR,
)

# LLM used to generate the final answer from the retrieved context.
genai_model = GoogleGenerativeAI(api_key=GOOGLE_API_KEY, model="gemini-1.5-flash")
|
|
|
|
|
# --- Page chrome -----------------------------------------------------------
# Header plus a one-line usage hint.
page_title = "Question Answering with ChromaDB and Google GenAI"
st.title(page_title)
st.write("Ask a question based on the context stored in the database.")

# Free-text question box; returns "" until the user types something, so the
# answer section below stays hidden on first load.
query = st.text_input("Enter your question:")
|
|
|
# Run the retrieve-then-generate pipeline only once a question was entered.
if query:
    with st.spinner("Retrieving context and generating an answer..."):
        # Top-k semantic search; each result is a (Document, score) pair.
        docs_chroma = db.similarity_search_with_score(query, k=4)

        if not docs_chroma:
            # Empty retrieval: don't prompt the LLM with no context — that
            # defeats the "answer only from context" constraint below.
            response_text = None
            st.warning("No relevant context was found in the database.")
        else:
            # Concatenate the retrieved chunks into one context string.
            context_text = "\n\n".join(doc.page_content for doc, _score in docs_chroma)

            # Prompt constrains the model to answer strictly from the
            # retrieved context and to avoid meta-references to it.
            PROMPT_TEMPLATE = """
            Answer the question based only on the following context:
            {context}
            Answer the question based on the above context: {question}.
            Provide a detailed answer.
            Don’t justify your answers.
            Don’t give information not mentioned in the CONTEXT INFORMATION.
            Do not say "according to the context" or "mentioned in the context" or similar.
            """
            prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
            prompt = prompt_template.format(context=context_text, question=query)

            # Single LLM call; GoogleGenerativeAI.invoke returns a string.
            response_text = genai_model.invoke(prompt)

    if response_text is not None:
        st.subheader("Answer:")
        st.write(response_text)