Promptcleaning
app.py CHANGED
@@ -8,7 +8,7 @@ from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
 
 # Load pre-processed vector data from .pkl and .faiss files
-VECTOR_STORE_DIR = "
+VECTOR_STORE_DIR = r"C:\Users\cshas\Downloads\Banking_Regulations_Compliance_ChatBOT\faiss"
 PICKLE_PATH = os.path.join(VECTOR_STORE_DIR, "index.pkl")
 
 # Load documents
@@ -20,8 +20,6 @@ else:
 
 # Initialize embeddings
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
-SEC_KEY = os.getenv("SEC_KEY")
-
 
 # Load or create FAISS vector store
 FAISS_INDEX_PATH = os.path.join(VECTOR_STORE_DIR, "index.faiss")
@@ -35,6 +33,15 @@ else:
 retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 5})
 
 # Define the prompt template
+# prompt_template = """
+# You are an AI assistant that helps answer questions based on the following context:
+
+# {context}
+
+# Question: {question}
+# Answer:
+# """
+
 prompt_template = """
 You are an AI assistant that helps answer questions based on the following context:
 
@@ -44,6 +51,7 @@ Question: {question}
 Answer:
 """
 
+
 prompt = PromptTemplate(
     input_variables=["context", "question"],
     template=prompt_template
@@ -51,6 +59,7 @@ prompt = PromptTemplate(
 
 # Initialize LLM model
 REPO_ID = "mistralai/Mistral-7B-Instruct-v0.2"
+SEC_KEY = os.getenv("SEC_KEY")  # Replace with your HuggingFace API key
 
 model = HuggingFaceHub(
     repo_id=REPO_ID,
@@ -62,16 +71,25 @@ model = HuggingFaceHub(
 rag_chain = LLMChain(llm=model, prompt=prompt)
 
 def answer_question(question):
-    """Retrieve relevant documents and generate an answer using the RAG chain."""
     docs = retriever.get_relevant_documents(question)
     context = "\n".join([doc.page_content for doc in docs])
-
-
+    raw_output = rag_chain.run({"context": context, "question": question})
+
+    # Attempt to isolate only the answer
+    if "Answer:" in raw_output:
+        final_answer = raw_output.split("Answer:")[-1].strip()
+    else:
+        # If 'Answer:' is not found for some reason, just strip leading/trailing whitespace
+        final_answer = raw_output.strip()
+
+    return final_answer
 
-# Define the Gradio function
+# Define the Gradio function
 def ask_model(history, question):
+    # Simulated response (Replace this with your actual model logic)
+    # response = f"AI Response to: '{question}'"
    response = answer_question(question)
-    history.append((question, response))
+    history.append((question, response))  # Append user input and bot response as a tuple
     return history
 
 # Footer content
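The @@ -51,6 +59,7 @@ hunk reinstates SEC_KEY next to REPO_ID, but the HuggingFaceHub(...) arguments that would actually consume the key fall outside the visible diff context. As a hedged sketch only, this is how a Hugging Face API token is commonly passed to LangChain's legacy HuggingFaceHub wrapper; the huggingfacehub_api_token and model_kwargs usage shown here is an assumption about the hidden call site, and the generation settings are illustrative, not taken from the commit.

import os
from langchain.llms import HuggingFaceHub  # legacy LangChain import path, matching the app's other imports

REPO_ID = "mistralai/Mistral-7B-Instruct-v0.2"
SEC_KEY = os.getenv("SEC_KEY")  # assumed to hold a Hugging Face API token (e.g. a Space secret)

# Hypothetical call site: only repo_id is visible in the diff.
model = HuggingFaceHub(
    repo_id=REPO_ID,
    huggingfacehub_api_token=SEC_KEY,
    model_kwargs={"temperature": 0.5, "max_new_tokens": 512},  # illustrative settings
)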
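The rewritten answer_question() joins the retrieved documents into a context string, runs the chain, and then splits the raw completion on the literal marker "Answer:". Below is a small self-contained illustration of that post-processing, using a made-up raw_output string in place of a real HuggingFaceHub completion (these endpoints frequently echo the prompt, which is why the split is useful).

# Stand-in for a completion that echoes the prompt template; not real model output.
raw_output = (
    "You are an AI assistant that helps answer questions based on the following context:\n"
    "...\n"
    "Question: What does KYC stand for?\n"
    "Answer: KYC stands for Know Your Customer."
)

# Same isolation logic as in the commit: keep only the text after the last "Answer:" marker.
if "Answer:" in raw_output:
    final_answer = raw_output.split("Answer:")[-1].strip()
else:
    final_answer = raw_output.strip()

print(final_answer)  # -> KYC stands for Know Your Customer.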
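ask_model(history, question) appends a (question, response) tuple and returns the updated history, which matches the tuple format gr.Chatbot renders. The Blocks layout itself is not part of this diff, so the wiring below is a hypothetical, self-contained sketch (with a stand-in handler) of how such a function is typically hooked up in Gradio; component names and placeholder text are invented.

import gradio as gr

def ask_model(history, question):
    # Stand-in for the app's handler: append the (question, response) pair and return the history.
    response = f"(placeholder answer to: {question})"
    history.append((question, response))
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # renders the list of (question, response) tuples
    question_box = gr.Textbox(placeholder="Ask a question about the indexed documents")
    # On submit, pass the current history and the typed question; display the returned history.
    question_box.submit(ask_model, inputs=[chatbot, question_box], outputs=chatbot)

demo.launch()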
|