Updated code
Browse files
app.py
CHANGED
@@ -214,13 +214,9 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
|
|
214 |
for item in response:
|
215 |
full_response += item
|
216 |
placeholder.markdown(full_response)
|
217 |
-
# The following logic will work in the way given below.
|
218 |
-
# -- Check if intermediary steps are present in the output of the given prompt.
|
219 |
-
# -- If not, we can conclude that the agent has used internet search as a tool.
|
220 |
-
# -- Check if intermediary steps are present in the output of the prompt.
|
221 |
-
# -- If intermediary steps are present, it means the agent has used the existing custom knowledge base for information retrieval, and therefore we need to give source docs as output along with the LLM's response.
|
222 |
if response:
|
223 |
st.text("-------------------------------------")
|
|
|
224 |
docs= st.session_state["ensemble_retriver"].get_relevant_documents(prompt)
|
225 |
source_doc_list= []
|
226 |
for doc in docs:
|
@@ -237,6 +233,6 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
|
|
237 |
st.write("---") # Add a separator between entries
|
238 |
message = {"role": "assistant", "content": full_response, "Source":merged_source_doc}
|
239 |
st.session_state.messages.append(message)
|
240 |
-
|
241 |
end = timeit.default_timer()
|
242 |
print(f"Time to retrieve response: {end - start}")
|
|
|
214 |
for item in response:
|
215 |
full_response += item
|
216 |
placeholder.markdown(full_response)
|
|
|
|
|
|
|
|
|
|
|
217 |
if response:
|
218 |
st.text("-------------------------------------")
|
219 |
+
#Getting source docs
|
220 |
docs= st.session_state["ensemble_retriver"].get_relevant_documents(prompt)
|
221 |
source_doc_list= []
|
222 |
for doc in docs:
|
|
|
233 |
st.write("---") # Add a separator between entries
|
234 |
message = {"role": "assistant", "content": full_response, "Source":merged_source_doc}
|
235 |
st.session_state.messages.append(message)
|
236 |
+
|
237 |
end = timeit.default_timer()
|
238 |
print(f"Time to retrieve response: {end - start}")
|
utils.py
CHANGED
@@ -251,8 +251,8 @@ def load_text_chunks(text_chunks_pkl_dir):
|
|
251 |
def load_ensemble_retriver(text_chunks, embeddings, chroma_vectorstore):
|
252 |
"""Load ensemble retiriever with BM25 and Chroma as individual retrievers"""
|
253 |
bm25_retriever = BM25Retriever.from_documents(text_chunks)
|
254 |
-
bm25_retriever.k =
|
255 |
-
chroma_retriever = chroma_vectorstore.as_retriever(search_kwargs={"k":
|
256 |
ensemble_retriever = EnsembleRetriever(retrievers=[bm25_retriever, chroma_retriever], weights=[0.3, 0.7])
|
257 |
return ensemble_retriever
|
258 |
|
|
|
251 |
def load_ensemble_retriver(text_chunks, embeddings, chroma_vectorstore, k=2):
    """Build an ensemble retriever combining BM25 (sparse) and Chroma (dense) retrieval.

    Args:
        text_chunks: Documents used to build the BM25 (keyword) retriever.
        embeddings: Unused here; kept for interface compatibility with callers.
            (The Chroma store was presumably already built with these embeddings
            — TODO confirm with the caller.)
        chroma_vectorstore: A Chroma vector store exposing ``as_retriever``.
        k: Number of documents each underlying retriever returns (default 2,
            matching the previous hard-coded value).

    Returns:
        An EnsembleRetriever weighting BM25 at 0.3 and Chroma at 0.7.
    """
    bm25_retriever = BM25Retriever.from_documents(text_chunks)
    bm25_retriever.k = k
    chroma_retriever = chroma_vectorstore.as_retriever(search_kwargs={"k": k})
    # Dense (semantic) results are weighted higher than sparse keyword matches.
    ensemble_retriever = EnsembleRetriever(
        retrievers=[bm25_retriever, chroma_retriever], weights=[0.3, 0.7]
    )
    return ensemble_retriever
|
258 |
|