JoshuaKelleyDs committed
Commit f3bec34 • Parent(s): 6257fb8

Update app.py
app.py CHANGED
@@ -107,10 +107,10 @@ async def start():
         youtube_docs = await create_youtube_transcription(youtube_link['content']) # create the youtube transcription
         transcription = youtube_docs[0].page_content # get the transcription of the first document
         await cl.Message(content=f"youtube docs: {transcription}").send() # display the transcription of the first document to show that we have the correct data
-        split_docs = create_text_splitter(youtube_docs) # split the documents into chunks
-        vector_db = create_faiss_vector_store(split_docs) # create the vector db
+        split_docs = await create_text_splitter(youtube_docs) # split the documents into chunks
+        vector_db = await create_faiss_vector_store(split_docs) # create the vector db
         bm25 = create_bm25_retreiver(split_docs) # create the BM25 retreiver
-        ensemble_retriever =
+        ensemble_retriever = create_ensemble_retriever(vector_db, bm25) # create the ensemble retriever
         cl.user_session.set("ensemble_retriever", ensemble_retriever) # store the ensemble retriever in the user session for our on message function
     except Exception as e:
         await cl.Message(content=f"failed to load model: {e}").send() # display the error if we failed to load the model
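This hunk awaits the now-async splitter and vector-store helpers and, more importantly, completes the dangling "ensemble_retriever =" assignment, which was a syntax error. The helper bodies are not part of this diff; the following is a minimal sketch of what they might look like, assuming LangChain's RecursiveCharacterTextSplitter, FAISS, BM25Retriever, and EnsembleRetriever, with a hypothetical OpenAI embedding model:

    from langchain_community.vectorstores import FAISS
    from langchain_community.retrievers import BM25Retriever
    from langchain.retrievers import EnsembleRetriever
    from langchain_text_splitters import RecursiveCharacterTextSplitter
    from langchain_openai import OpenAIEmbeddings  # hypothetical; app.py's embedding model is not shown in this diff

    async def create_text_splitter(docs, chunk_size=1000, chunk_overlap=100):
        # chunk sizes are assumptions; async to match the `await` added in this commit
        splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        return splitter.split_documents(docs)

    async def create_faiss_vector_store(split_docs):
        # dense (embedding-based) index over the chunks
        return FAISS.from_documents(split_docs, OpenAIEmbeddings())

    def create_bm25_retreiver(split_docs):  # spelling kept as in app.py
        # sparse keyword retriever over the same chunks
        return BM25Retriever.from_documents(split_docs)

    def create_ensemble_retriever(vector_db, bm25):
        # merge dense and sparse results; equal weights are an assumption
        return EnsembleRetriever(retrievers=[vector_db.as_retriever(), bm25],
                                 weights=[0.5, 0.5])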
@@ -132,7 +132,8 @@ async def message(message: cl.Message):
     cl.Message(content=f"Displaying Relevant Docs").send() # we display the relevant documents to the user
     for doc in relevant_docs: # loop through the relevant documents and display each one!
         await cl.Message(content=doc.page_content).send()
-    rag_chain = RunnableSequence({"context": ensemble_retriever, "question": RunnablePassthrough()} | prompt_template | llm)
-    response = rag_chain.invoke(message.content) # we invoke the rag chain with the user's message
     await cl.Message(content="Done Displaying Relevant Docs").send()
+    # question -> retrieve relevant docs -> format the question and context and add it to the prompt template -> pass to LLM
+    rag_chain = RunnableSequence({"context": ensemble_retriever, "question": RunnablePassthrough()} | prompt_template | llm)
+    response = rag_chain.invoke(message.content) # we invoke the rag chain with the user's message
     await cl.Message(content=f"LLM Response: {response.content}").send() # we display the response to the user
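This hunk moves the chain construction after the "Done Displaying Relevant Docs" message and documents the data flow. In LCEL, piping a plain dict into a runnable coerces it into a RunnableParallel, so the incoming question fans out to the retriever (filling {context}) and to a passthrough (forwarding {question}); the explicit RunnableSequence(...) wrapper is redundant, since the | operator already builds a sequence, but harmless because nested sequences are flattened. A standalone sketch of the same pattern, reusing the ensemble_retriever from the sketch above, with a hypothetical prompt and chat model standing in for the prompt_template and llm defined elsewhere in app.py:

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables import RunnablePassthrough
    from langchain_openai import ChatOpenAI  # hypothetical; app.py's actual llm is not shown in this diff

    # ensemble_retriever: built as in the previous sketch
    llm = ChatOpenAI(model="gpt-4o-mini")  # model choice is an assumption
    prompt_template = ChatPromptTemplate.from_template(
        "Answer using only this context:\n\n{context}\n\nQuestion: {question}"
    )

    # question -> retriever fills {context}, passthrough forwards {question} -> prompt -> LLM
    rag_chain = (
        {"context": ensemble_retriever, "question": RunnablePassthrough()}
        | prompt_template
        | llm
    )

    response = rag_chain.invoke("What is this video about?")
    print(response.content)  # chat models return an AIMessage, hence .content in app.py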