Update app.py
app.py CHANGED
@@ -107,11 +107,10 @@ def query_hybrid_search(query: str, client: QdrantClient, collection_name: str,
         score_threshold=0.9
     )
 
-
-
-    """Generate a short, single-sentence summary of the user's intent or topic based on their question, capturing the main focus of what they want to discuss.
+def build_prompt_conv():
+    return f"""Generate a short, single-sentence summary of the user's intent or topic based on their question, capturing the main focus of what they want to discuss.
 
-    Question : {
+    Question : {st.session_state.user_input}
     """
 
 @outlines.prompt
@@ -158,28 +157,28 @@ def open_query_prompt(past_messages: str, query: str):
 
 @outlines.prompt
 def route_llm(context: str, query: str):
-    """
+    """Based on the given context and user query, decide if the context is relevant. Return 'Yes' for relevant and 'No' for irrelevant.
 
-    Context: {{ context }}
+    Context : {{ context }}
 
-
+    Query: {{ query }}
     """
 
 @outlines.prompt
 def answer_with_context(context: str, query: str):
-    """
-
-
-
-
-
+    """Context information is below.
+    ---------------------
+    {context}
+    ---------------------
+    Given the context information and not prior knowledge, answer the query.
+    Query: {query}
     Answer:
     """
 
 @outlines.prompt
 def idk(query: str):
-    """
-
+    """When you encounter a question that falls outside your knowledge or expertise, respond in a way that politely conveys you don't have the information needed to answer.
+
     Question: {{ query }}
     """
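For reference, the @outlines.prompt decorator seen in these hunks treats the decorated function's docstring as a Jinja2 template: calling the function fills the {{ ... }} placeholders from its arguments and returns the rendered prompt string. A minimal sketch, with illustrative argument values:

# Sketch of how an @outlines.prompt template renders. Calling the decorated
# function substitutes {{ context }} and {{ query }} with the arguments and
# returns the rendered docstring as a plain string.
import outlines

@outlines.prompt
def route_llm(context: str, query: str):
    """Based on the given context and user query, decide if the context is relevant. Return 'Yes' for relevant and 'No' for irrelevant.

    Context : {{ context }}

    Query: {{ query }}
    """

prompt = route_llm("Paris is the capital of France.", "What is the capital of France?")
print(prompt)  # the docstring with both placeholders filled in

Worth noting: Jinja2 fills only the double-brace placeholders, so single-brace fields such as {context} in answer_with_context would pass through as literal text in the rendered prompt.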
@@ -502,7 +501,7 @@ def load_models_and_documents():
         time.sleep(5)
         container.empty()
 
-    return client, collection_name, model, dense_model, sparse_model, nlp, conn, cursor
+    return client, collection_name, llm, model, dense_model, sparse_model, nlp, conn, cursor
 
 def chunk_documents(texts: List[str], metadatas: List[dict], dense_model: OptimumEncoder, sparse_model: SparseTextEmbedding):
     text_splitter = SemanticChunker(
@@ -549,7 +548,7 @@ def on_change_documents_only():
 if __name__ == '__main__':
     st.set_page_config(page_title="Multipurpose AI Agent",layout="wide", initial_sidebar_state='auto')
 
-    client, collection_name, llm, dense_model, sparse_model, nlp, conn, cursor = load_models_and_documents()
+    client, collection_name, llm, model, dense_model, sparse_model, nlp, conn, cursor = load_models_and_documents()
 
     styles = {
         "nav": {
@@ -728,17 +727,13 @@ if __name__ == '__main__':
     )
 
     def generate_conv_title(llm):
-        if st.session_state.chat_id == 'New Conversation':
-
-
-
-            output = gen_conv(
-                prompt,
-                max_tokens=10,
-                sampling_params=SamplingParams(temperature=0.3)
+        if st.session_state.chat_id == 'New Conversation':
+            output = llm.generate(
+                build_prompt_conv(),
+                SamplingParams(temperature=0.3, max_tokens=10)
             )
-            print(f'OUTPUT : {output}')
-            st.session_state.chat_id = output
+            print(f'OUTPUT : {output[0].outputs[0].text}')
+            st.session_state.chat_id = output[0].outputs[0].text
             st.session_state.messages = []
 
     torch.cuda.empty_cache()
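For reference, the rewritten generate_conv_title follows vLLM's offline-inference API: LLM.generate takes a prompt (or list of prompts) plus SamplingParams and returns a list of RequestOutput objects whose completions sit under .outputs, which is why the code indexes output[0].outputs[0].text. A minimal sketch, assuming vLLM is installed; the model name is illustrative:

# Minimal sketch of the vLLM generate API used in the hunk above.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")  # illustrative model choice
params = SamplingParams(temperature=0.3, max_tokens=10)

# generate() returns one RequestOutput per prompt; each completion is a
# CompletionOutput whose .text holds the generated string.
outputs = llm.generate(["Question: what is a vector database?"], params)
print(outputs[0].outputs[0].text)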
@@ -761,7 +756,7 @@ if __name__ == '__main__':
         st.chat_message("user").markdown(prompt)
         st.session_state.messages.append({"role": "user", "content": prompt})
 
-        ai_response = main(prompt, client, collection_name,
+        ai_response = main(prompt, client, collection_name, model, dense_model, sparse_model, "\n".join([f'{msg["role"]}: {msg["content"]}' for msg in st.session_state.messages]))
         with st.chat_message("assistant"):
             message_placeholder = st.empty()
             full_response = ""