Update question.py
Browse files- question.py +1 -4
question.py
CHANGED
@@ -66,9 +66,6 @@ def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
|
|
66 |
# print(st.session_state['max_tokens'])
|
67 |
endpoint_url = ("https://api-inference.huggingface.co/models/"+ model)
|
68 |
model_kwargs = {"temperature" : st.session_state['temperature'],
|
69 |
-
"min_p" : 0.1,
|
70 |
-
"top_p" : 0.9,
|
71 |
-
"repetition_penalty" : 1.5,
|
72 |
"max_new_tokens" : st.session_state['max_tokens'],
|
73 |
"return_full_text" : False}
|
74 |
hf = HuggingFaceEndpoint(
|
@@ -77,7 +74,7 @@ def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
|
|
77 |
huggingfacehub_api_token=hf_api_key,
|
78 |
model_kwargs=model_kwargs
|
79 |
)
|
80 |
-
qa = ConversationalRetrievalChain.from_llm(hf, retriever=vector_store.as_retriever(search_kwargs={"score_threshold": 0.6, "k": 3,"filter": {"user": st.session_state["username"]}}), memory=memory, verbose=True, return_source_documents=True)
|
81 |
|
82 |
st.session_state['chat_history'].append(("You", question))
|
83 |
|
|
|
66 |
# print(st.session_state['max_tokens'])
|
67 |
endpoint_url = ("https://api-inference.huggingface.co/models/"+ model)
|
68 |
model_kwargs = {"temperature" : st.session_state['temperature'],
|
|
|
|
|
|
|
69 |
"max_new_tokens" : st.session_state['max_tokens'],
|
70 |
"return_full_text" : False}
|
71 |
hf = HuggingFaceEndpoint(
|
|
|
74 |
huggingfacehub_api_token=hf_api_key,
|
75 |
model_kwargs=model_kwargs
|
76 |
)
|
77 |
+
qa = ConversationalRetrievalChain.from_llm(hf, retriever=vector_store.as_retriever(search_kwargs={"score_threshold": 0.6, "k": 3,"filter": {"user": st.session_state["username"]}}), memory=memory, verbose=True, return_source_documents=True)
|
78 |
|
79 |
st.session_state['chat_history'].append(("You", question))
|
80 |
|