Update app.py: add top_k=10 to SamplingParams
app.py CHANGED
@@ -232,7 +232,7 @@ def main(query: str, client: QdrantClient, collection_name: str, llm, dense_mode
     result_metadatas = "\n\n".join(f'{value}' for value in filtered_metadatas)
 
     prompt = answer_with_context(context, query)
-    answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
+    answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9, top_k=10))
     answer = f"{answer}\n\n\nSource(s) :\n\n{result_metadatas}"
 
     if not st.session_state.documents_only:
@@ -244,15 +244,15 @@ def main(query: str, client: QdrantClient, collection_name: str, llm, dense_mode
     print(f'Choice 2: {action}')
     if action == 'General Question':
         prompt = open_query_prompt(past_messages, query)
-        answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
+        answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9, top_k=10))
     else:
         if st.session_state.documents_only:
             prompt = idk(query)
-            answer = gen_text(prompt, max_tokens=128, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
+            answer = gen_text(prompt, max_tokens=128, sampling_params=SamplingParams(temperature=0.6, top_p=0.9, top_k=10))
             print(f'TYPE: {type(answer)}')
         else:
             prompt = self_knowledge(query)
-            answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
+            answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9, top_k=10))
             answer = f'Internal Knowledge :\n\n{answer}'
 
     torch.cuda.empty_cache()
@@ -745,7 +745,7 @@ if __name__ == '__main__':
     if st.session_state.chat_id == 'New Conversation':
         output = llm.chat(
             build_prompt_conv(),
-            SamplingParams(temperature=0.6,top_p=0.9, max_tokens=10)
+            SamplingParams(temperature=0.6,top_p=0.9, max_tokens=10, top_k=10)
         )
         print(f'OUTPUT : {output[0].outputs[0].text}')
         st.session_state.chat_id = output[0].outputs[0].text.replace('"', '')
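The change is identical at every answer-generation call site: top_k=10 is added to the SamplingParams passed through gen_text. Below is a minimal sketch of what that sampling setup does, assuming vLLM's offline API; gen_text is app.py's own wrapper, and the model name is a hypothetical stand-in.

# Minimal sketch of the updated sampling setup, assuming vLLM's offline API.
# The model name is a hypothetical stand-in; gen_text in app.py presumably
# wraps a generate() call like the one below.
from vllm import LLM, SamplingParams

llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.2")  # hypothetical model

# With top_p=0.9 alone, sampling draws from the smallest set of tokens whose
# cumulative probability reaches 0.9. Adding top_k=10 first caps the candidate
# pool at the 10 most probable tokens, so low-probability tail tokens are
# excluded even when top_p would have admitted them.
params = SamplingParams(temperature=0.6, top_p=0.9, top_k=10, max_tokens=300)

outputs = llm.generate(["Explain top-k sampling in one sentence."], params)
print(outputs[0].outputs[0].text)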
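The last hunk applies the same top_k to the conversation-title path, which uses vLLM's LLM.chat rather than the gen_text wrapper. A sketch under the same assumptions, with a stand-in for build_prompt_conv():

# Minimal sketch of the conversation-title path, assuming vLLM's LLM.chat API.
# build_prompt_conv() in app.py presumably returns a chat-format message list
# like the stand-in below.
from vllm import LLM, SamplingParams

llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.2")  # hypothetical model

messages = [  # stand-in for build_prompt_conv()
    {"role": "user", "content": "Summarize this conversation in a short title."},
]

# max_tokens=10 keeps the generated title short; top_k=10 now constrains
# sampling here the same way it does in the answer paths above.
output = llm.chat(messages, SamplingParams(temperature=0.6, top_p=0.9, max_tokens=10, top_k=10))
title = output[0].outputs[0].text.replace('"', '')
print(title)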