devve1 committed
Commit 8afc070
1 Parent(s): cfa4332

Update app.py

Files changed (1)
  1. app.py +6 -4
app.py CHANGED
@@ -215,6 +215,7 @@ def main(query: str, client: QdrantClient, collection_name: str, llm, dense_mode
 
     regex = build_regex_from_schema(schema, r"[\n ]?")
     gen_text = outlines.generate.regex(llm, regex)
+    gen_text.format_sequence = lambda x: schema_object.parse_raw(x)
 
     gen_choice = outlines.generate.choice(llm, choices=['Yes', 'No'])
     prompt = route_llm(context, query)
@@ -231,7 +232,7 @@ def main(query: str, client: QdrantClient, collection_name: str, llm, dense_mode
     result_metadatas = "\n\n".join(f'{value}' for value in filtered_metadatas)
 
     prompt = answer_with_context(context, query)
-    answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))['answer']
+    answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
     answer = f"{answer}\n\n\nSource(s) :\n\n{result_metadatas}"
 
     if not st.session_state.documents_only:
@@ -243,14 +244,15 @@ def main(query: str, client: QdrantClient, collection_name: str, llm, dense_mode
         print(f'Choice 2: {action}')
         if action == 'General Question':
             prompt = open_query_prompt(past_messages, query)
-            answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))['answer']
+            answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
         else:
             if st.session_state.documents_only:
                 prompt = idk(query)
-                answer = gen_text(prompt, max_tokens=128, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))['answer']
+                answer = gen_text(prompt, max_tokens=128, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
+                print(f'TYPE: {type(answer)}')
             else:
                 prompt = self_knowledge(query)
-                answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))['answer']
+                answer = gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
                 answer = f'Internal Knowledge :\n\n{answer}'
 
     torch.cuda.empty_cache()
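
For context, the format_sequence override and the removed ['answer'] indexing go together: once format_sequence parses the constrained JSON output with schema_object.parse_raw, gen_text returns a Pydantic object rather than a plain string, which the added type() print appears intended to confirm. Below is a minimal sketch of that pattern, assuming outlines' legacy generate API on a vLLM backend; the Answer model and the model id are placeholders standing in for schema_object and the app's actual LLM.

import outlines
from outlines.fsm.json_schema import build_regex_from_schema
from pydantic import BaseModel
from vllm import SamplingParams

# Placeholder schema standing in for schema_object (assumed to be a Pydantic v1-style model).
class Answer(BaseModel):
    answer: str

llm = outlines.models.vllm("some-org/some-model")  # placeholder model id

# Same setup as in app.py: constrain generation to the schema's JSON regex...
regex = build_regex_from_schema(Answer.schema_json(), r"[\n ]?")
gen_text = outlines.generate.regex(llm, regex)
# ...then parse the raw JSON output into the Pydantic model instead of returning the string.
gen_text.format_sequence = lambda x: Answer.parse_raw(x)

result = gen_text("Reply as JSON.", max_tokens=300,
                  sampling_params=SamplingParams(temperature=0.6, top_p=0.9))
print(type(result))   # an Answer instance, not a str
print(result.answer)  # fields are read as attributes, not by subscripting

If the parsed object is what comes back, its fields would be accessed as attributes (result.answer) rather than with ['answer'], which is consistent with the indexing being dropped in this commit.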