Commit 4fe0d73, committed by pseudotensor
Parent(s): bacf914
Update with h2oGPT hash 6a58e8f0a87c4fc67f2b2bec736cf34f8c1bbc39
Browse files:
- gen.py +9 -7
- gpt_langchain.py +2 -1
gen.py CHANGED
@@ -403,13 +403,13 @@ def main(
 
     # auto-set langchain_mode
     if have_langchain and langchain_mode is None:
+        # start in chat mode, in case just want to chat and don't want to get "No documents to query" by default.
+        langchain_mode = LangChainMode.CHAT_LLM.value
         if allow_upload_to_user_data and not is_public and user_path:
-            langchain_mode = 'UserData'
-            print("Auto set langchain_mode=%s" % langchain_mode, flush=True)
+            print("Auto set langchain_mode=%s. Could use UserData instead." % langchain_mode, flush=True)
         elif allow_upload_to_my_data:
-            langchain_mode = 'MyData'
-            print("Auto set langchain_mode=%s."
-                  " To use UserData to pull files from disk,"
+            print("Auto set langchain_mode=%s. Could use MyData instead."
+                  " To allow UserData to pull files from disk,"
                   " set user_path and ensure allow_upload_to_user_data=True" % langchain_mode, flush=True)
         else:
            raise RuntimeError("Please pass --langchain_mode=<chosen mode> out of %s" % langchain_modes)
@@ -1555,7 +1555,8 @@ def evaluate(
        where_from = "openai_client"
 
        openai.api_key = os.getenv("OPENAI_API_KEY")
-       stop_sequences = list(set(prompter.terminate_response + [prompter.PreResponse]))
+       terminate_response = prompter.terminate_response or []
+       stop_sequences = list(set(terminate_response + [prompter.PreResponse]))
        stop_sequences = [x for x in stop_sequences if x]
        # OpenAI will complain if ask for too many new tokens, takes it as min in some sense, wrongly so.
        max_new_tokens_openai = min(max_new_tokens, model_max_length - num_prompt_tokens)
@@ -1764,7 +1765,8 @@ def evaluate(
 
        # prompt must include all human-bot like tokens, already added by prompt
        # https://github.com/huggingface/text-generation-inference/tree/main/clients/python#types
-       stop_sequences = list(set(prompter.terminate_response + [prompter.PreResponse]))
+       terminate_response = prompter.terminate_response or []
+       stop_sequences = list(set(terminate_response + [prompter.PreResponse]))
        stop_sequences = [x for x in stop_sequences if x]
        gen_server_kwargs = dict(do_sample=do_sample,
                                 max_new_tokens=max_new_tokens,
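The first hunk changes the fallback when langchain is installed but no --langchain_mode is passed: instead of auto-selecting a document collection (or raising), the server now starts in plain chat mode and only logs that UserData or MyData could be used instead. Below is a minimal sketch of that fallback, assuming a LangChainMode enum whose member values ("ChatLLM", "UserData", "MyData") are inferred from the printed messages rather than copied from h2oGPT's own enums:

```python
from enum import Enum


class LangChainMode(Enum):
    # Member values are assumptions for illustration; the real enum lives in h2oGPT's own modules.
    CHAT_LLM = "ChatLLM"    # plain chat, no document collection
    USER_DATA = "UserData"  # shared collection filled from user_path on disk
    MY_DATA = "MyData"      # per-user uploaded documents


def auto_set_langchain_mode(have_langchain: bool, langchain_mode):
    # Default to plain chat so a fresh install does not answer "No documents to query".
    if have_langchain and langchain_mode is None:
        langchain_mode = LangChainMode.CHAT_LLM.value
    return langchain_mode


print(auto_set_langchain_mode(True, None))  # -> ChatLLM
```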
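The two evaluate() hunks (the OpenAI client path and the text-generation-inference path) apply the same fix: prompter.terminate_response can be None for prompt types that define no terminator, and the previous one-line construction concatenated it directly with [prompter.PreResponse], which raises a TypeError. A minimal sketch of the guarded construction, assuming only a prompter-like object with terminate_response (list or None) and PreResponse (str) attributes:

```python
from types import SimpleNamespace


def build_stop_sequences(prompter):
    # Treat a missing terminate_response as an empty list; previously
    # `None + [prompter.PreResponse]` would raise TypeError.
    terminate_response = prompter.terminate_response or []
    # Deduplicate, then drop empty entries so the backend never receives a blank stop string.
    stop_sequences = list(set(terminate_response + [prompter.PreResponse]))
    return [x for x in stop_sequences if x]


# Example with a prompt type that has no terminator:
prompter = SimpleNamespace(terminate_response=None, PreResponse="<bot>:")
print(build_stop_sequences(prompter))  # -> ['<bot>:']
```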
gpt_langchain.py CHANGED
@@ -643,7 +643,8 @@ def get_llm(use_openai_model=False,
 
        callbacks = [StreamingGradioCallbackHandler()]
        assert prompter is not None
-       stop_sequences = list(set(prompter.terminate_response + [prompter.PreResponse]))
+       terminate_response = prompter.terminate_response or []
+       stop_sequences = list(set(terminate_response + [prompter.PreResponse]))
        stop_sequences = [x for x in stop_sequences if x]
 
        if gr_client:
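gpt_langchain.py gets the identical guard where the streaming client is configured in get_llm. As a rough usage sketch (not part of this commit), a list built this way is the sort of value eventually handed to the text-generation-inference Python client documented at the URL cited in the diff; the server address, prompt, and stop strings below are placeholders:

```python
from text_generation import Client  # TGI Python client from the linked repository

stop_sequences = ["<human>:", "<bot>:"]   # placeholder stop strings, already de-duplicated and filtered
client = Client("http://127.0.0.1:8080")  # placeholder inference-server URL
response = client.generate(
    "Why is the sky blue?",               # placeholder prompt
    max_new_tokens=256,
    stop_sequences=stop_sequences,
)
print(response.generated_text)
```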