import os
import openai
import sys
sys.path.append('../..')
import utils
import panel as pn  # GUI

pn.extension(template='bootstrap')
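
# Configuration note: the calls below assume the following environment variables are
# set for the Space -- DEPLOYMENT_ID, API_KEY, API_BASE, API_TYPE and API_VERSION for
# the Azure OpenAI chat deployment, plus MOD_API_KEY and MOD_API_BASE for the public
# OpenAI Moderation endpoint.
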
deployment_name = os.environ['DEPLOYMENT_ID']

def get_completion_from_messages(messages, engine=deployment_name, temperature=0, max_tokens=500):
    openai.api_key = os.environ['API_KEY']
    openai.api_base = os.environ['API_BASE']
    openai.api_type = os.environ['API_TYPE']
    openai.api_version = os.environ['API_VERSION']
    response = openai.ChatCompletion.create(
        engine=engine,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return response.choices[0].message["content"]

def get_moderation_from_input(user_input):
    # The Moderation endpoint is called against the public OpenAI API, so the
    # Azure-specific settings configured above are overridden here.
    openai.api_key = os.environ['MOD_API_KEY']
    openai.api_base = os.environ['MOD_API_BASE']
    openai.api_type = "open_ai"
    openai.api_version = None
    response = openai.Moderation.create(input=user_input)
    return response
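
# For reference, the legacy (pre-1.0 SDK) Moderation response is shaped roughly as
#   {"results": [{"flagged": bool, "categories": {...}, "category_scores": {...}}]}
# and process_user_message below only reads results[0]["flagged"].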

def process_user_message(user_input, all_messages, debug=True):
    delimiter = "```"

    # Step 1: Check whether the input is flagged by the Moderation API (disallowed
    # content or likely prompt injection).
    response = get_moderation_from_input(user_input)
    moderation_output = response["results"][0]
    if moderation_output["flagged"]:
        print("Step 1: Input flagged by Moderation API.")
        return "Sorry, we cannot process this request.", all_messages
    if debug: print("Step 1: Input passed moderation check.")

    page_and_qm_response = utils.find_pages_and_qms_only(user_input)
    # print(page_and_qm_response)

    # Step 2: Extract the list of pages and quality markers mentioned in the input.
    page_and_qm_list = utils.read_string_to_list(page_and_qm_response)
    # print(page_and_qm_list)
    if debug: print("Step 2: Extracted list of quality markers.")

    # Step 3: If quality markers are found, look them up.
    qm_information = utils.generate_output_string(page_and_qm_list)
    if debug: print("Step 3: Looked up quality marker information.")

    # Step 4: Answer the user question using only the retrieved information.
    system_message = f"""
    You are an experienced assistant in the domain of Behaviour Support Plans (BSPs). \
    Respond in a friendly and helpful tone, with concise answers. \
    Your response MUST be based ONLY on the relevant information provided to you.
    Make sure to ask the user relevant follow-up questions.
    """
    messages = [
        {'role': 'system', 'content': system_message},
        {'role': 'user', 'content': f"{delimiter}{user_input}{delimiter}"},
        {'role': 'assistant', 'content': f"Relevant information:\n{qm_information}"}
    ]
    final_response = get_completion_from_messages(all_messages + messages)
    if debug: print("Step 4: Generated response to user question.")
    all_messages = all_messages + messages[1:]

    # Step 5: Put the answer through the Moderation API.
    response = get_moderation_from_input(final_response)
    moderation_output = response["results"][0]
    if moderation_output["flagged"]:
        if debug: print("Step 5: Response flagged by Moderation API.")
        return "Sorry, we cannot provide this information.", all_messages
    if debug: print("Step 5: Response passed moderation check.")

    # Step 6: Ask the model whether the response answers the initial user query well.
    user_message = f"""
    Customer message: {delimiter}{user_input}{delimiter}
    Agent response: {delimiter}{final_response}{delimiter}

    Does the agent response sufficiently answer the customer question? \
    Respond with a Y or N character, with no punctuation: \
    Y - if the output sufficiently answers the question \
    AND the response correctly uses the relevant information provided to the agent \
    N - otherwise \

    Output a single letter only.
    """
    messages = [
        {'role': 'system', 'content': system_message},
        {'role': 'user', 'content': user_message}
    ]
    evaluation_response = get_completion_from_messages(messages)
    if debug: print("Step 6: Model evaluated the response.")

    # Step 7: If yes, use this answer; if not, tell the user the information cannot be provided.
    if "Y" in evaluation_response:  # "in" rather than "==" tolerates variations such as "Y." or "Yes"
        if debug: print("Step 7: Model approved the response.")
        return final_response, all_messages
    else:
        if debug: print("Step 7: Model disapproved the response.")
        neg_str = "I'm unable to provide the information you're looking for. Please try asking again."
        return neg_str, all_messages
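
# Illustrative only: the pipeline can be exercised without the GUI, e.g. from a REPL,
# roughly like this (the question below is hypothetical):
#
#   history = [{"role": "system", "content": "You are a Service Assistant"}]
#   answer, history = process_user_message(
#       "Which quality markers are discussed on page 3?", history, debug=True)
#   print(answer)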

chat_box = pn.widgets.ChatBox(value=[
    {"You": ""},
    {"AI Assistant": "Greetings! Feel free to ask about the BSP summary document, such as pages, quality markers, AI models, and NLP topics."}
])

context = [{"role": "system", "content": "You are a Service Assistant"}]

# Callback that collects user and assistant messages over time
def collect_messages(event) -> None:
    global context
    user_input = event.new[-1]
    user_message = user_input.get("You")
    if user_message is None or user_message == "":
        return
    response, context = process_user_message(user_message, context, debug=False)
    context.append({"role": "assistant", "content": f"{response}"})
    chat_box.append({"AI Assistant": response})

# Chat with the chatbot!
chat_box.param.watch(collect_messages, 'value')

dashboard = pn.Column(pn.pane.Markdown("# BSP Summary Document AI Assistant"), chat_box)
dashboard.servable()
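
# To run locally, something like `panel serve app.py --autoreload` (assuming this
# file is saved as app.py) serves the dashboard; on Hugging Face Spaces the app is
# launched according to the Space's configuration.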