import gradio as gr
import openai
import requests
import csv
import os

import langchain
import chromadb
#import faiss

# LangChain pieces used by the retrieval chain in submit_message() below.
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import ChatVectorDBChain

# The vector embeddings were created with Chroma in Colab and saved to this
# Hugging Face Space, so the client is pointed at the persisted copy here.
from chromadb.config import Settings
client = chromadb.Client(Settings(
    chroma_db_impl="duckdb+parquet",
    persist_directory="./embeddings"  # optional; defaults to .chromadb/ in the current directory
))
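
# For reference only: a minimal sketch of how a store like this could have
# been built and persisted in Colab. The file name and chunking parameters
# are illustrative assumptions, not taken from the original code:
#
#   from langchain.document_loaders import TextLoader
#   from langchain.text_splitter import CharacterTextSplitter
#   docs = TextLoader("needs_corpus.txt").load()    # hypothetical source text
#   chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(docs)
#   db = Chroma.from_documents(chunks, OpenAIEmbeddings(), persist_directory="./embeddings")
#   db.persist()    # writes the duckdb+parquet files this app loads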
def get_empty_state():
    return {"total_tokens": 0, "messages": []}
# Initial prompt template; the others are loaded below from a TXT file.
prompt_templates = {"All Needs Gurus": "I want you to act as a needs assessment expert."}
def download_prompt_templates():
    url = "https://huggingface.co./spaces/ryanrwatkins/needs/raw/main/gurus.txt"
    try:
        response = requests.get(url)
        reader = csv.reader(response.text.splitlines())
        next(reader)  # skip the header row
        for row in reader:
            if len(row) >= 3:  # row[2] is read below, so require three columns
                act = row[0].strip('"')
                prompt = row[1].strip('"')
                description = row[2].strip('"')
                prompt_templates[act] = prompt
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while downloading prompt templates: {e}")
        return

    choices = list(prompt_templates.keys())
    choices = choices[:1] + sorted(choices[1:])  # keep "All Needs Gurus" first
    return gr.update(value=choices[0], choices=choices)
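
# gurus.txt is expected to be a CSV with a header row and at least three
# quoted columns per line: name, prompt, description. Illustrative example
# (not the actual file contents):
#
#   "act","prompt","description"
#   "Expert Name","I want you to act as ...","Short description of the expert ..."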
def on_prompt_template_change(prompt_template):
    if not isinstance(prompt_template, str):
        return
    return prompt_templates[prompt_template]
def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):

    openai.api_key = os.environ['openai_key']

    history = state['messages']

    if not prompt:
        return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']}", state

    prompt_template = prompt_templates[prompt_template]

    system_prompt = []
    if prompt_template:
        system_prompt = [{ "role": "system", "content": prompt_template }]

    prompt_msg = { "role": "user", "content": prompt }

    try:
        # Plain ChatGPT call, kept for reference:
        # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)

        # Retrieval-augmented call instead: load the persisted Chroma store
        # (built in Colab, see above) rather than rebuilding it per request.
        # Assumes the store was created with OpenAIEmbeddings.
        # Adapted from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
        persist_directory = "./embeddings"
        vectordb = Chroma(persist_directory=persist_directory, embedding_function=OpenAIEmbeddings())
        chain = ChatVectorDBChain.from_llm(OpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), vectordb, return_source_documents=True)

        # ChatVectorDBChain expects a question string plus (question, answer)
        # history pairs, not raw ChatCompletion message dicts. It also has no
        # system-message slot, so the expert persona is prepended to the
        # question as a pragmatic workaround.
        recent = history[-context_length*2:]
        chat_history = [(recent[i]['content'], recent[i+1]['content']) for i in range(0, len(recent)-1, 2)]
        question = f"{prompt_template}\n\n{prompt}" if prompt_template else prompt
        result = chain({"question": question, "chat_history": chat_history})

        history.append(prompt_msg)
        history.append({ "role": "assistant", "content": result["answer"] })
        # Unlike the raw ChatCompletion API, the chain does not report token
        # usage, so state['total_tokens'] is not incremented here.
    except Exception as e:
        history.append(prompt_msg)
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })

    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]

    return '', chat_messages, total_tokens_used_msg, state
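
# For reference: with return_source_documents=True the chain's result is a
# dict shaped roughly like (illustrative):
#   {"question": "...", "chat_history": [...], "answer": "...",
#    "source_documents": [Document(page_content="...", metadata={...}), ...]}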
def clear_conversation():
    return gr.update(value=None, visible=True), None, "", get_empty_state()
css = """
#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
#chatbox {min-height: 400px;}
#header {text-align: center;}
#prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
#total_tokens_str {text-align: right; font-size: 0.8em; color: #666;}
#label {font-size: 0.8em; padding: 0.5em; margin: 0;}
.message { font-size: 1.2em; }
"""
with gr.Blocks(css=css) as demo:
    state = gr.State(get_empty_state())

    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# Chat with Needs Assessment Experts (Past and Present)
                    ## Ask questions of experts on needs assessment, and get responses from a *needs assessment* version of ChatGPT.
                    Ask questions of all of them, or pick your expert.""",
                    elem_id="header")

        with gr.Row():
            with gr.Column():
                chatbot = gr.Chatbot(elem_id="chatbox")
                input_message = gr.Textbox(show_label=False, placeholder="Enter your needs assessment question and press enter", visible=True).style(container=False)
                btn_submit = gr.Button("Submit")
                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
                btn_clear_conversation = gr.Button("🔃 Start New Conversation")
            with gr.Column():
                prompt_template = gr.Dropdown(label="Choose an expert:", choices=list(prompt_templates.keys()))
                prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
                with gr.Accordion("Advanced parameters", open=False):
                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, label="Flexibility", info="Higher = more creative/chaotic, lower = just the guru")
                    max_tokens = gr.Slider(minimum=100, maximum=400, value=400, step=1, label="Max tokens per response")
                    context_length = gr.Slider(minimum=1, maximum=5, value=2, step=1, label="Context length", info="Number of previous questions to include. Be careful with high values: they can blow through the token budget quickly.")
    btn_submit.click(submit_message, [input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
    input_message.submit(submit_message, [input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
    btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
    prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
    demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template], queue=False)

demo.queue(concurrency_count=10)
demo.launch(height='800px')
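
# To run locally (assumed setup, not part of the original file):
#   pip install gradio openai langchain chromadb
#   export openai_key=sk-...    # submit_message() reads os.environ['openai_key']
#   python app.py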