import gradio as gr
import openai
import markdown
my_api_key = "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" # input your api_key
initial_prompt = "You are a helpful assistant."
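# Thin wrapper around openai.ChatCompletion that keeps the current system
# prompt and exposes the chat callbacks used by the Gradio UI below.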
class ChatGPT:
    def __init__(self, apikey) -> None:
        openai.api_key = apikey
        self.system = {"role": "system", "content": initial_prompt}

    def get_response(self, messages):
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[self.system, *messages],
        )
        statistics = f'Token usage (max 4096): completion {response["usage"]["completion_tokens"]}, prompt {response["usage"]["prompt_tokens"]}, total {response["usage"]["total_tokens"]}'
        message = response["choices"][0]["message"]["content"]
        message_with_stats = f'{message}\n\n================\n\n{statistics}'
        message_with_stats = markdown.markdown(message_with_stats)
        return message, message_with_stats

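    # predict: append the user's message to the context, query the API, and
    # record both sides of the exchange for the Chatbot display.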
    def predict(self, chatbot, input_sentence, context):
        if len(input_sentence) == 0:
            return [], context
        context.append({"role": "user", "content": f"{input_sentence}"})
        message, message_with_stats = self.get_response(context)
        context.append({"role": "assistant", "content": message})
        chatbot.append((input_sentence, message_with_stats))
        return chatbot, context

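    # retry: regenerate the last assistant reply from the same user message
    # and overwrite the previous answer in both the context and the chatbot.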
    def retry(self, chatbot, context):
        if len(context) == 0:
            return [], []
        message, message_with_stats = self.get_response(context[:-1])
        context[-1] = {"role": "assistant", "content": message}
        chatbot[-1] = (context[-2]["content"], message_with_stats)
        return chatbot, context

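    # update_system: replace the system prompt used for every later request.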
    def update_system(self, new_system_prompt):
        self.system = {"role": "system", "content": new_system_prompt}
        return new_system_prompt

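    # delete_last_conversation: drop the most recent question/answer pair
    # from both the visible history and the API context.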
    def delete_last_conversation(self, chatbot, context):
        if len(context) == 0:
            return [], []
        chatbot = chatbot[:-1]
        context = context[:-2]
        return chatbot, context

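# reset_state: clear both the visible chat history and the message context.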
def reset_state():
return [], []
mychatGPT = ChatGPT(my_api_key)
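# Build the Gradio interface: chat display, text input, control buttons,
# and a pair of textboxes for editing / showing the system prompt.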
with gr.Blocks() as demo:
    chatbot = gr.Chatbot().style(color_map=("#1D51EE", "#585A5B"))
    state = gr.State([])

    with gr.Column():
        txt = gr.Textbox(show_label=False, placeholder="💬 Type here").style(container=False)
        with gr.Row():
            emptyBth = gr.Button("Reset")
            retryBth = gr.Button("Retry")
            delLastBth = gr.Button("Delete last Q&A")
        system = gr.Textbox(show_label=True, placeholder="Enter a new System Prompt here...", label="Change System prompt").style(container=True)
        syspromptTxt = gr.Textbox(show_label=True, placeholder=initial_prompt, interactive=False, label="Current System prompt").style(container=True)

    txt.submit(mychatGPT.predict, [chatbot, txt, state], [chatbot, state], show_progress=True)
    txt.submit(lambda: "", None, txt)
    emptyBth.click(reset_state, outputs=[chatbot, state])
    system.submit(mychatGPT.update_system, system, syspromptTxt)
    system.submit(lambda: "", None, system)
    retryBth.click(mychatGPT.retry, [chatbot, state], [chatbot, state], show_progress=True)
    delLastBth.click(mychatGPT.delete_last_conversation, [chatbot, state], [chatbot, state], show_progress=True)

demo.launch()