import gradio as gr
import openai
import markdown
my_api_key = ""  # put your OpenAI API key here
initial_prompt = "You are a helpful assistant."
class ChatGPT:
    def __init__(self, apikey) -> None:
        openai.api_key = apikey
        self.system = {"role": "system", "content": initial_prompt}
        self.context = []
        self.response = None

    def get_response(self, messages):
        # Call the ChatCompletion API with the system prompt prepended to the running context
        self.response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[self.system, *messages],
        )
        # Token usage summary appended below each reply (labels are in Chinese)
        statistics = f'本次对话Tokens用量【{self.response["usage"]["total_tokens"]} / 4096】 ( 提问+上文 {self.response["usage"]["prompt_tokens"]},回答 {self.response["usage"]["completion_tokens"]} )'
        message = self.response["choices"][0]["message"]["content"]
        message_with_stats = f'{message}\n\n================\n\n{statistics}'
        message_with_stats = markdown.markdown(message_with_stats)
        return message, message_with_stats

    def predict(self, chatbot, input_sentence):
        if len(input_sentence) == 0:
            # Nothing to send; leave the chat display unchanged
            return chatbot
        self.context.append({"role": "user", "content": f"{input_sentence}"})
        message, message_with_stats = self.get_response(self.context)
        self.context.append({"role": "assistant", "content": message})
        chatbot.append((input_sentence, message_with_stats))
        return chatbot

    def retry(self, chatbot):
        # Regenerate the last assistant reply from the context up to, but not including, it
        if len(self.context) == 0:
            return chatbot
        message, message_with_stats = self.get_response(self.context[:-1])
        self.context[-1] = {"role": "assistant", "content": message}
        chatbot[-1] = (self.context[-2]["content"], message_with_stats)
        return chatbot

    def update_system(self, new_system_prompt):
        self.system = {"role": "system", "content": new_system_prompt}
        return new_system_prompt

    def delete_last_conversation(self, chatbot):
        # Drop the last user/assistant exchange from both the display and the context
        if len(self.context) == 0:
            return chatbot
        chatbot = chatbot[:-1]
        self.context = self.context[:-2]
        return chatbot

    def reduce_token(self, chatbot):
        # Ask the model to summarize the conversation, fold the summary into the system
        # prompt, and clear the context so later turns start from the summary
        self.context.append({"role": "user", "content": "请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。在总结中不要加入这一句话。"})
        message, message_with_stats = self.get_response(self.context)
        self.system = {"role": "system", "content": f"You are a helpful assistant. The content that the Assistant and the User discussed in the previous self.context is: {message}."}
        statistics = f'本次对话Tokens用量【{self.response["usage"]["completion_tokens"]+23} / 4096】'
        optmz_str = markdown.markdown(f"System prompt已经更新, 请继续对话\n\n================\n\n{statistics}")
        chatbot.append(("请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。", optmz_str))
        self.context = []
        return chatbot, self.system["content"]
def reset_state():
    # Start a fresh conversation: clear both the stored context and the chat display
    mychatGPT.context = []
    return []
mychatGPT = ChatGPT(my_api_key)
with gr.Blocks() as demo:
    chatbot = gr.Chatbot().style(color_map=("#1D51EE", "#585A5B"))
    # state = gr.State([])
    with gr.Row():
        with gr.Column(scale=10):
            txt = gr.Textbox(show_label=False, placeholder="💬 在这里输入").style(container=False)
        with gr.Column(min_width=50, scale=1):
            submitBtn = gr.Button("发送", variant="primary")
    with gr.Row():
        emptyBtn = gr.Button("🧹 新的对话")
        retryBtn = gr.Button("🔁 重新生成")
        delLastBtn = gr.Button("⬅️ 删除上条对话")
        reduceTokenBtn = gr.Button("♻️ 优化Tokens")
    system = gr.Textbox(show_label=True, placeholder="在这里输入新的System Prompt...", label="更改 System prompt").style(container=True)
    syspromptTxt = gr.Textbox(show_label=True, placeholder=initial_prompt, interactive=False, label="目前的 System prompt").style(container=True)

    # Event wiring: pressing Enter or clicking "发送" sends the message, then clears the textbox
    txt.submit(mychatGPT.predict, [chatbot, txt], [chatbot], show_progress=True)
    txt.submit(lambda: "", None, txt)
    submitBtn.click(mychatGPT.predict, [chatbot, txt], [chatbot], show_progress=True)
    submitBtn.click(lambda: "", None, txt)
    emptyBtn.click(reset_state, outputs=[chatbot])
    system.submit(mychatGPT.update_system, system, syspromptTxt)
    system.submit(lambda: "", None, system)
    retryBtn.click(mychatGPT.retry, [chatbot], [chatbot], show_progress=True)
    delLastBtn.click(mychatGPT.delete_last_conversation, [chatbot], [chatbot], show_progress=True)
    reduceTokenBtn.click(mychatGPT.reduce_token, [chatbot], [chatbot, syspromptTxt], show_progress=True)
demo.launch()