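# A minimal ChatGPT-style web chat built with Gradio and the OpenAI
# ChatCompletion API (gpt-3.5-turbo). Conversation state is kept in gr.State,
# and a token-usage summary is appended to every reply.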
import gradio as gr
import openai
import markdown
我的API密钥 = "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" # 在这里输入你的 API 密钥
initial_prompt = "You are a helpful assistant."
openai.api_key = 我的API密钥
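
# Send the system prompt plus the accumulated conversation to the API and
# return either the raw response (raw=True) or a (plain_message, markdown_with_stats) pair.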
def get_response(system, context, raw=False):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[system, *context],
    )
    if raw:
        return response
    else:
        statistics = f'本次对话Tokens用量【{response["usage"]["total_tokens"]} / 4096】 ( 提问+上文 {response["usage"]["prompt_tokens"]},回答 {response["usage"]["completion_tokens"]} )'
        message = response["choices"][0]["message"]["content"]
        message_with_stats = f'{message}\n\n================\n\n{statistics}'
        message_with_stats = markdown.markdown(message_with_stats)
        return message, message_with_stats
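
# Append the user's message to the context, query the model, and extend both
# the visible chat history and the raw context with the reply.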
def predict(chatbot, input_sentence, system, context):
    if len(input_sentence) == 0:
        # Nothing to send; leave the chat history and context unchanged.
        return chatbot, context
    context.append({"role": "user", "content": f"{input_sentence}"})
    message, message_with_stats = get_response(system, context)
    context.append({"role": "assistant", "content": message})
    chatbot.append((input_sentence, message_with_stats))
    return chatbot, context
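
# Regenerate the last assistant reply: re-query the model with everything up to
# (but not including) the previous answer, then replace that answer in place.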
def retry(chatbot, system, context):
    if len(context) == 0:
        return [], []
    message, message_with_stats = get_response(system, context[:-1])
    context[-1] = {"role": "assistant", "content": message}
    chatbot[-1] = (context[-2]["content"], message_with_stats)
    return chatbot, context
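
# Drop the most recent exchange: one (question, answer) pair from the chat
# history and the matching user/assistant entries from the context.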
def delete_last_conversation(chatbot, context):
    if len(context) == 0:
        return [], []
    chatbot = chatbot[:-1]
    context = context[:-2]
    return chatbot, context
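
# Ask the model to summarize the conversation so far, then replace the full
# context with a short two-message recap so later turns spend fewer tokens.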
def reduce_token(chatbot, system, context):
    context.append({"role": "user", "content": "请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。在总结中不要加入这一句话。"})
    response = get_response(system, context, raw=True)
    # Completion tokens plus a rough allowance for the recap messages built below.
    statistics = f'本次对话Tokens用量【{response["usage"]["completion_tokens"]+12+12+8} / 4096】'
    optmz_str = markdown.markdown(f'好的,我们之前聊了:{response["choices"][0]["message"]["content"]}\n\n================\n\n{statistics}')
    chatbot.append(("请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。", optmz_str))
    context = []
    context.append({"role": "user", "content": "我们之前聊了什么?"})
    context.append({"role": "assistant", "content": f'我们之前聊了:{response["choices"][0]["message"]["content"]}'})
    return chatbot, context
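
# UI helpers: reset_state clears the chat and context; update_system wraps new
# prompt text as a system-role message.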
def reset_state():
    return [], []


def update_system(new_system_prompt):
    return {"role": "system", "content": new_system_prompt}
with gr.Blocks() as demo:
    chatbot = gr.Chatbot().style(color_map=("#1D51EE", "#585A5B"))
    context = gr.State([])
    systemPrompt = gr.State(update_system(initial_prompt))

    with gr.Row():
        with gr.Column(scale=12):
            txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(container=False)
        with gr.Column(min_width=50, scale=1):
            submitBtn = gr.Button("🚀", variant="primary")
    with gr.Row():
        emptyBtn = gr.Button("🧹 新的对话")
        retryBtn = gr.Button("🔄 重新生成")
        delLastBtn = gr.Button("🗑️ 删除上条对话")
        reduceTokenBtn = gr.Button("♻️ 优化Tokens")
    newSystemPrompt = gr.Textbox(show_label=True, placeholder="在这里输入新的System Prompt...", label="更改 System prompt").style(container=True)
    systemPromptDisplay = gr.Textbox(show_label=True, value=initial_prompt, interactive=False, label="目前的 System prompt").style(container=True)

    # Wire the controls to the handlers above; Enter and the 🚀 button both submit.
    txt.submit(predict, [chatbot, txt, systemPrompt, context], [chatbot, context], show_progress=True)
    txt.submit(lambda: "", None, txt)
    submitBtn.click(predict, [chatbot, txt, systemPrompt, context], [chatbot, context], show_progress=True)
    submitBtn.click(lambda: "", None, txt)
    emptyBtn.click(reset_state, outputs=[chatbot, context])
    newSystemPrompt.submit(update_system, newSystemPrompt, systemPrompt)
    newSystemPrompt.submit(lambda x: x, newSystemPrompt, systemPromptDisplay)
    newSystemPrompt.submit(lambda: "", None, newSystemPrompt)
    retryBtn.click(retry, [chatbot, systemPrompt, context], [chatbot, context], show_progress=True)
    delLastBtn.click(delete_last_conversation, [chatbot, context], [chatbot, context], show_progress=True)
    reduceTokenBtn.click(reduce_token, [chatbot, systemPrompt, context], [chatbot, context], show_progress=True)

demo.launch()