3v324v23 committed on
Commit
3851f80
1 Parent(s): d9355b9

api error catch

Browse files
Files changed (5) hide show
  1. app.py +108 -7
  2. appx.py +7 -0
  3. config.py +3 -0
  4. main.py +0 -108
  5. predict.py +4 -2
app.py CHANGED
@@ -1,7 +1,108 @@
1
- import gradio as gr
2
-
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
-
6
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
2
+ import gradio as gr
3
+ from predict import predict
4
+ from toolbox import format_io, find_free_port
5
+
6
+ # 建议您复制一个config_private.py放自己的秘密,如API和代理网址,避免不小心传github被别人看到
7
+ try: from config_private import proxies, WEB_PORT
8
+ except: from config import proxies, WEB_PORT
9
+
10
+ # 如果WEB_PORT是-1,则随机选取WEB端口
11
+ PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
12
+
13
+ initial_prompt = "Serve me as a writing and programming assistant."
14
+ title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
15
+
16
+ # 问询记录,python 版本建议3.9+(越新越好)
17
+ import logging
18
+ os.makedirs('gpt_log', exist_ok=True)
19
+ try:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO, encoding='utf-8')
20
+ except:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO)
21
+ print('所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log,请注意自我隐私保护哦!')
22
+
23
+ # 一些普通功能模块
24
+ from functional import get_functionals
25
+ functional = get_functionals()
26
+
27
+ # 对一些丧心病狂的实验性功能模块进行测试
28
+ from functional_crazy import get_crazy_functionals, on_file_uploaded, on_report_generated
29
+ crazy_functional = get_crazy_functionals()
30
+
31
+ # 处理markdown文本格式的转变
32
+ gr.Chatbot.postprocess = format_io
33
+
34
+ # 做一些样式上的调整
35
+ try: set_theme = gr.themes.Default( primary_hue=gr.themes.utils.colors.orange,
36
+ font=["ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
37
+ font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
38
+ except:
39
+ set_theme = None; print('gradio版本较旧,不能自定义字体和颜色')
40
+
41
+ with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
42
+ gr.HTML(title_html)
43
+ with gr.Row():
44
+ with gr.Column(scale=2):
45
+ chatbot = gr.Chatbot()
46
+ chatbot.style(height=1000)
47
+ chatbot.style()
48
+ history = gr.State([])
49
+ TRUE = gr.State(True)
50
+ FALSE = gr.State(False)
51
+ with gr.Column(scale=1):
52
+ with gr.Row():
53
+ with gr.Column(scale=12):
54
+ api = gr.Textbox(show_label=False, placeholder="Input OpenAI Key.").style(container=False)
55
+ with gr.Row():
56
+ with gr.Column(scale=12):
57
+ txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
58
+ with gr.Column(scale=1):
59
+ submitBtn = gr.Button("Ask", variant="primary")
60
+ with gr.Row():
61
+ for k in functional:
62
+ variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
63
+ functional[k]["Button"] = gr.Button(k, variant=variant)
64
+ with gr.Row():
65
+ gr.Markdown("以下部分实验性功能需从input框读取路径.")
66
+ with gr.Row():
67
+ for k in crazy_functional:
68
+ variant = crazy_functional[k]["Color"] if "Color" in crazy_functional[k] else "secondary"
69
+ crazy_functional[k]["Button"] = gr.Button(k, variant=variant)
70
+ with gr.Row():
71
+ gr.Markdown("上传本地文件供上面的实验性功能调用.")
72
+ with gr.Row():
73
+ file_upload = gr.Files(label='任何文件,但推荐上传压缩文件(zip, tar)', file_count="multiple")
74
+
75
+ from check_proxy import check_proxy
76
+ statusDisplay = gr.Markdown(f"{check_proxy(proxies)}")
77
+ systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt).style(container=True)
78
+ #inputs, top_p, temperature, top_k, repetition_penalty
79
+ with gr.Accordion("arguments", open=False):
80
+ top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
81
+ temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
82
+
83
+ txt.submit(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
84
+ submitBtn.click(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
85
+ for k in functional:
86
+ functional[k]["Button"].click(predict,
87
+ [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, TRUE, gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
88
+ file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
89
+ for k in crazy_functional:
90
+ click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"],
91
+ [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, gr.State(PORT)], [chatbot, history, statusDisplay]
92
+ )
93
+ try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
94
+ except: pass
95
+
96
+
97
+ # 延迟函数,做一些准备工作,最后尝试打开浏览器
98
+ def auto_opentab_delay():
99
+ import threading, webbrowser, time
100
+ print(f"URL http://localhost:{PORT}")
101
+ def open(): time.sleep(2)
102
+ webbrowser.open_new_tab(f'http://localhost:{PORT}')
103
+ t = threading.Thread(target=open)
104
+ t.daemon = True; t.start()
105
+
106
+ auto_opentab_delay()
107
+ demo.title = "ChatGPT 学术优化"
108
+ demo.queue().launch(server_name="0.0.0.0", share=True, server_port=PORT)
appx.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ def greet(name):
4
+ return "Hello " + name + "!!"
5
+
6
+ iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
+ iface.launch()
config.py CHANGED
@@ -24,3 +24,6 @@ MAX_RETRY = 2
24
  # 选择的OpenAI模型是(gpt4现在只对申请成功的人开放)
25
  LLM_MODEL = "gpt-3.5-turbo"
26
 
 
 
 
 
24
  # 选择的OpenAI模型是(gpt4现在只对申请成功的人开放)
25
  LLM_MODEL = "gpt-3.5-turbo"
26
 
27
+ # # 检查一下是不是忘了改config
28
+ # if API_KEY == "sk-此处填API秘钥":
29
+ # assert False, "请在config文件中修改API密钥, 添加海外代理之后再运行"
main.py DELETED
@@ -1,108 +0,0 @@
1
- import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
2
- import gradio as gr
3
- from predict import predict
4
- from toolbox import format_io, find_free_port
5
-
6
- # 建议您复制一个config_private.py放自己的秘密,如API和代理网址,避免不小心传github被别人看到
7
- try: from config_private import proxies, WEB_PORT
8
- except: from config import proxies, WEB_PORT
9
-
10
- # 如果WEB_PORT是-1,则随机选取WEB端口
11
- PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
12
-
13
- initial_prompt = "Serve me as a writing and programming assistant."
14
- title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
15
-
16
- # 问询记录,python 版本建议3.9+(越新越好)
17
- import logging
18
- os.makedirs('gpt_log', exist_ok=True)
19
- try:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO, encoding='utf-8')
20
- except:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO)
21
- print('所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log,请注意自我隐私保护哦!')
22
-
23
- # 一些普通功能模块
24
- from functional import get_functionals
25
- functional = get_functionals()
26
-
27
- # 对一些丧心病狂的实验性功能模块进行测试
28
- from functional_crazy import get_crazy_functionals, on_file_uploaded, on_report_generated
29
- crazy_functional = get_crazy_functionals()
30
-
31
- # 处理markdown文本格式的转变
32
- gr.Chatbot.postprocess = format_io
33
-
34
- # 做一些样式上的调整
35
- try: set_theme = gr.themes.Default( primary_hue=gr.themes.utils.colors.orange,
36
- font=["ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
37
- font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
38
- except:
39
- set_theme = None; print('gradio版本较旧,不能自定义字体和颜色')
40
-
41
- with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
42
- gr.HTML(title_html)
43
- with gr.Row():
44
- with gr.Column(scale=2):
45
- chatbot = gr.Chatbot()
46
- chatbot.style(height=1000)
47
- chatbot.style()
48
- history = gr.State([])
49
- TRUE = gr.State(True)
50
- FALSE = gr.State(False)
51
- with gr.Column(scale=1):
52
- with gr.Row():
53
- with gr.Column(scale=12):
54
- api = gr.Textbox(show_label=False, placeholder="Input OpenAI Key.").style(container=False)
55
- with gr.Row():
56
- with gr.Column(scale=12):
57
- txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
58
- with gr.Column(scale=1):
59
- submitBtn = gr.Button("Ask", variant="primary")
60
- with gr.Row():
61
- for k in functional:
62
- variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
63
- functional[k]["Button"] = gr.Button(k, variant=variant)
64
- with gr.Row():
65
- gr.Markdown("以下部分实验性功能需从input框读取路径.")
66
- with gr.Row():
67
- for k in crazy_functional:
68
- variant = crazy_functional[k]["Color"] if "Color" in crazy_functional[k] else "secondary"
69
- crazy_functional[k]["Button"] = gr.Button(k, variant=variant)
70
- with gr.Row():
71
- gr.Markdown("上传本地文件供上面的实验性功能调用.")
72
- with gr.Row():
73
- file_upload = gr.Files(label='任何文件,但推荐上传压缩文件(zip, tar)', file_count="multiple")
74
-
75
- from check_proxy import check_proxy
76
- statusDisplay = gr.Markdown(f"{check_proxy(proxies)}")
77
- systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt).style(container=True)
78
- #inputs, top_p, temperature, top_k, repetition_penalty
79
- with gr.Accordion("arguments", open=False):
80
- top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
81
- temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
82
-
83
- txt.submit(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
84
- submitBtn.click(predict, [api, txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
85
- for k in functional:
86
- functional[k]["Button"].click(predict,
87
- [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, TRUE, gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
88
- file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
89
- for k in crazy_functional:
90
- click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"],
91
- [api, txt, top_p, temperature, chatbot, history, systemPromptTxt, gr.State(PORT)], [chatbot, history, statusDisplay]
92
- )
93
- try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
94
- except: pass
95
-
96
-
97
- # 延迟函数,做一些准备工作,最后尝试打开浏览器
98
- def auto_opentab_delay():
99
- import threading, webbrowser, time
100
- print(f"URL http://localhost:{PORT}")
101
- def open(): time.sleep(2)
102
- webbrowser.open_new_tab(f'http://localhost:{PORT}')
103
- t = threading.Thread(target=open)
104
- t.daemon = True; t.start()
105
-
106
- auto_opentab_delay()
107
- demo.title = "ChatGPT 学术优化"
108
- demo.queue().launch(server_name="0.0.0.0", share=True, server_port=PORT)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
predict.py CHANGED
@@ -130,8 +130,10 @@ def predict(api, inputs, top_p, temperature, chatbot=[], history=[], system_prom
130
  chunk = get_full_error(chunk, stream_response)
131
  error_msg = chunk.decode()
132
  if "reduce the length" in error_msg:
133
- chatbot[-1] = (history[-1], "[Local Message] Input (or history) is too long, please reduce input or clear history by refleshing this page.")
134
  history = []
 
 
135
  yield chatbot, history, "Json解析不合常规,很可能是文本过长" + error_msg
136
  return
137
 
@@ -141,7 +143,7 @@ def generate_payload(api, inputs, top_p, temperature, history, system_prompt, st
141
  """
142
  headers = {
143
  "Content-Type": "application/json",
144
- "Authorization": f"Bearer {API_KEY}"
145
  }
146
 
147
  conversation_cnt = len(history) // 2
 
130
  chunk = get_full_error(chunk, stream_response)
131
  error_msg = chunk.decode()
132
  if "reduce the length" in error_msg:
133
+ chatbot[-1] = (chatbot[-1][0], "[Local Message] Input (or history) is too long, please reduce input or clear history by refleshing this page.")
134
  history = []
135
+ if "Incorrect API key" in error_msg:
136
+ chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key provided.")
137
  yield chatbot, history, "Json解析不合常规,很可能是文本过长" + error_msg
138
  return
139
 
 
143
  """
144
  headers = {
145
  "Content-Type": "application/json",
146
+ "Authorization": f"Bearer {api}"
147
  }
148
 
149
  conversation_cnt = len(history) // 2