Upload 49 files
Browse files- check_proxy.py +6 -2
- config.py +16 -7
- crazy_functions/批量总结PDF文档.py +99 -0
- functional.py +19 -15
- functional_crazy.py +30 -9
- main.py +35 -20
- predict.py +1 -1
- theme.py +2 -2
- toolbox.py +3 -3
check_proxy.py
CHANGED
@@ -6,8 +6,11 @@ def check_proxy(proxies):
|
|
6 |
response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
|
7 |
data = response.json()
|
8 |
print(f'查询代理的地理位置,返回的结果是{data}')
|
9 |
-
|
10 |
-
|
|
|
|
|
|
|
11 |
print(result)
|
12 |
return result
|
13 |
except:
|
@@ -17,6 +20,7 @@ def check_proxy(proxies):
|
|
17 |
|
18 |
|
19 |
if __name__ == '__main__':
|
|
|
20 |
try: from config_private import proxies # 放自己的秘密如API和代理网址 os.path.exists('config_private.py')
|
21 |
except: from config import proxies
|
22 |
check_proxy(proxies)
|
|
|
6 |
response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
|
7 |
data = response.json()
|
8 |
print(f'查询代理的地理位置,返回的结果是{data}')
|
9 |
+
if 'country_name' in data:
|
10 |
+
country = data['country_name']
|
11 |
+
result = f"代理配置 {proxies_https}, 代理所在地:{country}"
|
12 |
+
elif 'error' in data:
|
13 |
+
result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
|
14 |
print(result)
|
15 |
return result
|
16 |
except:
|
|
|
20 |
|
21 |
|
22 |
if __name__ == '__main__':
|
23 |
+
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
24 |
try: from config_private import proxies # 放自己的秘密如API和代理网址 os.path.exists('config_private.py')
|
25 |
except: from config import proxies
|
26 |
check_proxy(proxies)
|
config.py
CHANGED
@@ -1,11 +1,19 @@
|
|
1 |
import os
|
2 |
|
3 |
-
API_KEY =
|
|
|
4 |
API_URL = "https://api.openai.com/v1/chat/completions"
|
5 |
|
6 |
# 改为True应用代理
|
7 |
USE_PROXY = False
|
8 |
if USE_PROXY:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
# 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
|
10 |
proxies = { "http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284", }
|
11 |
print('网络代理状态:运行。')
|
@@ -14,7 +22,7 @@ else:
|
|
14 |
print('网络代理状态:未配置。无代理状态下很可能无法访问。')
|
15 |
|
16 |
# 发送请求到OpenAI后,等待多久判定为超时
|
17 |
-
TIMEOUT_SECONDS =
|
18 |
|
19 |
# 网页的端口, -1代表随机端口
|
20 |
WEB_PORT = -1
|
@@ -25,12 +33,13 @@ MAX_RETRY = 2
|
|
25 |
# 选择的OpenAI模型是(gpt4现在只对申请成功的人开放)
|
26 |
LLM_MODEL = "gpt-3.5-turbo"
|
27 |
|
28 |
-
# 检查一下是不是忘了改config
|
29 |
-
if len(API_KEY) != 51: # 正确的密钥是51位
|
30 |
-
assert False, "请在config文件中修改API密钥, 添加海外代理之后再运行"
|
31 |
-
|
32 |
# 设置并行使用的线程数
|
33 |
CONCURRENT_COUNT = 100
|
34 |
|
35 |
# 设置用户名和密码
|
36 |
-
AUTHENTICATION = [] # [("username", "password"), ("username2", "password2"), ...]
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import os
|
2 |
|
3 |
+
# API_KEY = "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" 此key无效
|
4 |
+
API_KEY = os.environ.get('API_KEY')
|
5 |
API_URL = "https://api.openai.com/v1/chat/completions"
|
6 |
|
7 |
# 改为True应用代理
|
8 |
USE_PROXY = False
|
9 |
if USE_PROXY:
|
10 |
+
|
11 |
+
# 填写格式是 [协议]:// [地址] :[端口] ,
|
12 |
+
# 例如 "socks5h://localhost:11284"
|
13 |
+
# [协议] 常见协议无非socks5h/http,例如 v2*** 和 s** 的默认本地协议是socks5h,cl**h 的默认本地协议是http
|
14 |
+
# [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
|
15 |
+
# [端口] 在代理软件的设置里,不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
|
16 |
+
|
17 |
# 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
|
18 |
proxies = { "http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284", }
|
19 |
print('网络代理状态:运行。')
|
|
|
22 |
print('网络代理状态:未配置。无代理状态下很可能无法访问。')
|
23 |
|
24 |
# 发送请求到OpenAI后,等待多久判定为超时
|
25 |
+
TIMEOUT_SECONDS = 25
|
26 |
|
27 |
# 网页的端口, -1代表随机端口
|
28 |
WEB_PORT = -1
|
|
|
33 |
# 选择的OpenAI模型是(gpt4现在只对申请成功的人开放)
|
34 |
LLM_MODEL = "gpt-3.5-turbo"
|
35 |
|
|
|
|
|
|
|
|
|
36 |
# 设置并行使用的线程数
|
37 |
CONCURRENT_COUNT = 100
|
38 |
|
39 |
# 设置用户名和密码
|
40 |
+
AUTHENTICATION = [] # [("username", "password"), ("username2", "password2"), ...]
|
41 |
+
|
42 |
+
# 检查一下是不是忘了改config
|
43 |
+
if len(API_KEY) != 51:
|
44 |
+
assert False, "正确的API_KEY密钥是51位,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
|
45 |
+
"(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)"
|
crazy_functions/批量总结PDF文档.py
ADDED
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from predict import predict_no_ui
|
2 |
+
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
|
3 |
+
fast_debug = False
|
4 |
+
|
5 |
+
|
6 |
+
def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
    """Summarize each PDF in *file_manifest* with GPT, then produce a final
    combined Chinese + English abstract and write the result to a report file.

    This is a generator driven by the UI: it repeatedly yields
    ``(chatbot, history, status_text)`` so the frontend can refresh.
    NOTE(review): *systemPromptTxt* is accepted for signature consistency with
    the other plugins but is not used here.
    """
    import time, os, fitz  # fitz is PyMuPDF; 'glob' was imported before but never used here
    print('begin analysis on:', file_manifest)
    msg = '正常'  # ensure 'msg' exists even in fast_debug mode (was a NameError at the final yield)
    for index, fp in enumerate(file_manifest):
        # Extract the plain text of the whole PDF with PyMuPDF.
        with fitz.open(fp) as doc:
            file_content = ""
            for page in doc:
                file_content += page.get_text()
            print(file_content)

        # Only the first file carries the instruction preamble.
        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        print('[1] yield chatbot, history')
        yield chatbot, history, '正常'

        if not fast_debug:
            msg = '正常'
            # ** gpt request ** (with a countdown on timeout)
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[])

            print('[2] end gpt req')
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            print('[3] yield chatbot, history')
            yield chatbot, history, msg
        print('[4] next')
        # Throttle consecutive API calls a little.
        if not fast_debug: time.sleep(2)

    # Final pass: ask the model to condense all per-file summaries.
    all_file = ', '.join(os.path.relpath(fp, project_folder) for fp in file_manifest)
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'

    if not fast_debug:
        msg = '正常'
        # ** gpt request ** (with a countdown on timeout); this time feed in the accumulated history
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history)

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield chatbot, history, msg
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res))
    yield chatbot, history, msg
|
52 |
+
|
53 |
+
|
54 |
+
@CatchException
def 批量总结PDF文档(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
    """Plugin entry point: recursively collect every ``.pdf`` under the folder
    named in *txt* and summarize them one by one via :func:`解析PDF`.

    Generator protocol: yields ``(chatbot, history, status_text)`` to drive the
    UI. Errors (bad path, missing dependency, no files) are reported through
    ``report_execption`` and the generator returns early.
    """
    import glob, os

    # Basic info: feature description and contributor.
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档。函数插件贡献者: ValeriaWong"])
    yield chatbot, history, '正常'

    # Try to import the optional dependency; suggest the install command if missing.
    # Narrowed from a bare 'except:' so unrelated errors are not swallowed.
    try:
        import fitz
    except ImportError:
        report_execption(chatbot, history,
            a = f"解析项目: {txt}",
            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
        yield chatbot, history, '正常'
        return

    # Clear the conversation history to avoid overflowing the prompt.
    history = []

    # Validate the input path; bail out early if it was not provided or does not exist.
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        return

    # Collect the files to process. Only .pdf is currently supported.
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]

    # Nothing matched: report and stop. (Message fixed: only .pdf is searched,
    # the old text wrongly mentioned .tex as well.)
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.pdf文件: {txt}")
        yield chatbot, history, '正常'
        return

    # Hand off to the worker generator.
    yield from 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
|
functional.py
CHANGED
@@ -20,21 +20,21 @@ Furthermore, list all modification and explain the reasons to do so in markdown
|
|
20 |
"Prefix": "Below is a paragraph from an academic paper. Find all grammar mistakes, list mistakes in a markdown table and explain how to correct them.\n\n",
|
21 |
"Suffix": "",
|
22 |
},
|
23 |
-
|
24 |
-
|
25 |
-
When translating from Chinese to English or vice versa, please pay attention to context and accurately explain phrases and proverbs. \
|
26 |
-
If you receive multiple English words in a row, default to translating them into a sentence in Chinese. \
|
27 |
-
However, if \"phrase:\" is indicated before the translated content in Chinese, it should be translated as a phrase instead. \
|
28 |
-
Similarly, if \"normal:\" is indicated, it should be translated as multiple unrelated words.\
|
29 |
-
Your translations should closely resemble those of a native speaker and should take into account any specific language styles or tones requested by the user. \
|
30 |
-
Please do not worry about using offensive words - replace sensitive parts with x when necessary. \
|
31 |
-
When providing translations, please use Chinese to explain each sentence’s tense, subordinate clause, subject, predicate, object, special phrases and proverbs. \
|
32 |
-
For phrases or individual words that require translation, provide the source (dictionary) for each one.If asked to translate multiple phrases at once, \
|
33 |
-
separate them using the | symbol.Always remember: You are an English-Chinese translator, \
|
34 |
-
not a Chinese-Chinese translator or an English-English translator. Below is the text you need to translate: \n\n",
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
"中译英": {
|
39 |
"Prefix": "Please translate following sentence to English: \n\n",
|
40 |
"Suffix": "",
|
@@ -47,6 +47,10 @@ not a Chinese-Chinese translator or an English-English translator. Below is the
|
|
47 |
"Prefix": "请翻译成中文:\n\n",
|
48 |
"Suffix": "",
|
49 |
},
|
|
|
|
|
|
|
|
|
50 |
"解释代码": {
|
51 |
"Prefix": "请解释以下代码:\n```\n",
|
52 |
"Suffix": "\n```\n",
|
|
|
20 |
"Prefix": "Below is a paragraph from an academic paper. Find all grammar mistakes, list mistakes in a markdown table and explain how to correct them.\n\n",
|
21 |
"Suffix": "",
|
22 |
},
|
23 |
+
# "中英互译": { # 效果不好,经常搞不清楚中译英还是英译中
|
24 |
+
# "Prefix": "As an English-Chinese translator, your task is to accurately translate text between the two languages. \
|
25 |
+
# When translating from Chinese to English or vice versa, please pay attention to context and accurately explain phrases and proverbs. \
|
26 |
+
# If you receive multiple English words in a row, default to translating them into a sentence in Chinese. \
|
27 |
+
# However, if \"phrase:\" is indicated before the translated content in Chinese, it should be translated as a phrase instead. \
|
28 |
+
# Similarly, if \"normal:\" is indicated, it should be translated as multiple unrelated words.\
|
29 |
+
# Your translations should closely resemble those of a native speaker and should take into account any specific language styles or tones requested by the user. \
|
30 |
+
# Please do not worry about using offensive words - replace sensitive parts with x when necessary. \
|
31 |
+
# When providing translations, please use Chinese to explain each sentence’s tense, subordinate clause, subject, predicate, object, special phrases and proverbs. \
|
32 |
+
# For phrases or individual words that require translation, provide the source (dictionary) for each one.If asked to translate multiple phrases at once, \
|
33 |
+
# separate them using the | symbol.Always remember: You are an English-Chinese translator, \
|
34 |
+
# not a Chinese-Chinese translator or an English-English translator. Below is the text you need to translate: \n\n",
|
35 |
+
# "Suffix": "",
|
36 |
+
# "Color": "secondary",
|
37 |
+
# },
|
38 |
"中译英": {
|
39 |
"Prefix": "Please translate following sentence to English: \n\n",
|
40 |
"Suffix": "",
|
|
|
47 |
"Prefix": "请翻译成中文:\n\n",
|
48 |
"Suffix": "",
|
49 |
},
|
50 |
+
"找图片": {
|
51 |
+
"Prefix": "我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL,然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:\n\n",
|
52 |
+
"Suffix": "",
|
53 |
+
},
|
54 |
"解释代码": {
|
55 |
"Prefix": "请解释以下代码:\n```\n",
|
56 |
"Suffix": "\n```\n",
|
functional_crazy.py
CHANGED
@@ -1,3 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
1 |
|
2 |
def get_crazy_functionals():
|
3 |
from crazy_functions.读文章写摘要 import 读文章写摘要
|
@@ -9,37 +14,53 @@ def get_crazy_functionals():
|
|
9 |
from crazy_functions.高级功能函数模板 import 高阶功能模板函数
|
10 |
from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
|
11 |
|
12 |
-
|
13 |
-
"
|
14 |
"Function": 解析项目本身
|
15 |
},
|
16 |
-
"
|
17 |
"Color": "stop", # 按钮颜色
|
18 |
"Function": 解析一个Python项目
|
19 |
},
|
20 |
-
"
|
21 |
"Color": "stop", # 按钮颜色
|
22 |
"Function": 解析一个C项目的头文件
|
23 |
},
|
24 |
-
"
|
25 |
"Color": "stop", # 按钮颜色
|
26 |
"Function": 解析一个C项目
|
27 |
},
|
28 |
-
"
|
29 |
"Color": "stop", # 按钮颜色
|
30 |
"Function": 读文章写摘要
|
31 |
},
|
32 |
-
"
|
33 |
"Color": "stop", # 按钮颜色
|
34 |
"Function": 批量生成函数注释
|
35 |
},
|
36 |
-
"[
|
37 |
"Function": 全项目切换英文
|
38 |
},
|
39 |
-
"[
|
40 |
"Function": 高阶功能模板函数
|
41 |
},
|
42 |
}
|
43 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
|
|
|
1 |
+
# UserVisibleLevel是过滤器参数。
|
2 |
+
# 由于UI界面空间有限,所以通过这种方式决定UI界面中显示哪些插件
|
3 |
+
# 默认函数插件 VisibleLevel 是 0
|
4 |
+
# 当 UserVisibleLevel >= 函数插件的 VisibleLevel 时,该函数插件才会被显示出来
|
5 |
+
UserVisibleLevel = 1
|
6 |
|
7 |
def get_crazy_functionals():
|
8 |
from crazy_functions.读文章写摘要 import 读文章写摘要
|
|
|
14 |
from crazy_functions.高级功能函数模板 import 高阶功能模板函数
|
15 |
from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
|
16 |
|
17 |
+
function_plugins = {
|
18 |
+
"请解析并解构此项目本身": {
|
19 |
"Function": 解析项目本身
|
20 |
},
|
21 |
+
"解析整个py项目": {
|
22 |
"Color": "stop", # 按钮颜色
|
23 |
"Function": 解析一个Python项目
|
24 |
},
|
25 |
+
"解析整个C++项目头文件": {
|
26 |
"Color": "stop", # 按钮颜色
|
27 |
"Function": 解析一个C项目的头文件
|
28 |
},
|
29 |
+
"解析整个C++项目": {
|
30 |
"Color": "stop", # 按钮颜色
|
31 |
"Function": 解析一个C项目
|
32 |
},
|
33 |
+
"读tex论文写摘要": {
|
34 |
"Color": "stop", # 按钮颜色
|
35 |
"Function": 读文章写摘要
|
36 |
},
|
37 |
+
"批量生成函数注释": {
|
38 |
"Color": "stop", # 按钮颜色
|
39 |
"Function": 批量生成函数注释
|
40 |
},
|
41 |
+
"[多线程demo] 把本项目源代码切换成全英文": {
|
42 |
"Function": 全项目切换英文
|
43 |
},
|
44 |
+
"[函数插件模板demo] 历史上的今天": {
|
45 |
"Function": 高阶功能模板函数
|
46 |
},
|
47 |
}
|
48 |
|
49 |
+
# VisibleLevel=1 经过测试,但功能未达到理想状态
|
50 |
+
if UserVisibleLevel >= 1:
|
51 |
+
from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
|
52 |
+
function_plugins.update({
|
53 |
+
"[仅供开发调试] 批量总结PDF文档": {
|
54 |
+
"Color": "stop",
|
55 |
+
"Function": 批量总结PDF文档
|
56 |
+
},
|
57 |
+
})
|
58 |
+
|
59 |
+
# VisibleLevel=2 尚未充分测试的函数插件,放在这里
|
60 |
+
if UserVisibleLevel >= 2:
|
61 |
+
function_plugins.update({
|
62 |
+
})
|
63 |
+
|
64 |
+
return function_plugins
|
65 |
|
66 |
|
main.py
CHANGED
@@ -12,7 +12,7 @@ PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
|
12 |
AUTHENTICATION = None if AUTHENTICATION == [] else AUTHENTICATION
|
13 |
|
14 |
initial_prompt = "Serve me as a writing and programming assistant."
|
15 |
-
title_html = """<h1 align="left">ChatGPT
|
16 |
|
17 |
# 问询记录, python 版本建议3.9+(越新越好)
|
18 |
import logging
|
@@ -36,55 +36,57 @@ gr.Chatbot.postprocess = format_io
|
|
36 |
from theme import adjust_theme
|
37 |
set_theme = adjust_theme()
|
38 |
|
|
|
39 |
with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
|
40 |
gr.HTML(title_html)
|
41 |
with gr.Row():
|
42 |
with gr.Column(scale=2):
|
43 |
chatbot = gr.Chatbot()
|
44 |
-
chatbot.style(height=
|
45 |
chatbot.style()
|
46 |
history = gr.State([])
|
47 |
with gr.Column(scale=1):
|
48 |
with gr.Row():
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
with gr.Row():
|
56 |
from check_proxy import check_proxy
|
57 |
-
statusDisplay = gr.Markdown(f"
|
58 |
with gr.Row():
|
59 |
for k in functional:
|
60 |
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
|
61 |
functional[k]["Button"] = gr.Button(k, variant=variant)
|
62 |
with gr.Row():
|
63 |
-
gr.Markdown("
|
64 |
with gr.Row():
|
65 |
for k in crazy_functional:
|
66 |
variant = crazy_functional[k]["Color"] if "Color" in crazy_functional[k] else "secondary"
|
67 |
crazy_functional[k]["Button"] = gr.Button(k, variant=variant)
|
68 |
with gr.Row():
|
69 |
-
gr.Markdown("
|
70 |
with gr.Row():
|
71 |
-
file_upload = gr.Files(label='
|
72 |
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt).style(container=True)
|
73 |
with gr.Accordion("arguments", open=False):
|
74 |
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
75 |
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
76 |
|
77 |
predict_args = dict(fn=predict, inputs=[txt, top_p, temperature, chatbot, history, system_prompt], outputs=[chatbot, history, statusDisplay], show_progress=True)
|
78 |
-
empty_txt_args = dict(fn=lambda: "", inputs=[], outputs=[txt])
|
79 |
|
80 |
-
txt.submit(**predict_args)
|
81 |
-
txt.submit(**empty_txt_args)
|
82 |
-
submitBtn.click(**predict_args)
|
83 |
-
submitBtn.click(**empty_txt_args)
|
84 |
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, statusDisplay])
|
85 |
for k in functional:
|
86 |
-
functional[k]["Button"].click(predict,
|
87 |
[txt, top_p, temperature, chatbot, history, system_prompt, gr.State(True), gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
|
|
|
88 |
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
|
89 |
for k in crazy_functional:
|
90 |
click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"],
|
@@ -92,7 +94,20 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
|
|
92 |
)
|
93 |
try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
94 |
except: pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
95 |
|
|
|
96 |
demo.launch()
|
97 |
-
#
|
98 |
-
#
|
|
|
12 |
AUTHENTICATION = None if AUTHENTICATION == [] else AUTHENTICATION
|
13 |
|
14 |
initial_prompt = "Serve me as a writing and programming assistant."
|
15 |
+
title_html = """<h1 align="left">ChatGPT 学术优化</h1>"""
|
16 |
|
17 |
# 问询记录, python 版本建议3.9+(越新越好)
|
18 |
import logging
|
|
|
36 |
from theme import adjust_theme
|
37 |
set_theme = adjust_theme()
|
38 |
|
39 |
+
cancel_handles = []
|
40 |
with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
|
41 |
gr.HTML(title_html)
|
42 |
with gr.Row():
|
43 |
with gr.Column(scale=2):
|
44 |
chatbot = gr.Chatbot()
|
45 |
+
chatbot.style(height=600)
|
46 |
chatbot.style()
|
47 |
history = gr.State([])
|
48 |
with gr.Column(scale=1):
|
49 |
with gr.Row():
|
50 |
+
txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
|
51 |
+
with gr.Row():
|
52 |
+
submitBtn = gr.Button("提交", variant="primary")
|
53 |
+
with gr.Row():
|
54 |
+
resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
|
55 |
+
stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
|
56 |
with gr.Row():
|
57 |
from check_proxy import check_proxy
|
58 |
+
statusDisplay = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {check_proxy(proxies)}")
|
59 |
with gr.Row():
|
60 |
for k in functional:
|
61 |
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
|
62 |
functional[k]["Button"] = gr.Button(k, variant=variant)
|
63 |
with gr.Row():
|
64 |
+
gr.Markdown("注意:以下“红颜色”标识的函数插件需从input区读取路径作为参数.")
|
65 |
with gr.Row():
|
66 |
for k in crazy_functional:
|
67 |
variant = crazy_functional[k]["Color"] if "Color" in crazy_functional[k] else "secondary"
|
68 |
crazy_functional[k]["Button"] = gr.Button(k, variant=variant)
|
69 |
with gr.Row():
|
70 |
+
gr.Markdown("上传本地文件,供上面的函数插件调用.")
|
71 |
with gr.Row():
|
72 |
+
file_upload = gr.Files(label='任何文件, 但推荐上传压缩文件(zip, tar)', file_count="multiple")
|
73 |
system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt).style(container=True)
|
74 |
with gr.Accordion("arguments", open=False):
|
75 |
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
76 |
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
77 |
|
78 |
predict_args = dict(fn=predict, inputs=[txt, top_p, temperature, chatbot, history, system_prompt], outputs=[chatbot, history, statusDisplay], show_progress=True)
|
79 |
+
empty_txt_args = dict(fn=lambda: "", inputs=[], outputs=[txt]) # 用于在提交后清空输入栏
|
80 |
|
81 |
+
cancel_handles.append(txt.submit(**predict_args))
|
82 |
+
# txt.submit(**empty_txt_args) 在提交后清空输入栏
|
83 |
+
cancel_handles.append(submitBtn.click(**predict_args))
|
84 |
+
# submitBtn.click(**empty_txt_args) 在提交后清空输入栏
|
85 |
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, statusDisplay])
|
86 |
for k in functional:
|
87 |
+
click_handle = functional[k]["Button"].click(predict,
|
88 |
[txt, top_p, temperature, chatbot, history, system_prompt, gr.State(True), gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
|
89 |
+
cancel_handles.append(click_handle)
|
90 |
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
|
91 |
for k in crazy_functional:
|
92 |
click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"],
|
|
|
94 |
)
|
95 |
try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
|
96 |
except: pass
|
97 |
+
cancel_handles.append(click_handle)
|
98 |
+
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
99 |
+
|
100 |
+
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
101 |
+
def auto_opentab_delay():
|
102 |
+
import threading, webbrowser, time
|
103 |
+
print(f"URL http://localhost:{PORT}")
|
104 |
+
def open():
|
105 |
+
time.sleep(2)
|
106 |
+
webbrowser.open_new_tab(f'http://localhost:{PORT}')
|
107 |
+
t = threading.Thread(target=open)
|
108 |
+
t.daemon = True; t.start()
|
109 |
|
110 |
+
auto_opentab_delay()
|
111 |
demo.launch()
|
112 |
+
#demo.title = "ChatGPT 学术优化"
|
113 |
+
#demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION)
|
predict.py
CHANGED
@@ -176,7 +176,7 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
|
|
176 |
chunk = get_full_error(chunk, stream_response)
|
177 |
error_msg = chunk.decode()
|
178 |
if "reduce the length" in error_msg:
|
179 |
-
chatbot[-1] = (chatbot[-1][0], "[Local Message] Input (or history) is too long, please reduce input or clear history by
|
180 |
history = []
|
181 |
elif "Incorrect API key" in error_msg:
|
182 |
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key provided.")
|
|
|
176 |
chunk = get_full_error(chunk, stream_response)
|
177 |
error_msg = chunk.decode()
|
178 |
if "reduce the length" in error_msg:
|
179 |
+
chatbot[-1] = (chatbot[-1][0], "[Local Message] Input (or history) is too long, please reduce input or clear history by refreshing this page.")
|
180 |
history = []
|
181 |
elif "Incorrect API key" in error_msg:
|
182 |
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key provided.")
|
theme.py
CHANGED
@@ -28,8 +28,8 @@ def adjust_theme():
|
|
28 |
try:
|
29 |
color_er = gr.themes.utils.colors.pink
|
30 |
set_theme = gr.themes.Default(
|
31 |
-
primary_hue=gr.themes.utils.colors.
|
32 |
-
neutral_hue=gr.themes.utils.colors.
|
33 |
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
|
34 |
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
|
35 |
set_theme.set(
|
|
|
28 |
try:
|
29 |
color_er = gr.themes.utils.colors.pink
|
30 |
set_theme = gr.themes.Default(
|
31 |
+
primary_hue=gr.themes.utils.colors.sky,
|
32 |
+
neutral_hue=gr.themes.utils.colors.fuchsia,
|
33 |
font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui", "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
|
34 |
font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
|
35 |
set_theme.set(
|
toolbox.py
CHANGED
@@ -107,8 +107,8 @@ def text_divide_paragraph(text):
|
|
107 |
# wtf input
|
108 |
lines = text.split("\n")
|
109 |
for i, line in enumerate(lines):
|
110 |
-
|
111 |
-
text = "".join(lines)
|
112 |
return text
|
113 |
|
114 |
def markdown_convertion(txt):
|
@@ -207,7 +207,7 @@ def on_file_uploaded(files, chatbot, txt):
|
|
207 |
txt = f'private_upload/{time_tag}'
|
208 |
moved_files_str = '\t\n\n'.join(moved_files)
|
209 |
chatbot.append(['我上传了文件,请查收',
|
210 |
-
f'[Local Message] 收到以下文件: \n\n{moved_files_str}\n\n调用路径参数已自动修正到: \n\n{txt}\n\n
|
211 |
return chatbot, txt
|
212 |
|
213 |
|
|
|
107 |
# wtf input
|
108 |
lines = text.split("\n")
|
109 |
for i, line in enumerate(lines):
|
110 |
+
lines[i] = "<p>"+lines[i].replace(" ", " ")+"</p>"
|
111 |
+
text = "\n".join(lines)
|
112 |
return text
|
113 |
|
114 |
def markdown_convertion(txt):
|
|
|
207 |
txt = f'private_upload/{time_tag}'
|
208 |
moved_files_str = '\t\n\n'.join(moved_files)
|
209 |
chatbot.append(['我上传了文件,请查收',
|
210 |
+
f'[Local Message] 收到以下文件: \n\n{moved_files_str}\n\n调用路径参数已自动修正到: \n\n{txt}\n\n现在您点击任意实验功能时,以上文件将被作为输入参数'])
|
211 |
return chatbot, txt
|
212 |
|
213 |
|