- README.md +1 -1
- app.py +17 -8
- config.py +7 -3
- crazy_functional.py +4 -5
- crazy_functions/CodeInterpreter.py +5 -4
- crazy_functions/Langchain知识库.py +18 -19
- crazy_functions/Latex全文润色.py +7 -5
- crazy_functions/Latex全文翻译.py +4 -3
- crazy_functions/Latex输出PDF结果.py +2 -2
- crazy_functions/crazy_utils.py +77 -26
- crazy_functions/latex_fns/latex_actions.py +5 -5
- crazy_functions/latex_fns/latex_toolbox.py +10 -5
- crazy_functions/下载arxiv论文翻译摘要.py +9 -12
- crazy_functions/图片生成.py +2 -2
- crazy_functions/对话历史存档.py +8 -8
- crazy_functions/总结word文档.py +6 -3
- crazy_functions/总结音视频.py +13 -11
- crazy_functions/批量Markdown翻译.py +6 -6
- crazy_functions/批量总结PDF文档.py +4 -3
- crazy_functions/批量总结PDF文档pdfminer.py +4 -2
- crazy_functions/批量翻译PDF文档_NOUGAT.py +3 -71
- crazy_functions/批量翻译PDF文档_多线程.py +7 -50
- crazy_functions/生成函数注释.py +4 -2
- crazy_functions/虚空终端.py +4 -3
- crazy_functions/解析JupyterNotebook.py +4 -2
- crazy_functions/解析项目源代码.py +11 -9
- crazy_functions/读文章写摘要.py +18 -21
- crazy_functions/辅助功能.py +6 -7
- docs/GithubAction+AllCapacity +1 -1
- docs/translate_english.json +72 -7
- docs/translate_japanese.json +0 -3
- docs/translate_std.json +3 -1
- docs/translate_traditionalchinese.json +0 -3
- multi_language.py +4 -2
- request_llm/bridge_chatgpt.py +18 -10
- request_llm/com_sparkapi.py +1 -0
- themes/gradios.py +46 -0
- themes/theme.py +3 -0
- toolbox.py +81 -70
- version +2 -2
README.md
CHANGED
@@ -267,7 +267,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 
 3. 虚空终端(从自然语言输入中,理解用户意图+自动调用其他插件)
 
-    - 步骤一:输入 “ 请调用插件翻译PDF论文,地址为https://
+    - 步骤一:输入 “ 请调用插件翻译PDF论文,地址为https://openreview.net/pdf?id=rJl0r3R9KX ”
     - 步骤二:点击“虚空终端”
 
 <div align="center">
app.py
CHANGED
@@ -9,7 +9,7 @@ def main():
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
-    ENABLE_AUDIO, AUTO_CLEAR_TXT = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT')
+    ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING')
 
     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -22,12 +22,12 @@ def main():
 
     # 问询记录, python 版本建议3.9+(越新越好)
     import logging, uuid
-    os.makedirs(
-    try:logging.basicConfig(filename="
-    except:logging.basicConfig(filename="
+    os.makedirs(PATH_LOGGING, exist_ok=True)
+    try:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+    except:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
     # Disable logging output from the 'httpx' logger
     logging.getLogger("httpx").setLevel(logging.WARNING)
-    print("所有问询记录将自动保存在本地目录./
+    print(f"所有问询记录将自动保存在本地目录./{PATH_LOGGING}/chat_secrets.log, 请注意自我隐私保护哦!")
 
     # 一些普通功能模块
     from core_functional import get_core_functions
@@ -125,6 +125,15 @@ def main():
     max_length_sl = gr.Slider(minimum=256, maximum=8192, value=4096, step=1, interactive=True, label="Local LLM MaxLength",)
     checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
     md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
+    dark_mode_btn = gr.Button("Toggle Dark Mode ☀", variant="secondary").style(size="sm")
+    dark_mode_btn.click(None, None, None, _js="""() => {
+        if (document.querySelectorAll('.dark').length) {
+            document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
+        } else {
+            document.querySelector('body').classList.add('dark');
+        }
+        }""",
+    )
     gr.Markdown(description)
     with gr.Accordion("备选输入区", open=True, visible=False, elem_id="input-panel2") as area_input_secondary:
         with gr.Row():
@@ -152,7 +161,7 @@ def main():
     # 整理反复出现的控件句柄组合
     input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
     output_combo = [cookies, chatbot, history, status]
-    predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
+    predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo)
     # 提交按钮、重置按钮
     cancel_handles.append(txt.submit(**predict_args))
     cancel_handles.append(txt2.submit(**predict_args))
@@ -177,7 +186,7 @@ def main():
     # 函数插件-固定按钮区
     for k in plugins:
         if not plugins[k].get("AsButton", True): continue
-        click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo
+        click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo], output_combo)
         click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
         cancel_handles.append(click_handle)
     # 函数插件-下拉菜单与随变按钮的互动
@@ -197,7 +206,7 @@ def main():
     def route(request: gr.Request, k, *args, **kwargs):
         if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
         yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
-    click_handle = switchy_bt.click(route,[switchy_bt, *input_combo
+    click_handle = switchy_bt.click(route,[switchy_bt, *input_combo], output_combo)
     click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
     cancel_handles.append(click_handle)
     # 终止按钮的回调函数注册
config.py
CHANGED
@@ -47,8 +47,9 @@ API_URL_REDIRECT = {}
 DEFAULT_WORKER_NUM = 3
 
 
-# 
-
+# 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
+# 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...]
+THEME = "Default"
 
 
 # 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效)
@@ -185,7 +186,10 @@ GROBID_URLS = [
 
 # 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
 ALLOW_RESET_CONFIG = False
-
+# 临时的上传文件夹位置,请勿修改
+PATH_PRIVATE_UPLOAD = "private_upload"
+# 日志文件夹的位置,请勿修改
+PATH_LOGGING = "gpt_log"
 
 """
 在线大模型配置关联关系示意图
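Note: the keys added above (THEME, PATH_PRIVATE_UPLOAD, PATH_LOGGING) are read through the project's get_conf helper, as the app.py hunk earlier in this diff already does for PATH_LOGGING. A minimal sketch of that usage, assuming only what the existing get_conf calls show (values come back in the same order as the requested keys); the printed message is illustrative, not part of this commit:

```python
from toolbox import get_conf

# Read the config entries introduced by this commit; get_conf returns
# the values in the order the keys were requested.
THEME, PATH_LOGGING, PATH_PRIVATE_UPLOAD = get_conf('THEME', 'PATH_LOGGING', 'PATH_PRIVATE_UPLOAD')
print(f"Theme: {THEME}; logs in ./{PATH_LOGGING}/; uploads in ./{PATH_PRIVATE_UPLOAD}/")
```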
crazy_functional.py
CHANGED
@@ -13,7 +13,6 @@ def get_crazy_functions():
     from crazy_functions.解析项目源代码 import 解析一个Java项目
     from crazy_functions.解析项目源代码 import 解析一个前端项目
     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
-    from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
     from crazy_functions.Latex全文润色 import Latex英文润色
     from crazy_functions.询问多个大语言模型 import 同时问询
     from crazy_functions.解析项目源代码 import 解析一个Lua项目
@@ -400,12 +399,12 @@ def get_crazy_functions():
     try:
         from crazy_functions.Langchain知识库 import 知识库问答
         function_plugins.update({
-            "
+            "构建知识库(先上传文件素材,再运行此插件)": {
                 "Group": "对话",
                 "Color": "stop",
                 "AsButton": False,
                 "AdvancedArgs": True,
-                "ArgsReminder": "
+                "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
                 "Function": HotReload(知识库问答)
             }
         })
@@ -415,12 +414,12 @@ def get_crazy_functions():
     try:
         from crazy_functions.Langchain知识库 import 读取知识库作答
         function_plugins.update({
-            "
+            "知识库问答(构建知识库后,再运行此插件)": {
                 "Group": "对话",
                 "Color": "stop",
                 "AsButton": False,
                 "AdvancedArgs": True,
-                "ArgsReminder": "待提取的知识库名称id, 默认为default, 
+                "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
                 "Function": HotReload(读取知识库作答)
             }
         })
crazy_functions/CodeInterpreter.py
CHANGED
@@ -1,6 +1,7 @@
 from collections.abc import Callable, Iterable, Mapping
 from typing import Any
-from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc
+from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc
+from toolbox import promote_file_to_downloadzone, get_log_folder
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import input_clipping, try_install_deps
 from multiprocessing import Process, Pipe
@@ -92,7 +93,7 @@ def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
 
 def make_module(code):
     module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
-    with open(f'
+    with open(f'{get_log_folder()}/{module_file}.py', 'w', encoding='utf8') as f:
         f.write(code)
 
     def get_class_name(class_string):
@@ -102,7 +103,7 @@ def make_module(code):
         return class_name
 
     class_name = get_class_name(code)
-    return f"
+    return f"{get_log_folder().replace('/', '.')}.{module_file}->{class_name}"
 
 def init_module_instance(module):
     import importlib
@@ -171,7 +172,7 @@ def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history
     file_type = file_path.split('.')[-1]
 
     # 粗心检查
-    if 
+    if is_the_upload_folder(txt):
         chatbot.append([
             "...",
             f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)"
crazy_functions/Langchain知识库.py
CHANGED
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, ProxyNetworkActivate
+from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
 
 
@@ -15,7 +15,12 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     web_port 当前软件运行的端口号
     """
     history = []    # 清空历史,以免输入溢出
-
+
+    # < --------------------读取参数--------------- >
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    kai_id = plugin_kwargs.get("advanced_arg", 'default')
+
+    chatbot.append((f"向`{kai_id}`知识库中添加文件。", "[Local Message] 从一批文件(txt, md, tex)中读取数据构建知识库, 然后进行问答。"))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
     # resolve deps
@@ -24,17 +29,12 @@
         from langchain.embeddings.huggingface import HuggingFaceEmbeddings
         from .crazy_utils import knowledge_archive_interface
     except Exception as e:
-        chatbot.append(
-            ["依赖不足",
-            "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."]
-        )
+        chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         from .crazy_utils import try_install_deps
-        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'])
-
-
-    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
-    kai_id = plugin_kwargs.get("advanced_arg", 'default')
+        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
+        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
+        return
 
     # < --------------------读取文件--------------- >
     file_manifest = []
@@ -84,19 +84,18 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         from .crazy_utils import try_install_deps
-        try_install_deps(['zh_langchain==0.2.1'])
+        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
+        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
+        return
 
     # < ------------------- --------------- >
     kai = knowledge_archive_interface()
 
-    if 
-
-
-    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
-    kai_id = plugin_kwargs.get("advanced_arg", 'default')
-    resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    kai_id = plugin_kwargs.get("advanced_arg", 'default')
+    resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)
 
-    chatbot.append((txt, '[
+    chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=prompt, inputs_show_user=txt,
crazy_functions/Latex全文润色.py
CHANGED
@@ -1,5 +1,5 @@
-from toolbox import update_ui, trimmed_format_exc
-from toolbox import CatchException, report_execption, 
+from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder
+from toolbox import CatchException, report_execption, write_history_to_file, zip_folder
 
 
 class PaperFileGroup():
@@ -51,7 +51,7 @@ class PaperFileGroup():
         import os, time
         folder = os.path.dirname(self.file_paths[0])
         t = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
-        zip_folder(folder, 
+        zip_folder(folder, get_log_folder(), f'{t}-polished.zip')
 
 
 def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='polish'):
@@ -126,7 +126,9 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
 
     # <-------- 整理结果,退出 ---------->
     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-    res = 
+    res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
+
     history = gpt_response_collection
     chatbot.append((f"{fp}完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -137,7 +139,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
-        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
+        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。(注意,此插件不调用Latex,如果有Latex环境,请使用“Latex英文纠错+高亮”插件)"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
     # 尝试导入依赖,如果缺少依赖,则给出安装建议
crazy_functions/Latex全文翻译.py
CHANGED
@@ -1,5 +1,5 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, 
+from toolbox import update_ui, promote_file_to_downloadzone
+from toolbox import CatchException, report_execption, write_history_to_file
 fast_debug = False
 
 class PaperFileGroup():
@@ -95,7 +95,8 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
 
     # <-------- 整理结果,退出 ---------->
     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-    res = 
+    res = write_history_to_file(gpt_response_collection, create_report_file_name)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     history = gpt_response_collection
     chatbot.append((f"{fp}完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
crazy_functions/Latex输出PDF结果.py
CHANGED
@@ -1,4 +1,4 @@
-from toolbox import update_ui, trimmed_format_exc, get_conf, 
+from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
 from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str
 from functools import partial
 import glob, os, requests, time
@@ -65,7 +65,7 @@ def move_project(project_folder, arxiv_id=None):
     if arxiv_id is not None:
         new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
     else:
-        new_workfolder = f'
+        new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
     try:
         shutil.rmtree(new_workfolder)
     except:
crazy_functions/crazy_utils.py
CHANGED
@@ -1,5 +1,7 @@
-from toolbox import update_ui, get_conf, trimmed_format_exc
+from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
 import threading
+import os
+import logging
 
 def input_clipping(inputs, history, max_token_limit):
     import numpy as np
@@ -469,14 +471,16 @@ def read_and_clean_pdf_text(fp):
         '- ', '') for t in text_areas['blocks'] if 'lines' in t]
 
     ############################## <第 2 步,获取正文主字体> ##################################
-
-
-
-
-
-
-
-
+    try:
+        fsize_statiscs = {}
+        for span in meta_span:
+            if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
+            fsize_statiscs[span[1]] += span[2]
+        main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
+        if REMOVE_FOOT_NOTE:
+            give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
+    except:
+        raise RuntimeError(f'抱歉, 我们暂时无法解析此PDF文档: {fp}。')
     ############################## <第 3 步,切分和重新整合> ##################################
     mega_sec = []
     sec = []
@@ -703,49 +707,96 @@ class knowledge_archive_interface():
         )
         self.threadLock.release()
         return resp, prompt
+
+@Singleton
+class nougat_interface():
+    def __init__(self):
+        self.threadLock = threading.Lock()
+
+    def nougat_with_timeout(self, command, cwd, timeout=3600):
+        import subprocess
+        logging.info(f'正在执行命令 {command}')
+        process = subprocess.Popen(command, shell=True, cwd=cwd)
+        try:
+            stdout, stderr = process.communicate(timeout=timeout)
+        except subprocess.TimeoutExpired:
+            process.kill()
+            stdout, stderr = process.communicate()
+            print("Process timed out!")
+            return False
+        return True
+
+    def NOUGAT_parse_pdf(self, fp, chatbot, history):
+        from toolbox import update_ui_lastest_msg
+
+        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
+                                         chatbot=chatbot, history=history, delay=0)
+        self.threadLock.acquire()
+        import glob, threading, os
+        from toolbox import get_log_folder, gen_time_str
+        dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
+        os.makedirs(dst)
+
+        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
+                                         chatbot=chatbot, history=history, delay=0)
+        self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
+        res = glob.glob(os.path.join(dst,'*.mmd'))
+        if len(res) == 0:
+            self.threadLock.release()
+            raise RuntimeError("Nougat解析论文失败。")
+        self.threadLock.release()
+        return res[0]
+
 
-def try_install_deps(deps):
+def try_install_deps(deps, reload_m=[]):
+    import subprocess, sys, importlib
     for dep in deps:
-        import subprocess, sys
         subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep])
+    import site
+    importlib.reload(site)
+    for m in reload_m:
+        importlib.reload(__import__(m))
 
 
-
-    def __init__(self) -> None:
-        self.css = """
+HTML_CSS = """
 .row {
 display: flex;
 flex-wrap: wrap;
 }
-
 .column {
 flex: 1;
 padding: 10px;
 }
-
 .table-header {
 font-weight: bold;
 border-bottom: 1px solid black;
 }
-
 .table-row {
 border-bottom: 1px solid lightgray;
 }
-
 .table-cell {
 padding: 5px;
 }
-
-        self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
+"""
 
-
-    def add_row(self, a, b):
-        tmp = """
+TABLE_CSS = """
 <div class="row table-row">
 <div class="column table-cell">REPLACE_A</div>
 <div class="column table-cell">REPLACE_B</div>
 </div>
-
+"""
+
+class construct_html():
+    def __init__(self) -> None:
+        self.css = HTML_CSS
+        self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
+
+    def add_row(self, a, b):
+        tmp = TABLE_CSS
         from toolbox import markdown_convertion
         tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
         tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
@@ -753,6 +804,6 @@ class construct_html():
 
 
     def save_file(self, file_name):
-        with open(
+        with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
             f.write(self.html_string.encode('utf-8', 'ignore').decode())
-
+        return os.path.join(get_log_folder(), file_name)
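The new nougat_interface class above is wrapped in a @Singleton decorator that is imported from elsewhere (presumably toolbox.py, whose changes are not expanded in this view). A minimal sketch of the kind of decorator this usage assumes — illustrative only, not the project's actual implementation:

```python
def Singleton(cls):
    # Cache one instance per decorated class and return it on every call,
    # so every plugin shares the same nougat_interface (and its threadLock).
    _instances = {}
    def _get_instance(*args, **kwargs):
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]
    return _get_instance
```

With a decorator like this, every caller of nougat_interface() receives the same object, which is what lets the threading.Lock in NOUGAT_parse_pdf serialize concurrent NOUGAT runs.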
crazy_functions/latex_fns/latex_actions.py
CHANGED
@@ -1,4 +1,4 @@
-from toolbox import update_ui, update_ui_lastest_msg
+from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
 from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
 from .latex_toolbox import PRESERVE, TRANSFORM
 from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
@@ -363,7 +363,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
         if mode!='translate_zh':
             yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
             print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
-            ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
+            ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', os.getcwd())
 
         yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
         ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
@@ -439,9 +439,9 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
             trans = k
             ch.add_row(a=orig, b=trans)
         create_report_file_name = f"{gen_time_str()}.trans.html"
-        ch.save_file(create_report_file_name)
-        shutil.copyfile(
-        promote_file_to_downloadzone(file=
+        res = ch.save_file(create_report_file_name)
+        shutil.copyfile(res, pj(project_folder, create_report_file_name))
+        promote_file_to_downloadzone(file=res, chatbot=chatbot)
     except:
         from toolbox import trimmed_format_exc
         print('writing html result failed:', trimmed_format_exc())
crazy_functions/latex_fns/latex_toolbox.py
CHANGED
@@ -256,6 +256,7 @@ def find_main_tex_file(file_manifest, mode):
         canidates_score.append(0)
         with open(texf, 'r', encoding='utf8', errors='ignore') as f:
             file_content = f.read()
+            file_content = rm_comments(file_content)
         for uw in unexpected_words:
             if uw in file_content:
                 canidates_score[-1] -= 1
@@ -290,7 +291,11 @@ def find_tex_file_ignore_case(fp):
     import glob
     for f in glob.glob(dir_name+'/*.tex'):
         base_name_s = os.path.basename(fp)
-
+        base_name_f = os.path.basename(f)
+        if base_name_s.lower() == base_name_f.lower(): return f
+        # 试着加上.tex后缀试试
+        if not base_name_s.endswith('.tex'): base_name_s+='.tex'
+        if base_name_s.lower() == base_name_f.lower(): return f
     return None
 
 def merge_tex_files_(project_foler, main_file, mode):
@@ -301,9 +306,9 @@ def merge_tex_files_(project_foler, main_file, mode):
     for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
         f = s.group(1)
         fp = os.path.join(project_foler, f)
-
-        if 
-        with open(
+        fp_ = find_tex_file_ignore_case(fp)
+        if fp_:
+            with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
         else:
             raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
         c = merge_tex_files_(project_foler, c, mode)
@@ -423,7 +428,7 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
 
 def merge_pdfs(pdf1_path, pdf2_path, output_path):
     import PyPDF2
-    Percent = 0.
+    Percent = 0.95
     # Open the first PDF file
     with open(pdf1_path, 'rb') as pdf1_file:
         pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
crazy_functions/下载arxiv论文翻译摘要.py
CHANGED
@@ -1,5 +1,6 @@
-from toolbox import update_ui
-from toolbox import 
+from toolbox import update_ui, get_log_folder
+from toolbox import write_history_to_file, promote_file_to_downloadzone
+from toolbox import CatchException, report_execption, get_conf
 import re, requests, unicodedata, os
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 def download_arxiv_(url_pdf):
@@ -28,7 +29,7 @@ def download_arxiv_(url_pdf):
         if k in other_info['comment']:
             title = k + ' ' + title
 
-    download_dir = '
+    download_dir = get_log_folder(plugin_name='arxiv')
     os.makedirs(download_dir, exist_ok=True)
 
     title_str = title.replace('?', '?')\
@@ -40,9 +41,6 @@ def download_arxiv_(url_pdf):
 
     requests_pdf_url = url_pdf
     file_path = download_dir+title_str
-    # if os.path.exists(file_path):
-    #     print('返回缓存文件')
-    #     return './gpt_log/arxiv/'+title_str
 
     print('下载中')
     proxies, = get_conf('proxies')
@@ -61,7 +59,7 @@ def download_arxiv_(url_pdf):
         .replace('\n', '')\
         .replace('  ', ' ')\
         .replace('  ', ' ')
-    return 
+    return file_path, other_info
 
 
 def get_name(_url_):
@@ -184,11 +182,10 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi
     chatbot[-1] = (i_say_show_user, gpt_say)
     history.append(i_say_show_user); history.append(gpt_say)
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-
-
-
-
-    res = write_results_to_file(history)
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
+    promote_file_to_downloadzone(pdf_path, chatbot=chatbot)
+
     chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载"))
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
 
crazy_functions/图片生成.py
CHANGED
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, get_conf, select_api_key
+from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import datetime
 
@@ -33,7 +33,7 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
         raise RuntimeError(response.content.decode())
     # 文件保存到本地
     r = requests.get(image_url, proxies=proxies)
-    file_path = '
+    file_path = f'{get_log_folder()}/image_gen/'
     os.makedirs(file_path, exist_ok=True)
     file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
    with open(file_path+file_name, 'wb+') as f: f.write(r.content)
crazy_functions/对话历史存档.py
CHANGED
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, promote_file_to_downloadzone
+from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import re
 
@@ -10,8 +10,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
     import time
     if file_name is None:
         file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
-    os.
-    with open(
+    fp = os.path.join(get_log_folder(), file_name)
+    with open(fp, 'w', encoding='utf8') as f:
         from themes.theme import advanced_css
         f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
         for i, contents in enumerate(chatbot):
@@ -29,8 +29,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
             for h in history:
                 f.write("\n>>>" + h)
             f.write('</code>')
-    promote_file_to_downloadzone(
-    return '对话历史写入:' + 
+    promote_file_to_downloadzone(fp, rename_file=file_name, chatbot=chatbot)
+    return '对话历史写入:' + fp
 
 def gen_file_preview(file_name):
     try:
@@ -106,7 +106,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     if not success:
         if txt == "": txt = '空空如也的输入栏'
         import glob
-        local_history = "<br/>".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'
+        local_history = "<br/>".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
         chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:<br/>{local_history}"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -132,8 +132,8 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
     """
 
     import glob, os
-    local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'
-    for f in glob.glob(f'
+    local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
+    for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True):
         os.remove(f)
     chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
crazy_functions/总结word文档.py
CHANGED
@@ -1,5 +1,6 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
 
@@ -71,11 +72,13 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
         history.extend([i_say,gpt_say])
         this_paper_history.extend([i_say,gpt_say])
 
-        res = 
+        res = write_history_to_file(history)
+        promote_file_to_downloadzone(res, chatbot=chatbot)
         chatbot.append(("完成了吗?", res))
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
-    res = 
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     chatbot.append(("所有文件都总结完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
crazy_functions/总结音视频.py
CHANGED
@@ -1,5 +1,6 @@
-from toolbox import CatchException, report_execption, select_api_key, update_ui, 
+from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder
 
 def split_audio_file(filename, split_duration=1000):
     """
@@ -15,7 +16,7 @@ def split_audio_file(filename, split_duration=1000):
     """
     from moviepy.editor import AudioFileClip
     import os
-    os.makedirs('
+    os.makedirs(f"{get_log_folder(plugin_name='audio')}/mp3/cut/", exist_ok=True)  # 创建存储切割音频的文件夹
 
     # 读取音频文件
     audio = AudioFileClip(filename)
@@ -31,8 +32,8 @@ def split_audio_file(filename, split_duration=1000):
         start_time = split_points[i]
         end_time = split_points[i + 1]
         split_audio = audio.subclip(start_time, end_time)
-        split_audio.write_audiofile(f"
-        filelist.append(f"
+        split_audio.write_audiofile(f"{get_log_folder(plugin_name='audio')}/mp3/cut/{filename[0]}_{i}.mp3")
+        filelist.append(f"{get_log_folder(plugin_name='audio')}/mp3/cut/{filename[0]}_{i}.mp3")
 
     audio.close()
     return filelist
@@ -52,7 +53,7 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
         'Authorization': f"Bearer {api_key}"
     }
 
-    os.makedirs('
+    os.makedirs(f"{get_log_folder(plugin_name='audio')}/mp3/", exist_ok=True)
     for index, fp in enumerate(file_manifest):
         audio_history = []
         # 提取文件扩展名
@@ -60,8 +61,8 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
         # 提取视频中的音频
         if ext not in [".mp3", ".wav", ".m4a", ".mpga"]:
             audio_clip = AudioFileClip(fp)
-            audio_clip.write_audiofile(f'
-            fp = f'
+            audio_clip.write_audiofile(f"{get_log_folder(plugin_name='audio')}/mp3/output{index}.mp3")
+            fp = f"{get_log_folder(plugin_name='audio')}/mp3/output{index}.mp3"
         # 调用whisper模型音频转文字
         voice = split_audio_file(fp)
         for j, i in enumerate(voice):
@@ -113,18 +114,19 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
             history=audio_history,
             sys_prompt="总结文章。"
         )
-
         history.extend([i_say, gpt_say])
         audio_history.extend([i_say, gpt_say])
 
-        res = 
+        res = write_history_to_file(history)
+        promote_file_to_downloadzone(res, chatbot=chatbot)
         chatbot.append((f"第{index + 1}段音频完成了吗?", res))
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
    # 删除中间文件夹
    import shutil
-    shutil.rmtree('
-    res = 
+    shutil.rmtree(f"{get_log_folder(plugin_name='audio')}/mp3")
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
    chatbot.append(("所有音频都总结完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)
 
crazy_functions/批量Markdown翻译.py
CHANGED
@@ -1,7 +1,7 @@
-import glob, time, os, re
+import glob, time, os, re, logging
 from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
-from toolbox import CatchException, report_execption, 
-from toolbox import 
+from toolbox import CatchException, report_execption, get_log_folder
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = False
 
 class PaperFileGroup():
@@ -34,7 +34,7 @@ class PaperFileGroup():
                 self.sp_file_contents.append(segment)
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")
-
+        logging.info('Segmentation: done')
 
     def merge_result(self):
         self.file_result = ["" for _ in range(len(self.file_paths))]
@@ -101,7 +101,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
         pfg.merge_result()
         pfg.write_result(language)
     except:
-
+        logging.error(trimmed_format_exc())
 
     # <-------- 整理结果,退出 ---------->
     create_report_file_name = gen_time_str() + f"-chatgpt.md"
@@ -121,7 +121,7 @@ def get_files_from_everything(txt, preference=''):
     proxies, = get_conf('proxies')
     # 网络的远程文件
     if preference == 'Github':
-
+        logging.info('正在从github下载资源 ...')
         if not txt.endswith('.md'):
             # Make a request to the GitHub API to retrieve the repository information
             url = txt.replace("https://github.com/", "https://api.github.com/repos/") + '/readme'
crazy_functions/批量总结PDF文档.py
CHANGED
@@ -1,5 +1,6 @@
 from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import read_and_clean_pdf_text
 from .crazy_utils import input_clipping
@@ -99,8 +100,8 @@ do not have too much repetitive information, numerical values using the original
     _, final_results = input_clipping("", final_results, max_token_limit=3200)
     yield from update_ui(chatbot=chatbot, history=final_results) # 注意这里的历史记录被替代了
 
-    res = 
-    promote_file_to_downloadzone(res
+    res = write_history_to_file(file_write_buffer)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     yield from update_ui(chatbot=chatbot, history=final_results) # 刷新界面
 
 
crazy_functions/批量总结PDF文档pdfminer.py
CHANGED
@@ -1,6 +1,7 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_execption
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 
 fast_debug = False
 
@@ -115,7 +116,8 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
     chatbot[-1] = (i_say, gpt_say)
     history.append(i_say); history.append(gpt_say)
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-    res = 
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     chatbot.append(("完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
 
crazy_functions/批量翻译PDF文档_NOUGAT.py
CHANGED
@@ -86,30 +86,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
 # 开始正式执行任务
 yield from 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

-def nougat_with_timeout(command, cwd, timeout=3600):
-    import subprocess
-    process = subprocess.Popen(command, shell=True, cwd=cwd)
-    try:
-        stdout, stderr = process.communicate(timeout=timeout)
-    except subprocess.TimeoutExpired:
-        process.kill()
-        stdout, stderr = process.communicate()
-        print("Process timed out!")
-        return False
-    return True
-
-def NOUGAT_parse_pdf(fp):
-    import glob
-    from toolbox import get_log_folder, gen_time_str
-    dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
-    os.makedirs(dst)
-    nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd())
-    res = glob.glob(os.path.join(dst,'*.mmd'))
-    if len(res) == 0:
-        raise RuntimeError("Nougat解析论文失败。")
-    return res[0]

 def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
@@ -119,9 +96,11 @@ def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwa
 generated_conclusion_files = []
 generated_html_files = []
 DST_LANG = "中文"
+from crazy_functions.crazy_utils import nougat_interface, construct_html
+nougat_handle = nougat_interface()
 for index, fp in enumerate(file_manifest):
     chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-    fpp = NOUGAT_parse_pdf(fp)
+    fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history)

     with open(fpp, 'r', encoding='utf8') as f:
         article_content = f.readlines()
@@ -222,50 +201,3 @@ def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwa
 yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

-class construct_html():
-    def __init__(self) -> None:
-        self.css = """
-        .row {
-            display: flex;
-            flex-wrap: wrap;
-        }
-        .column {
-            flex: 1;
-            padding: 10px;
-        }
-        .table-header {
-            font-weight: bold;
-            border-bottom: 1px solid black;
-        }
-        .table-row {
-            border-bottom: 1px solid lightgray;
-        }
-        .table-cell {
-            padding: 5px;
-        }
-        """
-        self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
-
-    def add_row(self, a, b):
-        tmp = """
-        <div class="row table-row">
-            <div class="column table-cell">REPLACE_A</div>
-            <div class="column table-cell">REPLACE_B</div>
-        </div>
-        """
-        from toolbox import markdown_convertion
-        tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
-        tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
-        self.html_string += tmp
-
-    def save_file(self, file_name):
-        with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
-            f.write(self.html_string.encode('utf-8', 'ignore').decode())
-        return os.path.join(get_log_folder(), file_name)
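The two removed helpers are not dropped: per the new import they now live in `crazy_functions.crazy_utils` as `nougat_interface`, and the parse call becomes a generator (`yield from ...`) so the UI can keep refreshing while Nougat runs. For reference, a minimal standard-library sketch of the same run-a-CLI-with-a-timeout idea that the removed helpers implemented; the `nougat` command line comes from the removed code, everything else is illustrative:

```python
import os, glob, subprocess

def run_with_timeout(command, cwd, timeout=3600):
    # Launch the external tool and kill it if it exceeds the time budget.
    process = subprocess.Popen(command, shell=True, cwd=cwd)
    try:
        process.communicate(timeout=timeout)
        return True
    except subprocess.TimeoutExpired:
        process.kill()
        process.communicate()
        print("Process timed out!")
        return False

def parse_pdf_with_nougat(fp, dst):
    # Ask Nougat to write its .mmd output into dst, then pick up the result file.
    os.makedirs(dst, exist_ok=True)
    run_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd())
    results = glob.glob(os.path.join(dst, '*.mmd'))
    if len(results) == 0:
        raise RuntimeError("Nougat解析论文失败。")
    return results[0]
```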
crazy_functions/批量翻译PDF文档_多线程.py
CHANGED
@@ -1,6 +1,6 @@
-from toolbox import CatchException, report_execption,
+from toolbox import CatchException, report_execption, get_log_folder
 from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
-from toolbox import write_history_to_file,
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
 from .crazy_utils import read_and_clean_pdf_text
@@ -63,6 +63,7 @@ def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwa
 generated_conclusion_files = []
 generated_html_files = []
 DST_LANG = "中文"
+from crazy_functions.crazy_utils import construct_html
 for index, fp in enumerate(file_manifest):
     chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     article_dict = parse_pdf(fp, grobid_url)
@@ -166,6 +167,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
 TOKEN_LIMIT_PER_FRAGMENT = 1280
 generated_conclusion_files = []
 generated_html_files = []
+from crazy_functions.crazy_utils import construct_html
 for index, fp in enumerate(file_manifest):
     # 读取PDF文件
     file_content, page_one = read_and_clean_pdf_text(fp)
@@ -216,10 +218,11 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
 final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
 final.extend(gpt_response_collection_md)
 create_report_file_name = f"{os.path.basename(fp)}.trans.md"
-res =
+res = write_history_to_file(final, create_report_file_name)
+promote_file_to_downloadzone(res, chatbot=chatbot)

 # 更新UI
-generated_conclusion_files.append(f'
+generated_conclusion_files.append(f'{get_log_folder()}/{create_report_file_name}')
 chatbot.append((f"{fp}完成了吗?", res))
 yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -261,49 +264,3 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
 yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

-class construct_html():
-    def __init__(self) -> None:
-        self.css = """
-        .row {
-            display: flex;
-            flex-wrap: wrap;
-        }
-        .column {
-            flex: 1;
-            padding: 10px;
-        }
-        .table-header {
-            font-weight: bold;
-            border-bottom: 1px solid black;
-        }
-        .table-row {
-            border-bottom: 1px solid lightgray;
-        }
-        .table-cell {
-            padding: 5px;
-        }
-        """
-        self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
-
-    def add_row(self, a, b):
-        tmp = """
-        <div class="row table-row">
-            <div class="column table-cell">REPLACE_A</div>
-            <div class="column table-cell">REPLACE_B</div>
-        </div>
-        """
-        from toolbox import markdown_convertion
-        tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
-        tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
-        self.html_string += tmp
-
-    def save_file(self, file_name):
-        with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
-            f.write(self.html_string.encode('utf-8', 'ignore').decode())
-        return os.path.join(os.path.join(get_log_folder(), file_name)) if False else os.path.join(get_log_folder(), file_name)
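The `construct_html` class removed here (and from the NOUGAT plugin above) is likewise consolidated into `crazy_functions.crazy_utils`, which both functions now import locally. A minimal usage sketch of that class as defined in the removed code (two-column HTML report, original text beside its translation), assuming the consolidated version keeps the same `add_row`/`save_file` interface:

```python
# Sketch only: uses the construct_html API exactly as shown in the removed class.
from crazy_functions.crazy_utils import construct_html
from toolbox import promote_file_to_downloadzone

def build_translation_report(pairs, chatbot, file_name="translated.html"):
    ch = construct_html()
    for original, translated in pairs:      # pairs: list of (source text, translated text)
        ch.add_row(a=original, b=translated)
    html_path = ch.save_file(file_name)     # writes under the log folder and returns the path
    promote_file_to_downloadzone(html_path, chatbot=chatbot)
    return html_path
```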
crazy_functions/生成函数注释.py
CHANGED
@@ -1,5 +1,6 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False

@@ -27,7 +28,8 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
 if not fast_debug: time.sleep(2)

 if not fast_debug:
-res =
+res = write_history_to_file(history)
+promote_file_to_downloadzone(res, chatbot=chatbot)
 chatbot.append(("完成了吗?", res))
 yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
crazy_functions/虚空终端.py
CHANGED
@@ -25,11 +25,12 @@ explain_msg = """
 1. 请用**自然语言**描述您需要做什么。例如:
 - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了」
-- 「请调用插件翻译PDF论文,地址为https://
+- 「请调用插件翻译PDF论文,地址为https://openreview.net/pdf?id=rJl0r3R9KX」
 - 「把Arxiv论文翻译成中文PDF,arxiv论文的ID是1812.10695,记得用插件!」
 - 「生成一张图片,图中鲜花怒放,绿草如茵,用插件实现」
 - 「用插件翻译README,Github网址是https://github.com/facebookresearch/co-tracker」
 - 「我不喜欢当前的界面颜色,修改配置,把主题THEME更换为THEME="High-Contrast"」
+- 「请调用插件,解析python源代码项目,代码我刚刚打包拖到上传区了」
 - 「请问Transformer网络的结构是怎样的?」

 2. 您可以打开插件下拉菜单以了解本项目的各种能力。
@@ -45,7 +46,7 @@ explain_msg = """
 from pydantic import BaseModel, Field
 from typing import List
-from toolbox import CatchException, update_ui,
+from toolbox import CatchException, update_ui, is_the_upload_folder
 from toolbox import update_ui_lastest_msg, disable_auto_promotion
 from request_llm.bridge_all import predict_no_ui_long_connection
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -111,7 +112,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
 # 用简单的关键词检测用户意图
 is_certain, _ = analyze_intention_with_simple_rules(txt)
-if
+if is_the_upload_folder(txt):
     state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False)
     appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。"
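The new `is_the_upload_folder(txt)` helper (imported from `toolbox` here and reused in `request_llm/bridge_chatgpt.py` below) lets the terminal recognize that the input box currently holds the auto-filled upload path rather than a real instruction. A minimal sketch of how it is used, assuming the helper returns True when the string points at the private upload folder:

```python
# Sketch only: the guard used by 虚空终端 after intent analysis.
from toolbox import is_the_upload_folder

def refine_intent(txt, state, chatbot):
    appendix_msg = ""
    # If the "instruction" is really just the upload-folder path injected by the UI,
    # remember that no task description was given yet and prompt the user for one.
    if is_the_upload_folder(txt):
        state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False)
        appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。"
    return appendix_msg
```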
crazy_functions/解析JupyterNotebook.py
CHANGED
@@ -1,5 +1,6 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = True

@@ -110,7 +111,8 @@ def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
 yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

 # <-------- 写入文件,退出 ---------->
-res =
+res = write_history_to_file(history)
+promote_file_to_downloadzone(res, chatbot=chatbot)
 chatbot.append(("完成了吗?", res))
 yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
crazy_functions/解析项目源代码.py
CHANGED
@@ -1,12 +1,13 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption,
+from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion
+from toolbox import CatchException, report_execption, write_history_to_file
 from .crazy_utils import input_clipping

 def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     import os, copy
     from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
     from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+    disable_auto_promotion(chatbot=chatbot)
+
     summary_batch_isolation = True
     inputs_array = []
     inputs_show_user_array = []
@@ -22,7 +23,7 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
 file_content = f.read()
 prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
 i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
-i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {
+i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {fp}'
 # 装载请求内容
 inputs_array.append(i_say)
 inputs_show_user_array.append(i_say_show_user)
@@ -43,7 +44,8 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
 # 全部文件解析完成,结果写入文件,准备对工程源代码进行汇总分析
 report_part_1 = copy.deepcopy(gpt_response_collection)
 history_to_return = report_part_1
-res =
+res = write_history_to_file(report_part_1)
+promote_file_to_downloadzone(res, chatbot=chatbot)
 chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。"))
 yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
@@ -97,7 +99,8 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
 ############################## <END> ##################################
 history_to_return.extend(report_part_2)
-res =
+res = write_history_to_file(history_to_return)
+promote_file_to_downloadzone(res, chatbot=chatbot)
 chatbot.append(("完成了吗?", res))
 yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
@@ -106,9 +109,8 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
 def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
     history = []    # 清空历史,以免输入溢出
     import glob
-    file_manifest = [f for f in glob.glob('./*.py')
-    [f for f in glob.glob('
-    [f for f in glob.glob('./request_llm/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
+    file_manifest = [f for f in glob.glob('./*.py')] + \
+                    [f for f in glob.glob('./*/*.py')]
     project_folder = './'
     if len(file_manifest) == 0:
         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
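The rewritten manifest in `解析项目本身` drops the per-directory globs in favour of two patterns (`./*.py` and `./*/*.py`). A standard-library sketch of the same idea, with the exclusion filtering from the removed line shown as an optional step:

```python
import glob

def collect_python_files(excludes=('test_project', 'gpt_log')):
    # Top-level scripts plus every first-level package module, as in the new manifest.
    file_manifest = [f for f in glob.glob('./*.py')] + \
                    [f for f in glob.glob('./*/*.py')]
    # Optional: skip folders that should not be analysed (the removed code
    # excluded 'test_project' and 'gpt_log' explicitly).
    return [f for f in file_manifest if not any(x in f for x in excludes)]
```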
crazy_functions/读文章写摘要.py
CHANGED
@@ -1,7 +1,7 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-fast_debug = False


 def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
@@ -17,32 +17,29 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
 chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
 yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

-history.append(i_say_show_user); history.append(gpt_say)
-yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-if not fast_debug: time.sleep(2)
+msg = '正常'
+gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)   # 带超时倒计时
+chatbot[-1] = (i_say_show_user, gpt_say)
+history.append(i_say_show_user); history.append(gpt_say)
+yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+time.sleep(2)

 all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
 i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
 chatbot.append((i_say, "[Local Message] waiting gpt response."))
 yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

-gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt)   # 带超时倒计时
+msg = '正常'
+# ** gpt request **
+gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt)   # 带超时倒计时

+chatbot[-1] = (i_say, gpt_say)
+history.append(i_say); history.append(gpt_say)
+yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+res = write_history_to_file(history)
+promote_file_to_downloadzone(res, chatbot=chatbot)
+chatbot.append(("完成了吗?", res))
+yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
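The block added to `解析Paper` follows the project's standard generator-plugin request sequence: post the question to the chatbot, hand off to `request_gpt_model_in_new_thread_with_ui_alive` with `yield from` so the UI keeps refreshing, then record both sides in `history`. A condensed sketch of one round of that sequence, assuming the helper's signature exactly as it is called in this diff:

```python
# Sketch only: one ask-then-record round of the pattern used in 解析Paper.
from toolbox import update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def ask_one_round(i_say, i_say_show_user, llm_kwargs, chatbot, history, system_prompt):
    chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        i_say, i_say_show_user, llm_kwargs, chatbot,
        history=[], sys_prompt=system_prompt)   # 带超时倒计时
    chatbot[-1] = (i_say_show_user, gpt_say)
    history.extend([i_say_show_user, gpt_say])  # keep both question and answer for later summarization
    yield from update_ui(chatbot=chatbot, history=history, msg='正常')
    return gpt_say
```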
crazy_functions/辅助功能.py
CHANGED
@@ -2,8 +2,8 @@
 # @Time : 2023/4/19
 # @Author : Spike
 # @Descr :
-from toolbox import update_ui
-from toolbox import CatchException
+from toolbox import update_ui, get_conf
+from toolbox import CatchException
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

@@ -30,14 +30,13 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
 @CatchException
 def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    chatbot.append(['清除本地缓存数据', '执行中.
+    chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

     import shutil, os
-    shutil.rmtree(
-    shutil.rmtree(private_upload_dir, ignore_errors=True)
+    PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
+    shutil.rmtree(PATH_LOGGING, ignore_errors=True)
+    shutil.rmtree(PATH_PRIVATE_UPLOAD, ignore_errors=True)

     chatbot.append(['清除本地缓存数据', '执行完成'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
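The cache-clearing plugin now resolves both directories from configuration instead of hard-coding `gpt_log` and `private_upload`. A minimal sketch of the new behaviour, assuming `get_conf` returns the configured values in the order requested:

```python
import shutil
from toolbox import get_conf

def clear_local_cache():
    # Read the configurable folder names, then remove both trees; missing folders are ignored.
    PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
    shutil.rmtree(PATH_LOGGING, ignore_errors=True)
    shutil.rmtree(PATH_PRIVATE_UPLOAD, ignore_errors=True)
```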
docs/GithubAction+AllCapacity
CHANGED
@@ -12,7 +12,7 @@ RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/
 RUN python3 -m pip install openai numpy arxiv rich
 RUN python3 -m pip install colorama Markdown pygments pymupdf
 RUN python3 -m pip install python-docx moviepy pdfminer
-RUN python3 -m pip install zh_langchain==0.2.1
+RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
 RUN python3 -m pip install nougat-ocr
 RUN python3 -m pip install rarfile py7zr
 RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
docs/translate_english.json
CHANGED
@@ -299,7 +299,6 @@
 "地址🚀": "Address 🚀",
 "感谢热情的": "Thanks to the enthusiastic",
 "开发者们❤️": "Developers ❤️",
-"所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log": "All inquiry records will be automatically saved in the local directory ./gpt_log/chat_secrets.log",
 "请注意自我隐私保护哦!": "Please pay attention to self-privacy protection!",
 "当前模型": "Current model",
 "输入区": "Input area",
@@ -892,7 +891,6 @@
 "保存当前对话": "Save current conversation",
 "您可以调用“LoadConversationHistoryArchive”还原当下的对话": "You can call 'LoadConversationHistoryArchive' to restore the current conversation",
 "警告!被保存的对话历史可以被使用该系统的任何人查阅": "Warning! The saved conversation history can be viewed by anyone using this system",
-"gpt_log/**/chatGPT对话历史*.html": "gpt_log/**/chatGPT conversation history *.html",
 "正在查找对话历史文件": "Looking for conversation history file",
 "html格式": "HTML format",
 "找不到任何html文件": "No HTML files found",
@@ -908,7 +906,6 @@
 "pip install pywin32 用于doc格式": "pip install pywin32 for doc format",
 "仅支持Win平台": "Only supports Win platform",
 "打开文件": "Open file",
-"private_upload里面的文件名在解压zip后容易出现乱码": "The file name in private_upload is prone to garbled characters after unzipping",
 "rar和7z格式正常": "RAR and 7z formats are normal",
 "故可以只分析文章内容": "So you can only analyze the content of the article",
 "不输入文件名": "Do not enter the file name",
@@ -1364,7 +1361,6 @@
 "注意文章中的每一句话都要翻译": "Please translate every sentence in the article",
 "一、论文概况": "I. Overview of the paper",
 "二、论文翻译": "II. Translation of the paper",
-"/gpt_log/总结论文-": "/gpt_log/Summary of the paper-",
 "给出输出文件清单": "Provide a list of output files",
 "第 0 步": "Step 0",
 "切割PDF": "Split PDF",
@@ -1564,7 +1560,6 @@
 "广义速度": "Generalized velocity",
 "粒子的固有": "Intrinsic of particle",
 "一个包含所有切割音频片段文件路径的列表": "A list containing the file paths of all segmented audio clips",
-"/gpt_log/翻译-": "Translation log-",
 "计算文件总时长和切割点": "Calculate total duration and cutting points of the file",
 "总结音频": "Summarize audio",
 "作者": "Author",
@@ -2339,7 +2334,6 @@
 "将文件拖动到文件上传区": "Drag and drop the file to the file upload area",
 "如果意图模糊": "If the intent is ambiguous",
 "星火认知大模型": "Spark Cognitive Big Model",
-"执行中. 删除 gpt_log & private_upload": "Executing. Delete gpt_log & private_upload",
 "默认 Color = secondary": "Default Color = secondary",
 "此处也不需要修改": "No modification is needed here",
 "⭐ ⭐ ⭐ 分析用户意图": "⭐ ⭐ ⭐ Analyze user intent",
@@ -2448,5 +2442,76 @@
 "插件说明": "Plugin description",
 "├── CODE_HIGHLIGHT 代码高亮": "├── CODE_HIGHLIGHT Code highlighting",
 "记得用插件": "Remember to use the plugin",
-"谨慎操作": "Handle with caution"
+"谨慎操作": "Handle with caution",
+"private_upload里面的文件名在解压zip后容易出现乱码": "The file name inside private_upload is prone to garbled characters after unzipping",
+"直接返回报错": "Direct return error",
+"临时的上传文件夹位置": "Temporary upload folder location",
+"使用latex格式 测试3 写出麦克斯韦方程组": "Write Maxwell's equations using latex format for test 3",
+"这是一张图片": "This is an image",
+"没有发现任何近期上传的文件": "No recent uploaded files found",
+"如url未成功匹配返回None": "Return None if the URL does not match successfully",
+"如果有Latex环境": "If there is a Latex environment",
+"第一次运行时": "When running for the first time",
+"创建工作路径": "Create a working directory",
+"向": "To",
+"执行中. 删除数据": "Executing. Deleting data",
+"CodeInterpreter开源版": "CodeInterpreter open source version",
+"建议选择更稳定的接口": "It is recommended to choose a more stable interface",
+"现在您点击任意函数插件时": "Now when you click on any function plugin",
+"请使用“LatexEnglishCorrection+高亮”插件": "Please use the 'LatexEnglishCorrection+Highlight' plugin",
+"安装完成": "Installation completed",
+"记得用插件!」": "Remember to use the plugin!",
+"结论": "Conclusion",
+"无法下载资源": "Unable to download resources",
+"首先排除一个one-api没有done数据包的第三方Bug情形": "First exclude a third-party bug where one-api does not have a done data package",
+"知识库中添加文件": "Add files to the knowledge base",
+"处理重名的章节": "Handling duplicate chapter names",
+"先上传文件素材": "Upload file materials first",
+"无法从google获取信息!": "Unable to retrieve information from Google!",
+"展示如下": "Display as follows",
+"「把Arxiv论文翻译成中文PDF": "Translate Arxiv papers into Chinese PDF",
+"论文我刚刚放到上传区了」": "I just put the paper in the upload area",
+"正在下载Gradio主题": "Downloading Gradio themes",
+"再运行此插件": "Run this plugin again",
+"记录近期文件": "Record recent files",
+"粗心检查": "Careful check",
+"更多主题": "More themes",
+"//huggingface.co/spaces/gradio/theme-gallery 可选": "//huggingface.co/spaces/gradio/theme-gallery optional",
+"由 test_on_result_chg": "By test_on_result_chg",
+"所有问询记录将自动保存在本地目录./": "All inquiry records will be automatically saved in the local directory ./",
+"正在解析论文": "Analyzing the paper",
+"逐个文件转移到目标路径": "Move each file to the target path",
+"最多重试5次": "Retry up to 5 times",
+"日志文件夹的位置": "Location of the log folder",
+"我们暂时无法解析此PDF文档": "We are temporarily unable to parse this PDF document",
+"文件检索": "File retrieval",
+"/**/chatGPT对话历史*.html": "/**/chatGPT conversation history*.html",
+"非OpenAI官方接口返回了错误": "Non-OpenAI official interface returned an error",
+"如果在Arxiv上匹配失败": "If the match fails on Arxiv",
+"文件进入知识库后可长期保存": "Files can be saved for a long time after entering the knowledge base",
+"您可以再次重试": "You can try again",
+"整理文件集合": "Organize file collection",
+"检测到有缺陷的非OpenAI官方接口": "Detected defective non-OpenAI official interface",
+"此插件不调用Latex": "This plugin does not call Latex",
+"移除过时的旧文件从而节省空间&保护隐私": "Remove outdated old files to save space & protect privacy",
+"代码我刚刚打包拖到上传区了」": "I just packed the code and dragged it to the upload area",
+"将图像转为灰度图像": "Convert the image to grayscale",
+"待排除": "To be excluded",
+"请勿修改": "Please do not modify",
+"crazy_functions/代码重写为全英文_多线程.py": "crazy_functions/code rewritten to all English_multi-threading.py",
+"开发中": "Under development",
+"请查阅Gradio主题商店": "Please refer to the Gradio theme store",
+"输出消息": "Output message",
+"其他情况": "Other situations",
+"获取文献失败": "Failed to retrieve literature",
+"可以通过再次调用本插件的方式": "You can use this plugin again by calling it",
+"保留下半部分": "Keep the lower half",
+"排除问题": "Exclude the problem",
+"知识库": "Knowledge base",
+"ParsePDF失败": "ParsePDF failed",
+"向知识库追加更多文档": "Append more documents to the knowledge base",
+"此处待注入的知识库名称id": "The knowledge base name ID to be injected here",
+"您需要构建知识库后再运行此插件": "You need to build the knowledge base before running this plugin",
+"判定是否为公式 | 测试1 写出洛伦兹定律": "Determine whether it is a formula | Test 1 write out the Lorentz law",
+"构建知识库后": "After building the knowledge base"
 }
docs/translate_japanese.json
CHANGED
@@ -301,7 +301,6 @@
 "缺少的依赖": "不足している依存関係",
 "紫色": "紫色",
 "唤起高级参数输入区": "高度なパラメータ入力エリアを呼び出す",
-"所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log": "すべての問い合わせ記録は自動的にローカルディレクトリ./gpt_log/chat_secrets.logに保存されます",
 "则换行符更有可能表示段落分隔": "したがって、改行記号は段落の区切りを表す可能性がより高いです",
 ";4、引用数量": ";4、引用数量",
 "中转网址预览": "中継ウェブサイトのプレビュー",
@@ -448,7 +447,6 @@
 "表示函数是否成功执行": "関数が正常に実行されたかどうかを示す",
 "一般原样传递下去就行": "通常はそのまま渡すだけでよい",
 "琥珀色": "琥珀色",
-"gpt_log/**/chatGPT对话历史*.html": "gpt_log/**/chatGPT対話履歴*.html",
 "jittorllms 没有 sys_prompt 接口": "jittorllmsにはsys_promptインターフェースがありません",
 "清除": "クリア",
 "小于正文的": "本文より小さい",
@@ -1234,7 +1232,6 @@
 "找不到任何前端相关文件": "No frontend-related files can be found",
 "Not enough point. API2D账户点数不足": "Not enough points. API2D account points are insufficient",
 "当前版本": "Current version",
-"/gpt_log/总结论文-": "/gpt_log/Summary paper-",
 "1. 临时解决方案": "1. Temporary solution",
 "第8步": "Step 8",
 "历史": "History",
docs/translate_std.json
CHANGED
@@ -88,5 +88,7 @@
 "辅助功能": "Accessibility",
 "虚空终端": "VoidTerminal",
 "解析PDF_基于GROBID": "ParsePDF_BasedOnGROBID",
-"虚空终端主路由": "VoidTerminalMainRoute"
+"虚空终端主路由": "VoidTerminalMainRoute",
+"批量翻译PDF文档_NOUGAT": "BatchTranslatePDFDocuments_NOUGAT",
+"解析PDF_基于NOUGAT": "ParsePDF_NOUGAT"
 }
docs/translate_traditionalchinese.json
CHANGED
@@ -314,7 +314,6 @@
 "请用markdown格式输出": "請用 Markdown 格式輸出",
 "模仿ChatPDF": "模仿 ChatPDF",
 "等待多久判定为超时": "等待多久判定為超時",
-"/gpt_log/总结论文-": "/gpt_log/總結論文-",
 "请结合互联网信息回答以下问题": "請結合互聯網信息回答以下問題",
 "IP查询频率受限": "IP查詢頻率受限",
 "高级参数输入区的显示提示": "高級參數輸入區的顯示提示",
@@ -511,7 +510,6 @@
 "將生成的報告自動投射到文件上傳區": "將生成的報告自動上傳到文件區",
 "函數插件作者": "函數插件作者",
 "將要匹配的模式": "將要匹配的模式",
-"所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log": "所有詢問記錄將自動保存在本地目錄./gpt_log/chat_secrets.log",
 "正在分析一个项目的源代码": "正在分析一個專案的源代碼",
 "使每个段落之间有两个换行符分隔": "使每個段落之間有兩個換行符分隔",
 "并在被装饰的函数上执行": "並在被裝飾的函數上執行",
@@ -1059,7 +1057,6 @@
 "重试中": "重試中",
 "月": "月份",
 "localhost意思是代理软件安装在本机上": "localhost意思是代理軟體安裝在本機上",
-"gpt_log/**/chatGPT对话历史*.html": "gpt_log/**/chatGPT對話歷史*.html",
 "的长度必须小于 2500 个 Token": "長度必須小於 2500 個 Token",
 "抽取可用的api-key": "提取可用的api-key",
 "增强报告的可读性": "增強報告的可讀性",
multi_language.py
CHANGED
@@ -33,9 +33,11 @@ import functools
 import re
 import pickle
 import time
+from toolbox import get_conf

-CACHE_FOLDER =
+CACHE_FOLDER, = get_conf('PATH_LOGGING')
+
+blacklist = ['multi-language', CACHE_FOLDER, '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv']

 # LANG = "TraditionalChinese"
 # TransPrompt = f"Replace each json value `#` with translated results in Traditional Chinese, e.g., \"原始文本\":\"翻譯後文字\". Keep Json format. Do not answer #."
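Note the trailing comma in `CACHE_FOLDER, = get_conf('PATH_LOGGING')`: `get_conf` returns a tuple even for a single key, and the comma unpacks that one-element tuple. A tiny self-contained illustration of the idiom; `get_conf_demo` and its values are only examples, not part of this diff:

```python
# Illustration of the single-value tuple unpacking used above.
def get_conf_demo(*keys):
    demo = {'PATH_LOGGING': 'gpt_log', 'PATH_PRIVATE_UPLOAD': 'private_upload'}  # example values only
    return tuple(demo[k] for k in keys)

CACHE_FOLDER, = get_conf_demo('PATH_LOGGING')   # trailing comma unpacks the 1-tuple
print(CACHE_FOLDER)                             # prints the configured folder name
```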
request_llm/bridge_chatgpt.py
CHANGED
@@ -21,7 +21,7 @@ import importlib

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
-from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
+from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder
 proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG = \
     get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG')

@@ -72,6 +72,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 stream_response = response.iter_lines()
 result = ''
+json_data = None
 while True:
     try: chunk = next(stream_response).decode()
     except StopIteration:
@@ -90,20 +91,21 @@
 delta = json_data["delta"]
 if len(delta) == 0: break
 if "role" in delta: continue
-if "content" in delta:
+if "content" in delta:
     result += delta["content"]
     if not console_slience: print(delta["content"], end='')
     if observe_window is not None:
         # 观测窗,把已经获取的数据显示出去
-        if len(observe_window) >= 1:
+        if len(observe_window) >= 1:
+            observe_window[0] += delta["content"]
         # 看门狗,如果超过期限没有喂狗,则终止
-        if len(observe_window) >= 2:
+        if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("用户取消了程序。")
 else: raise RuntimeError("意外Json结构:"+delta)
-if json_data['finish_reason'] == 'content_filter':
+if json_data and json_data['finish_reason'] == 'content_filter':
     raise RuntimeError("由于提问含不合规内容被Azure过滤。")
-if json_data['finish_reason'] == 'length':
+if json_data and json_data['finish_reason'] == 'length':
     raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
 return result
@@ -128,6 +130,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
 return

+user_input = inputs
 if additional_fn is not None:
     from core_functional import handle_core_functionality
     inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
@@ -138,8 +141,8 @@
 yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

 # check mis-behavior
-if
-chatbot[-1] = (inputs, f"[Local Message]
+if is_the_upload_folder(user_input):
+    chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
 yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
 time.sleep(2)
@@ -179,8 +182,13 @@
 # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里
 chunk_decoded = chunk.decode()
 error_msg = chunk_decoded
+# 首先排除一个one-api没有done数据包的第三方Bug情形
+if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0:
+    yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。")
+    break
+# 其他情况,直接返回报错
 chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
-yield from update_ui(chatbot=chatbot, history=history, msg="非
+yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # 刷新界面
 return

 chunk_decoded = chunk.decode()
@@ -199,7 +207,7 @@
 chunkjson = json.loads(chunk_decoded[6:])
 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
 # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
-gpt_replying_buffer = gpt_replying_buffer +
+gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
 history[-1] = gpt_replying_buffer
 chatbot[-1] = (history[-2], history[-1])
 yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
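Two defensive changes in `predict_no_ui_long_connection` are worth calling out: `json_data` is now initialized to `None`, so the post-loop `finish_reason` checks cannot raise a NameError when the stream ends before any delta arrives, and the observation window only receives text when it actually has a slot. A condensed sketch of that hardened loop structure; the shape of `stream_response` items is illustrative, not the OpenAI wire format:

```python
import time

def consume_stream(stream_response, observe_window=None, watch_dog_patience=5):
    result, json_data = '', None            # json_data stays None if no delta ever arrives
    for json_data in stream_response:       # each item: a parsed "delta" payload (illustrative)
        delta = json_data.get("delta", {})
        if "content" in delta:
            result += delta["content"]
            if observe_window is not None:
                if len(observe_window) >= 1:        # slot 0 mirrors the partial answer to the UI
                    observe_window[0] += delta["content"]
                if len(observe_window) >= 2:        # slot 1 is a watchdog timestamp fed by the caller
                    if (time.time() - observe_window[1]) > watch_dog_patience:
                        raise RuntimeError("用户取消了程序。")
    # Guard with `json_data and ...` exactly as the diff does, so an empty stream is safe.
    if json_data and json_data.get('finish_reason') == 'length':
        raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
    return result
```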
request_llm/com_sparkapi.py
CHANGED
@@ -109,6 +109,7 @@ class SparkRequestInstance():
 code = data['header']['code']
 if code != 0:
     print(f'请求错误: {code}, {data}')
+    self.result_buf += str(data)
     ws.close()
     self.time_to_exit_event.set()
 else:
themes/gradios.py
ADDED
@@ -0,0 +1,46 @@
+import gradio as gr
+import logging
+from toolbox import get_conf, ProxyNetworkActivate
+CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')
+
+def adjust_theme():
+
+    try:
+        set_theme = gr.themes.ThemeClass()
+        with ProxyNetworkActivate():
+            logging.info('正在下载Gradio主题,请稍等。')
+            THEME, = get_conf('THEME')
+            if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
+            if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
+            set_theme = set_theme.from_hub(THEME.lower())
+
+        if LAYOUT=="TOP-DOWN":
+            js = ""
+        else:
+            with open('themes/common.js', 'r', encoding='utf8') as f:
+                js = f"<script>{f.read()}</script>"
+
+        # 添加一个萌萌的看板娘
+        if ADD_WAIFU:
+            js += """
+            <script src="file=docs/waifu_plugin/jquery.min.js"></script>
+            <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
+            <script src="file=docs/waifu_plugin/autoload.js"></script>
+            """
+        gradio_original_template_fn = gr.routes.templates.TemplateResponse
+        def gradio_new_template_fn(*args, **kwargs):
+            res = gradio_original_template_fn(*args, **kwargs)
+            res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
+            res.init_headers()
+            return res
+        gr.routes.templates.TemplateResponse = gradio_new_template_fn # override gradio template
+    except Exception as e:
+        set_theme = None
+        from toolbox import trimmed_format_exc
+        logging.error('gradio版本较旧, 不能自定义字体和颜色:', trimmed_format_exc())
+    return set_theme
+
+# with open("themes/default.css", "r", encoding="utf-8") as f:
+#     advanced_css = f.read()
+with open("themes/common.css", "r", encoding="utf-8") as f:
+    advanced_css = f.read()
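The new `themes/gradios.py` builds the theme at runtime by pulling it from the Gradio theme hub (`gr.themes.ThemeClass().from_hub(...)`), after stripping an optional `Huggingface-` prefix from the configured `THEME` value. A minimal standalone sketch of that download step; `'gradio/soft'` is only an example hub id, and the `ProxyNetworkActivate` context manager from `toolbox` is omitted here:

```python
import logging
import gradio as gr

def load_hub_theme(theme_name: str):
    # Accept values like "Huggingface-gradio/soft" and normalize them to "gradio/soft".
    if theme_name.lower().startswith('huggingface-'):
        theme_name = theme_name[len('huggingface-'):]
    try:
        logging.info('正在下载Gradio主题,请稍等。')
        return gr.themes.ThemeClass().from_hub(theme_name.lower())
    except Exception:
        logging.error('无法下载资源,回退到默认主题。')
        return gr.themes.Default()   # fallback when the theme hub is unreachable

# demo = gr.Blocks(theme=load_hub_theme('gradio/soft'))
```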
themes/theme.py
CHANGED
@@ -8,6 +8,9 @@ if THEME == 'Chuanhu-Small-and-Beautiful':
 elif THEME == 'High-Contrast':
     from .contrast import adjust_theme, advanced_css
     theme_declaration = ""
+elif '/' in THEME:
+    from .gradios import adjust_theme, advanced_css
+    theme_declaration = ""
 else:
     from .default import adjust_theme, advanced_css
     theme_declaration = ""
toolbox.py
CHANGED
@@ -5,6 +5,8 @@ import inspect
|
|
5 |
import re
|
6 |
import os
|
7 |
import gradio
|
|
|
|
|
8 |
from latex2mathml.converter import convert as tex2mathml
|
9 |
from functools import wraps, lru_cache
|
10 |
pj = os.path.join
|
@@ -77,14 +79,24 @@ def ArgsGeneralWrapper(f):
|
|
77 |
}
|
78 |
chatbot_with_cookie = ChatBotWithCookies(cookies)
|
79 |
chatbot_with_cookie.write_list(chatbot)
|
|
|
80 |
if cookies.get('lock_plugin', None) is None:
|
81 |
# 正常状态
|
82 |
-
|
|
|
|
|
|
|
83 |
else:
|
84 |
-
#
|
85 |
module, fn_name = cookies['lock_plugin'].split('->')
|
86 |
f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
|
87 |
yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, request)
|
|
|
|
|
|
|
|
|
|
|
|
|
88 |
return decorated
|
89 |
|
90 |
|
@@ -94,7 +106,8 @@ def update_ui(chatbot, history, msg='正常', **kwargs): # 刷新界面
|
|
94 |
"""
|
95 |
assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时, 可用clear将其清空, 然后用for+append循环重新赋值。"
|
96 |
cookies = chatbot.get_cookies()
|
97 |
-
|
|
|
98 |
# 解决插件锁定时的界面显示问题
|
99 |
if cookies.get('lock_plugin', None):
|
100 |
label = cookies.get('llm_model', "") + " | " + "正在锁定插件" + cookies.get('lock_plugin', None)
|
@@ -171,7 +184,7 @@ def HotReload(f):
|
|
171 |
========================================================================
|
172 |
第二部分
|
173 |
其他小工具:
|
174 |
-
-
|
175 |
- regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。
|
176 |
- report_execption: 向chatbot中添加简单的意外错误信息
|
177 |
- text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
|
@@ -203,36 +216,6 @@ def get_reduce_token_percent(text):
|
|
203 |
return 0.5, '不详'
|
204 |
|
205 |
|
206 |
-
def write_results_to_file(history, file_name=None):
|
207 |
-
"""
|
208 |
-
将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
|
209 |
-
"""
|
210 |
-
import os
|
211 |
-
import time
|
212 |
-
if file_name is None:
|
213 |
-
# file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
|
214 |
-
file_name = 'GPT-Report-' + gen_time_str() + '.md'
|
215 |
-
os.makedirs('./gpt_log/', exist_ok=True)
|
216 |
-
with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
|
217 |
-
f.write('# GPT-Academic Report\n')
|
218 |
-
for i, content in enumerate(history):
|
219 |
-
try:
|
220 |
-
if type(content) != str: content = str(content)
|
221 |
-
except:
|
222 |
-
continue
|
223 |
-
if i % 2 == 0:
|
224 |
-
f.write('## ')
|
225 |
-
try:
|
226 |
-
f.write(content)
|
227 |
-
except:
|
228 |
-
# remove everything that cannot be handled by utf8
|
229 |
-
f.write(content.encode('utf-8', 'ignore').decode())
|
230 |
-
f.write('\n\n')
|
231 |
-
res = '以上材料已经被写入:\t' + os.path.abspath(f'./gpt_log/{file_name}')
|
232 |
-
print(res)
|
233 |
-
return res
|
234 |
-
|
235 |
-
|
236 |
def write_history_to_file(history, file_basename=None, file_fullname=None):
|
237 |
"""
|
238 |
将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
|
@@ -241,9 +224,9 @@ def write_history_to_file(history, file_basename=None, file_fullname=None):
|
|
241 |
import time
|
242 |
if file_fullname is None:
|
243 |
if file_basename is not None:
|
244 |
-
file_fullname =
|
245 |
else:
|
246 |
-
file_fullname =
|
247 |
os.makedirs(os.path.dirname(file_fullname), exist_ok=True)
|
248 |
with open(file_fullname, 'w', encoding='utf8') as f:
|
249 |
f.write('# GPT-Academic Report\n')
|
@@ -519,7 +502,7 @@ def find_recent_files(directory):
|
|
519 |
if not os.path.exists(directory):
|
520 |
os.makedirs(directory, exist_ok=True)
|
521 |
for filename in os.listdir(directory):
|
522 |
-
file_path =
|
523 |
if file_path.endswith('.log'):
|
524 |
continue
|
525 |
created_time = os.path.getmtime(file_path)
|
@@ -534,7 +517,7 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
|
|
534 |
# 将文件复制一份到下载区
|
535 |
import shutil
|
536 |
if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
|
537 |
-
new_path =
|
538 |
# 如果已经存在,先删除
|
539 |
if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
|
540 |
# 把文件复制过去
|
@@ -549,44 +532,70 @@ def disable_auto_promotion(chatbot):
549      chatbot._cookies.update({'files_to_promote': []})
550      return
551
552  -   def
553      """
554      当文件被上传时的回调函数
555      """
556      if len(files) == 0:
557          return chatbot, txt
558  -
559  -
560  -
561  -
562  -
563  -
564  -
565  -       except:
566  -           pass
567      time_tag = gen_time_str()
568  -
569  -
570      for file in files:
571          file_origin_name = os.path.basename(file.orig_name)
572  -
573  -
574  -
575  -
576  -
577  -
578  -
579      else:
580  -       txt =
581  -
582      moved_files_str = '\t\n\n'.join(moved_files)
583  -   chatbot.append(['我上传了文件,请查收',
584                      f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
585                      f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
586  -                   f'\n\n现在您点击任意函数插件时,以上文件将被作为输入参数'+
587      cookies.update({
588          'most_recent_uploaded': {
589  -           'path':
590              'time': time.time(),
591              'time_str': time_tag
592      }})
@@ -595,11 +604,12 @@ def on_file_uploaded(files, chatbot, txt, txt2, checkboxes, cookies):
595
596  def on_report_generated(cookies, files, chatbot):
597      from toolbox import find_recent_files
598      if 'files_to_promote' in cookies:
599          report_files = cookies['files_to_promote']
600          cookies.pop('files_to_promote')
601      else:
602  -       report_files = find_recent_files(
603      if len(report_files) == 0:
604          return cookies, None, chatbot
605      # files.extend(report_files)
@@ -909,34 +919,35 @@ def zip_folder(source_folder, dest_folder, zip_name):
909          return
910
911      # Create the name for the zip file
912  -   zip_file =
913
914      # Create a ZipFile object
915      with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
916          # Walk through the source folder and add files to the zip file
917          for foldername, subfolders, filenames in os.walk(source_folder):
918              for filename in filenames:
919  -               filepath =
920                  zipf.write(filepath, arcname=os.path.relpath(filepath, source_folder))
921
922      # Move the zip file to the destination folder (if it wasn't already there)
923      if os.path.dirname(zip_file) != dest_folder:
924  -       os.rename(zip_file,
925  -       zip_file =
926
927      print(f"Zip file created at {zip_file}")
928
929  def zip_result(folder):
930      t = gen_time_str()
931  -   zip_folder(folder,
932  -   return pj(
933
934  def gen_time_str():
935      import time
936      return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
937
938  def get_log_folder(user='default', plugin_name='shared'):
939  -
940      if not os.path.exists(_dir): os.makedirs(_dir)
941      return _dir
942
  5      import re
  6      import os
  7      import gradio
  8  +   import shutil
  9  +   import glob
 10      from latex2mathml.converter import convert as tex2mathml
 11      from functools import wraps, lru_cache
 12      pj = os.path.join
 79          }
 80          chatbot_with_cookie = ChatBotWithCookies(cookies)
 81          chatbot_with_cookie.write_list(chatbot)
 82  +
 83          if cookies.get('lock_plugin', None) is None:
 84              # 正常状态
 85  +           if len(args) == 0: # 插件通道
 86  +               yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, request)
 87  +           else: # 对话通道,或者基础功能通道
 88  +               yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
 89          else:
 90  +           # 处理少数情况下的特殊插件的锁定状态
 91              module, fn_name = cookies['lock_plugin'].split('->')
 92              f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
 93              yield from f_hot_reload(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, request)
 94  +           # 判断一下用户是否错误地通过对话通道进入,如果是,则进行提醒
 95  +           final_cookies = chatbot_with_cookie.get_cookies()
 96  +           # len(args) != 0 代表“提交”键对话通道,或者基础功能通道
 97  +           if len(args) != 0 and 'files_to_promote' in final_cookies and len(final_cookies['files_to_promote']) > 0:
 98  +               chatbot_with_cookie.append(["检测到**滞留的缓存文档**,请及时处理。", "请及时点击“**保存当前对话**”获取所有滞留文档。"])
 99  +               yield from update_ui(chatbot_with_cookie, final_cookies['history'], msg="检测到被滞留的缓存文档")
100      return decorated
101
102
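Note on the lock_plugin branch above: the cookie is assumed to hold a dotted module path and a function name joined by '->', which the wrapper resolves with importlib before re-dispatching the request. A minimal, hedged sketch of that resolution step (the example cookie value is a placeholder, not a name confirmed by this diff):

    import importlib

    def resolve_locked_plugin(lock_plugin_cookie: str):
        # cookie format taken from the diff above: "<module path>-><function name>"
        module_path, fn_name = lock_plugin_cookie.split('->')
        module = importlib.import_module(module_path)   # simplified single-argument import
        return getattr(module, fn_name)

    # hypothetical usage; real cookie values are written by the plugins themselves
    # fn = resolve_locked_plugin('crazy_functions.虚空终端->自动终端')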
106      """
107      assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时, 可用clear将其清空, 然后用for+append循环重新赋值。"
108      cookies = chatbot.get_cookies()
109  +   # 备份一份History作为记录
110  +   cookies.update({'history': history})
111      # 解决插件锁定时的界面显示问题
112      if cookies.get('lock_plugin', None):
113          label = cookies.get('llm_model', "") + " | " + "正在锁定插件" + cookies.get('lock_plugin', None)
184      ========================================================================
185      第二部分
186      其他小工具:
187  +       - write_history_to_file: 将结果写入markdown文件中
188          - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。
189          - report_execption: 向chatbot中添加简单的意外错误信息
190          - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
216          return 0.5, '不详'
217
218
219  def write_history_to_file(history, file_basename=None, file_fullname=None):
220      """
221      将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
224      import time
225      if file_fullname is None:
226          if file_basename is not None:
227  +           file_fullname = pj(get_log_folder(), file_basename)
228          else:
229  +           file_fullname = pj(get_log_folder(), f'GPT-Academic-{gen_time_str()}.md')
230      os.makedirs(os.path.dirname(file_fullname), exist_ok=True)
231      with open(file_fullname, 'w', encoding='utf8') as f:
232          f.write('# GPT-Academic Report\n')
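A hedged usage sketch of the reworked write_history_to_file: history is assumed to alternate user prompts and model replies, and the report now lands under get_log_folder() instead of the old hard-coded ./gpt_log/ path (the file name below is illustrative):

    from toolbox import write_history_to_file, get_log_folder

    history = ["请翻译这段摘要", "以下是翻译结果……"]   # even indices: prompts, odd indices: replies
    write_history_to_file(history, file_basename="demo-report.md")
    # the markdown report is expected under get_log_folder(),
    # e.g. <PATH_LOGGING>/default/shared/demo-report.md depending on config.py
    print(get_log_folder())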
502      if not os.path.exists(directory):
503          os.makedirs(directory, exist_ok=True)
504      for filename in os.listdir(directory):
505  +       file_path = pj(directory, filename)
506          if file_path.endswith('.log'):
507              continue
508          created_time = os.path.getmtime(file_path)
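For context, a hedged sketch of calling find_recent_files after this change: it scans a directory, skips .log files, and keeps only files whose modification time is recent (the exact time window is defined later in the function and is not shown in this hunk):

    from toolbox import find_recent_files, get_log_folder

    # list files recently produced in the shared log folder; .log files are skipped
    recent = find_recent_files(get_log_folder())
    print(recent)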
517      # 将文件复制一份到下载区
518      import shutil
519      if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
520  +   new_path = pj(get_log_folder(), rename_file)
521      # 如果已经存在,先删除
522      if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
523      # 把文件复制过去
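A hedged sketch of promoting a freshly generated result file into the download zone; the file name is illustrative, rename_file is left to its default so the timestamp prefix shown above is applied, and chatbot=None is assumed acceptable because it is the declared default of the signature:

    import os
    from toolbox import promote_file_to_downloadzone, get_log_folder

    result_file = os.path.join(get_log_folder(), 'demo-output.txt')   # illustrative path
    with open(result_file, 'w', encoding='utf8') as f:
        f.write('demo')

    # copies the file to pj(get_log_folder(), '<timestamp>-demo-output.txt');
    # passing a real chatbot object would also register it for the UI download area
    promote_file_to_downloadzone(result_file, chatbot=None)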
532      chatbot._cookies.update({'files_to_promote': []})
533      return
534
535  + def is_the_upload_folder(string):
536  +     PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD')
537  +     pattern = r'^PATH_PRIVATE_UPLOAD/[A-Za-z0-9_-]+/\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}$'
538  +     pattern = pattern.replace('PATH_PRIVATE_UPLOAD', PATH_PRIVATE_UPLOAD)
539  +     if re.match(pattern, string): return True
540  +     else: return False
541  +
542  + def del_outdated_uploads(outdate_time_seconds):
543  +     PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD')
544  +     current_time = time.time()
545  +     one_hour_ago = current_time - outdate_time_seconds
546  +     # Get a list of all subdirectories in the PATH_PRIVATE_UPLOAD folder
547  +     # Remove subdirectories that are older than one hour
548  +     for subdirectory in glob.glob(f'{PATH_PRIVATE_UPLOAD}/*/*'):
549  +         subdirectory_time = os.path.getmtime(subdirectory)
550  +         if subdirectory_time < one_hour_ago:
551  +             try: shutil.rmtree(subdirectory)
552  +             except: pass
553  +     return
554  +
555  + def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkboxes, cookies):
556      """
557      当文件被上传时的回调函数
558      """
559      if len(files) == 0:
560          return chatbot, txt
561  +
562  +   # 移除过时的旧文件从而节省空间&保护隐私
563  +   outdate_time_seconds = 60
564  +   del_outdated_uploads(outdate_time_seconds)
565  +
566  +   # 创建工作路径
567  +   user_name = "default" if not request.username else request.username
568      time_tag = gen_time_str()
569  +   PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD')
570  +   target_path_base = pj(PATH_PRIVATE_UPLOAD, user_name, time_tag)
571  +   os.makedirs(target_path_base, exist_ok=True)
572  +
573  +   # 逐个文件转移到目标路径
574  +   upload_msg = ''
575      for file in files:
576          file_origin_name = os.path.basename(file.orig_name)
577  +       this_file_path = pj(target_path_base, file_origin_name)
578  +       shutil.move(file.name, this_file_path)
579  +       upload_msg += extract_archive(file_path=this_file_path, dest_dir=this_file_path+'.extract')
580  +
581  +   # 整理文件集合
582  +   moved_files = [fp for fp in glob.glob(f'{target_path_base}/**/*', recursive=True)]
583  +   if "底部输入区" in checkboxes:
584  +       txt, txt2 = "", target_path_base
585      else:
586  +       txt, txt2 = target_path_base, ""
587  +
588  +   # 输出消息
589      moved_files_str = '\t\n\n'.join(moved_files)
590  +   chatbot.append(['我上传了文件,请查收',
591                      f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
592                      f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
593  +                   f'\n\n现在您点击任意函数插件时,以上文件将被作为输入参数'+upload_msg])
594  +
595  +   # 记录近期文件
596      cookies.update({
597          'most_recent_uploaded': {
598  +           'path': target_path_base,
599              'time': time.time(),
600              'time_str': time_tag
601      }})
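The per-user upload directory built by on_file_uploaded follows the pattern <PATH_PRIVATE_UPLOAD>/<user>/<YYYY-MM-DD-hh-mm-ss>, which is exactly what is_the_upload_folder validates and what del_outdated_uploads prunes once a subdirectory is older than outdate_time_seconds. A small self-contained sketch of the same path check, assuming 'private_upload' as the PATH_PRIVATE_UPLOAD value (the real value comes from config.py):

    import re

    PATH_PRIVATE_UPLOAD = 'private_upload'   # assumed default; see config.py
    pattern = rf'^{PATH_PRIVATE_UPLOAD}/[A-Za-z0-9_-]+/\d{{4}}-\d{{2}}-\d{{2}}-\d{{2}}-\d{{2}}-\d{{2}}$'

    print(bool(re.match(pattern, 'private_upload/default/2023-10-01-12-30-45')))   # True
    print(bool(re.match(pattern, 'some/other/path')))                               # False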
604
605  def on_report_generated(cookies, files, chatbot):
606      from toolbox import find_recent_files
607  +   PATH_LOGGING, = get_conf('PATH_LOGGING')
608      if 'files_to_promote' in cookies:
609          report_files = cookies['files_to_promote']
610          cookies.pop('files_to_promote')
611      else:
612  +       report_files = find_recent_files(PATH_LOGGING)
613      if len(report_files) == 0:
614          return cookies, None, chatbot
615      # files.extend(report_files)
919          return
920
921      # Create the name for the zip file
922  +   zip_file = pj(dest_folder, zip_name)
923
924      # Create a ZipFile object
925      with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
926          # Walk through the source folder and add files to the zip file
927          for foldername, subfolders, filenames in os.walk(source_folder):
928              for filename in filenames:
929  +               filepath = pj(foldername, filename)
930                  zipf.write(filepath, arcname=os.path.relpath(filepath, source_folder))
931
932      # Move the zip file to the destination folder (if it wasn't already there)
933      if os.path.dirname(zip_file) != dest_folder:
934  +       os.rename(zip_file, pj(dest_folder, os.path.basename(zip_file)))
935  +       zip_file = pj(dest_folder, os.path.basename(zip_file))
936
937      print(f"Zip file created at {zip_file}")
938
939  def zip_result(folder):
940      t = gen_time_str()
941  +   zip_folder(folder, get_log_folder(), f'{t}-result.zip')
942  +   return pj(get_log_folder(), f'{t}-result.zip')
943
944  def gen_time_str():
945      import time
946      return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
947
948  def get_log_folder(user='default', plugin_name='shared'):
949  +   PATH_LOGGING, = get_conf('PATH_LOGGING')
950  +   _dir = pj(PATH_LOGGING, user, plugin_name)
951      if not os.path.exists(_dir): os.makedirs(_dir)
952      return _dir
953
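Taken together, get_log_folder and zip_result mean each plugin writes its artifacts under <PATH_LOGGING>/<user>/<plugin_name> and can hand back a single timestamped archive. A hedged sketch (folder and file names are illustrative; PATH_LOGGING is read from config.py, so the printed paths depend on that setting):

    import os
    from toolbox import get_log_folder, zip_result

    # per-user, per-plugin working directory, e.g. <PATH_LOGGING>/default/demo_plugin
    work_dir = get_log_folder(user='default', plugin_name='demo_plugin')
    with open(os.path.join(work_dir, 'result.txt'), 'w', encoding='utf8') as f:
        f.write('demo')

    # zips work_dir and returns something like <PATH_LOGGING>/default/shared/<timestamp>-result.zip
    zip_path = zip_result(work_dir)
    print(zip_path)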
version
CHANGED
@@ -1,5 +1,5 @@
1      {
2  -       "version": 3.
3          "show_feature": true,
4  -       "new_feature": "
5      }

1      {
2  +       "version": 3.52,
3          "show_feature": true,
4  +       "new_feature": "提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
5      }
|