Spaces:
Sleeping
Sleeping
Tuchuanhuhuhu
committed on
Commit
·
1318660
1
Parent(s):
c904b2a
feat: 加入Gemini Pro (Vision) 支持 #1039
Browse files- config_example.json +1 -1
- locale/en_US.json +3 -2
- modules/config.py +7 -4
- modules/models/GoogleGemini.py +81 -0
- modules/models/base_model.py +3 -0
- modules/models/models.py +6 -1
- modules/presets.py +10 -0
- modules/utils.py +1 -1
- requirements.txt +1 -0
config_example.json
CHANGED
@@ -3,7 +3,7 @@
|
|
3 |
|
4 |
//== API 配置 ==
|
5 |
"openai_api_key": "", // 你的 OpenAI API Key,一般必填,若空缺则需在图形界面中填入API Key
|
6 |
-
"
|
7 |
"xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
|
8 |
"minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
|
9 |
"minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
|
|
|
3 |
|
4 |
//== API 配置 ==
|
5 |
"openai_api_key": "", // 你的 OpenAI API Key,一般必填,若空缺则需在图形界面中填入API Key
|
6 |
+
"google_genai_api_key": "", // 你的 Google PaLM API Key,用于 Google PaLM 对话模型
|
7 |
"xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
|
8 |
"minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
|
9 |
"minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
|
locale/en_US.json
CHANGED
@@ -170,7 +170,7 @@
|
|
170 |
"本地编制索引": "Local indexing",
|
171 |
"是否在本地编制知识库索引?如果是,可以在使用本地模型时离线使用知识库,否则使用OpenAI服务来编制索引(需要OpenAI API Key)。请确保你的电脑有至少16GB内存。本地索引模型需要从互联网下载。": "Do you want to index the knowledge base locally? If so, you can use the knowledge base offline when using the local model, otherwise use the OpenAI service to index (requires OpenAI API Key). Make sure your computer has at least 16GB of memory. The local index model needs to be downloaded from the Internet.",
|
172 |
"现在开始设置其他在线模型的API Key": "Start setting the API Key for other online models",
|
173 |
-
"是否设置默认 Google
|
174 |
"是否设置默认 XMChat API 密钥?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,可以在软件启动后手动输入 API Key。": "Set the default XMChat API Key? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, you can manually enter the API Key after the software starts.",
|
175 |
"是否设置默认 MiniMax API 密钥和 Group ID?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,将无法使用 MiniMax 模型。": "Set the default MiniMax API Key and Group ID? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, the MiniMax model will not be available.",
|
176 |
"你的": "Your ",
|
@@ -227,5 +227,6 @@
|
|
227 |
"设置完成。现在请重启本程序。": "Setup completed. Please restart this program now.",
|
228 |
"你设置了 ": "You set ",
|
229 |
" 为: ": " as: ",
|
230 |
-
"输入的不是数字,将使用默认值。": "The input is not a number, the default value will be used."
|
|
|
231 |
}
|
|
|
170 |
"本地编制索引": "Local indexing",
|
171 |
"是否在本地编制知识库索引?如果是,可以在使用本地模型时离线使用知识库,否则使用OpenAI服务来编制索引(需要OpenAI API Key)。请确保你的电脑有至少16GB内存。本地索引模型需要从互联网下载。": "Do you want to index the knowledge base locally? If so, you can use the knowledge base offline when using the local model, otherwise use the OpenAI service to index (requires OpenAI API Key). Make sure your computer has at least 16GB of memory. The local index model needs to be downloaded from the Internet.",
|
172 |
"现在开始设置其他在线模型的API Key": "Start setting the API Key for other online models",
|
173 |
+
"是否设置默认 Google AI Studio API 密钥?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,可以在软件启动后手动输入 API Key。": "Set the default Google AI Studio API Key? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, you can manually enter the API Key after the software starts.",
|
174 |
"是否设置默认 XMChat API 密钥?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,可以在软件启动后手动输入 API Key。": "Set the default XMChat API Key? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, you can manually enter the API Key after the software starts.",
|
175 |
"是否设置默认 MiniMax API 密钥和 Group ID?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,将无法使用 MiniMax 模型。": "Set the default MiniMax API Key and Group ID? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, the MiniMax model will not be available.",
|
176 |
"你的": "Your ",
|
|
|
227 |
"设置完成。现在请重启本程序。": "Setup completed. Please restart this program now.",
|
228 |
"你设置了 ": "You set ",
|
229 |
" 为: ": " as: ",
|
230 |
+
"输入的不是数字,将使用默认值。": "The input is not a number, the default value will be used.",
|
231 |
+
"由于下面的原因,Google 拒绝返回 Gemini 的回答:\n\n": "For the following reasons, Google refuses to return Gemini's response:\n\n"
|
232 |
}
|
modules/config.py
CHANGED
@@ -108,10 +108,13 @@ if "extra_models" in config:
|
|
108 |
|
109 |
HIDE_MY_KEY = config.get("hide_my_key", False)
|
110 |
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
|
|
|
|
|
|
115 |
|
116 |
xmchat_api_key = config.get("xmchat_api_key", "")
|
117 |
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
|
|
|
108 |
|
109 |
HIDE_MY_KEY = config.get("hide_my_key", False)
|
110 |
|
111 |
+
# Google Gemini / PaLM API key resolution. Priority (lowest to highest):
# legacy GOOGLE_PALM_API_KEY env var, GOOGLE_GENAI_API_KEY env var,
# "google_palm_api_key" config entry, "google_genai_api_key" config entry.
google_genai_api_key = os.environ.get("GOOGLE_PALM_API_KEY", None)
# FIX: fall back to the PaLM key read above instead of discarding it.
google_genai_api_key = os.environ.get(
    "GOOGLE_GENAI_API_KEY", google_genai_api_key)
google_genai_api_key = config.get("google_palm_api_key", google_genai_api_key)
google_genai_api_key = config.get("google_genai_api_key", google_genai_api_key)
# FIX: os.environ values must be strings; assigning None raises TypeError,
# which would crash startup for users without this key configured.
os.environ["GOOGLE_GENAI_API_KEY"] = google_genai_api_key or ""
|
118 |
|
119 |
xmchat_api_key = config.get("xmchat_api_key", "")
|
120 |
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
|
modules/models/GoogleGemini.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import logging
|
3 |
+
import textwrap
|
4 |
+
import uuid
|
5 |
+
|
6 |
+
import google.generativeai as genai
|
7 |
+
import gradio as gr
|
8 |
+
import PIL
|
9 |
+
import requests
|
10 |
+
|
11 |
+
from modules.presets import i18n
|
12 |
+
|
13 |
+
from ..index_func import construct_index
|
14 |
+
from ..utils import count_token
|
15 |
+
from .base_model import BaseLLMModel
|
16 |
+
|
17 |
+
|
18 |
+
class GoogleGeminiClient(BaseLLMModel):
    """Chat client for Google Gemini models via the google-generativeai SDK.

    Handles both the text-only model and the multimodal ("vision") variant;
    for the latter, uploaded image files are spliced into the conversation
    history before each request.
    """

    def __init__(self, model_name, api_key, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name)
        self.api_key = api_key
        # Vision variants accept images; detected purely from the model name.
        self.multimodal = "vision" in model_name.lower()
        # Image files queued by handle_file_upload, consumed on next request.
        self.image_paths = []

    def _get_gemini_style_input(self):
        """Flatten history (text plus queued images) into the SDK message list.

        NOTE(review): queued images are appended to self.history permanently,
        so they are re-sent with every subsequent request — confirm intended.
        """
        self.history.extend([{"role": "image", "content": i} for i in self.image_paths])
        self.image_paths = []
        messages = []
        for item in self.history:
            if item["role"] == "image":
                messages.append(PIL.Image.open(item["content"]))
            else:
                messages.append(item["content"])
        return messages

    def to_markdown(self, text):
        """Convert Gemini bullet characters to Markdown and blockquote the text."""
        text = text.replace("•", " *")
        # predicate=lambda _: True prefixes every line, including blank ones.
        return textwrap.indent(text, "> ", predicate=lambda _: True)

    def handle_file_upload(self, files, chatbot, language):
        """Queue images (multimodal) or build a local index (text model).

        Returns (files_update, chatbot, status); returns None when *files*
        is empty, matching the original behavior.
        """
        if files:
            if self.multimodal:
                for file in files:
                    if file.name:
                        self.image_paths.append(file.name)
                        chatbot = chatbot + [((file.name,), None)]
                return None, chatbot, None
            else:
                construct_index(self.api_key, file_src=files)
                status = i18n("索引构建完成")
                return gr.Files.update(), chatbot, status

    def get_answer_at_once(self):
        """Send the full history once and return (markdown_text, token_count)."""
        genai.configure(api_key=self.api_key)
        messages = self._get_gemini_style_input()
        model = genai.GenerativeModel(self.model_name)
        response = model.generate_content(messages)
        try:
            return self.to_markdown(response.text), len(response.text)
        except ValueError:
            # response.text raises ValueError when the prompt was blocked;
            # surface Google's feedback instead of crashing.
            return (
                i18n("由于下面的原因,Google 拒绝返回 Gemini 的回答:\n\n")
                + str(response.prompt_feedback),
                0,
            )

    def get_answer_stream_iter(self):
        """Stream the answer, yielding the accumulated text after each chunk."""
        genai.configure(api_key=self.api_key)
        messages = self._get_gemini_style_input()
        model = genai.GenerativeModel(self.model_name)
        response = model.generate_content(messages, stream=True)
        partial_text = ""
        # FIX: iterate with a distinct name instead of rebinding `response`
        # (the original assigned response = i.text inside the loop over
        # response, a confusing shadowing of the iterator being consumed).
        for chunk in response:
            partial_text += chunk.text
            yield partial_text
        self.all_token_counts[-1] = count_token(partial_text)
        yield partial_text
|
modules/models/base_model.py
CHANGED
@@ -154,6 +154,7 @@ class ModelType(Enum):
|
|
154 |
OpenAIVision = 16
|
155 |
ERNIE = 17
|
156 |
DALLE3 = 18
|
|
|
157 |
|
158 |
@classmethod
|
159 |
def get_type(cls, model_name: str):
|
@@ -184,6 +185,8 @@ class ModelType(Enum):
|
|
184 |
model_type = ModelType.ChuanhuAgent
|
185 |
elif "palm" in model_name_lower:
|
186 |
model_type = ModelType.GooglePaLM
|
|
|
|
|
187 |
elif "midjourney" in model_name_lower:
|
188 |
model_type = ModelType.Midjourney
|
189 |
elif "azure" in model_name_lower or "api" in model_name_lower:
|
|
|
154 |
OpenAIVision = 16
|
155 |
ERNIE = 17
|
156 |
DALLE3 = 18
|
157 |
+
GoogleGemini = 19
|
158 |
|
159 |
@classmethod
|
160 |
def get_type(cls, model_name: str):
|
|
|
185 |
model_type = ModelType.ChuanhuAgent
|
186 |
elif "palm" in model_name_lower:
|
187 |
model_type = ModelType.GooglePaLM
|
188 |
+
elif "gemini" in model_name_lower:
|
189 |
+
model_type = ModelType.GoogleGemini
|
190 |
elif "midjourney" in model_name_lower:
|
191 |
model_type = ModelType.Midjourney
|
192 |
elif "azure" in model_name_lower or "api" in model_name_lower:
|
modules/models/models.py
CHANGED
@@ -105,9 +105,14 @@ def get_model(
|
|
105 |
msg = i18n("启用的工具:") + ", ".join([i.name for i in model.tools])
|
106 |
elif model_type == ModelType.GooglePaLM:
|
107 |
from .GooglePaLM import Google_PaLM_Client
|
108 |
-
access_key = os.environ.get("
|
109 |
model = Google_PaLM_Client(
|
110 |
model_name, access_key, user_name=user_name)
|
|
|
|
|
|
|
|
|
|
|
111 |
elif model_type == ModelType.LangchainChat:
|
112 |
from .Azure import Azure_OpenAI_Client
|
113 |
model = Azure_OpenAI_Client(model_name, user_name=user_name)
|
|
|
105 |
msg = i18n("启用的工具:") + ", ".join([i.name for i in model.tools])
|
106 |
elif model_type == ModelType.GooglePaLM:
|
107 |
from .GooglePaLM import Google_PaLM_Client
|
108 |
+
access_key = os.environ.get("GOOGLE_GENAI_API_KEY", access_key)
|
109 |
model = Google_PaLM_Client(
|
110 |
model_name, access_key, user_name=user_name)
|
111 |
+
elif model_type == ModelType.GoogleGemini:
|
112 |
+
from .GoogleGemini import GoogleGeminiClient
|
113 |
+
access_key = os.environ.get("GOOGLE_GENAI_API_KEY", access_key)
|
114 |
+
model = GoogleGeminiClient(
|
115 |
+
model_name, access_key, user_name=user_name)
|
116 |
elif model_type == ModelType.LangchainChat:
|
117 |
from .Azure import Azure_OpenAI_Client
|
118 |
model = Azure_OpenAI_Client(model_name, user_name=user_name)
|
modules/presets.py
CHANGED
@@ -64,6 +64,8 @@ ONLINE_MODELS = [
|
|
64 |
"川虎助理",
|
65 |
"川虎助理 Pro",
|
66 |
"DALL-E 3",
|
|
|
|
|
67 |
"GooglePaLM",
|
68 |
"xmchat",
|
69 |
"Azure OpenAI",
|
@@ -169,6 +171,14 @@ MODEL_METADATA = {
|
|
169 |
"model_name": "ERNIE-Bot-4",
|
170 |
"token_limit": 1024,
|
171 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
172 |
}
|
173 |
|
174 |
if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
|
|
|
64 |
"川虎助理",
|
65 |
"川虎助理 Pro",
|
66 |
"DALL-E 3",
|
67 |
+
"Gemini Pro",
|
68 |
+
"Gemini Pro Vision",
|
69 |
"GooglePaLM",
|
70 |
"xmchat",
|
71 |
"Azure OpenAI",
|
|
|
171 |
"model_name": "ERNIE-Bot-4",
|
172 |
"token_limit": 1024,
|
173 |
},
|
174 |
+
"Gemini Pro": {
|
175 |
+
"model_name": "gemini-pro",
|
176 |
+
"token_limit": 30720,
|
177 |
+
},
|
178 |
+
"Gemini Pro Vision": {
|
179 |
+
"model_name": "gemini-pro-vision",
|
180 |
+
"token_limit": 30720,
|
181 |
+
}
|
182 |
}
|
183 |
|
184 |
if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
|
modules/utils.py
CHANGED
@@ -1097,7 +1097,7 @@ def setup_wizard():
|
|
1097 |
type=ConfigType.Password,
|
1098 |
)
|
1099 |
],
|
1100 |
-
"是否设置默认 Google
|
1101 |
)
|
1102 |
# XMChat
|
1103 |
wizard.set(
|
|
|
1097 |
type=ConfigType.Password,
|
1098 |
)
|
1099 |
],
|
1100 |
+
"是否设置默认 Google AI Studio API 密钥?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,可以在软件启动后手动输入 API Key。",
|
1101 |
)
|
1102 |
# XMChat
|
1103 |
wizard.set(
|
requirements.txt
CHANGED
@@ -23,6 +23,7 @@ faiss-cpu==1.7.4
|
|
23 |
duckduckgo-search>=4.1.1
|
24 |
arxiv
|
25 |
wikipedia
|
|
|
26 |
google.generativeai
|
27 |
unstructured
|
28 |
google-api-python-client
|
|
|
23 |
duckduckgo-search>=4.1.1
|
24 |
arxiv
|
25 |
wikipedia
|
26 |
+
google-cloud-aiplatform
|
27 |
google.generativeai
|
28 |
unstructured
|
29 |
google-api-python-client
|