Keldos committed
Commit d79b4b6 · 1 Parent(s): 8ad2261

feat: adjust chatbot placeholder
locale/en_US.json CHANGED
@@ -270,4 +270,6 @@
     "groq_llama3_70b_description": "LLaMA 3 70B with [Groq](https://console.groq.com/), the impressively fast language model inferencing service.",
     "groq_mixtral_8x7b_description": "Mixtral 8x7B with [Groq](https://console.groq.com/), the impressively fast language model inferencing service.",
     "groq_gemma_7b_description": "Gemma 7B with [Groq](https://console.groq.com/), the impressively fast language model inferencing service.",
+    "gpt_default_slogan": "How can I help you today?",
+    "claude_default_slogan": "What can I help you with?",
 }
locale/zh_CN.json CHANGED
@@ -12,4 +12,6 @@
     "groq_llama3_70b_description": "采用 [Groq](https://console.groq.com/) 的 LLaMA 3 70B。Groq 是一个非常快速的语言模型推理服务。",
     "groq_mixtral_8x7b_description": "采用 [Groq](https://console.groq.com/) 的 Mixtral 8x7B。Groq 是一个非常快速的语言模型推理服务。",
     "groq_gemma_7b_description": "采用 [Groq](https://console.groq.com/) 的 Gemma 7B。Groq 是一个非常快速的语言模型推理服务。",
+    "gpt_default_slogan": "今天能帮您些什么?",
+    "claude_default_slogan": "What can I help you with?",
 }
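Both locale files now carry the two slogan keys that modules/presets.py references through i18n(...). As a rough sketch of how such a lookup resolves (the project's actual i18n helper and its locale-selection logic are not shown in this commit, so the file path and fall-back-to-key behavior below are assumptions):

    import json

    # Minimal stand-in for an i18n-style lookup; illustrative only.
    # Assumes the table is the locale/en_US.json shown above and that an
    # unknown key falls back to itself -- not the repo's actual implementation.
    def lookup_slogan(key: str, locale_path: str = "locale/en_US.json") -> str:
        with open(locale_path, encoding="utf-8") as f:
            table = json.load(f)
        return table.get(key, key)

    print(lookup_slogan("gpt_default_slogan"))     # -> "How can I help you today?"
    print(lookup_slogan("claude_default_slogan"))  # -> "What can I help you with?"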
modules/presets.py CHANGED
@@ -111,7 +111,9 @@ DEFAULT_METADATA = {
     "model_name": None, # api model name, used if this model is meant to be used online
     "filelist": None, # file list in the repo to download, now only supports .gguf files
     "description": "", # description of the model, displayed in the chatbot header when hovering the cursor over the info icon
-    "placeholder": {}, # placeholder for the model, displayed in the chat area when no message is present
+    "placeholder": { # placeholder for the model, displayed in the chat area when no message is present
+        "slogan": i18n("gpt_default_slogan"),
+    },
     "model_type": None, # model type, used to determine the model's behavior. If not set, the model type is inferred from the model name
     "multimodal": False, # whether the model is multimodal
     "api_host": None, # base url for the model's api
@@ -150,80 +152,140 @@ MODEL_METADATA = {
         "model_name": "gpt-3.5-turbo",
         "description": "gpt3.5turbo_description",
         "token_limit": 4096,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-green.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT3.5 Turbo Instruct": {
         "model_name": "gpt-3.5-turbo-instruct",
         "description": "gpt3.5turbo_instruct_description",
         "token_limit": 4096,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-green.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT3.5 Turbo 16K": {
         "model_name": "gpt-3.5-turbo-16k",
         "description": "gpt3.5turbo_16k_description",
         "token_limit": 16384,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-green.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT3.5 Turbo 0301": {
         "model_name": "gpt-3.5-turbo-0301",
         "token_limit": 4096,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-green.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT3.5 Turbo 0613": {
         "model_name": "gpt-3.5-turbo-0613",
         "token_limit": 4096,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-green.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT3.5 Turbo 1106": {
         "model_name": "gpt-3.5-turbo-1106",
         "token_limit": 16384,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-green.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT4": {
         "model_name": "gpt-4",
         "description": "gpt4_description",
         "token_limit": 8192,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-black.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT4 32K": {
         "model_name": "gpt-4-32k",
         "description": "gpt4_32k_description",
         "token_limit": 32768,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-black.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "GPT4 Turbo": {
         "model_name": "gpt-4-turbo",
         "description": "gpt4turbo_description",
         "token_limit": 128000,
-        "multimodal": True
+        "multimodal": True,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/openai-black.webp",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "Claude 3 Haiku": {
         "model_name": "claude-3-haiku-20240307",
         "description": "claude3_haiku_description",
         "token_limit": 200000,
         "max_generation": 4096,
-        "multimodal": True
+        "multimodal": True,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/claude-3.jpg",
+            "slogan": i18n("claude_default_slogan"),
+        }
     },
     "Claude 3 Sonnet": {
         "model_name": "claude-3-sonnet-20240229",
         "description": "claude3_sonnet_description",
         "token_limit": 200000,
         "max_generation": 4096,
-        "multimodal": True
+        "multimodal": True,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/claude-3.jpg",
+            "slogan": i18n("claude_default_slogan"),
+        }
     },
     "Claude 3 Opus": {
         "model_name": "claude-3-opus-20240229",
         "description": "claude3_opus_description",
         "token_limit": 200000,
         "max_generation": 4096,
-        "multimodal": True
+        "multimodal": True,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/claude-3.jpg",
+            "slogan": i18n("claude_default_slogan"),
+        }
     },
     "川虎助理": {
         "model_name": "川虎助理",
-        "description": "川虎助理是一款虚拟世界游戏,在这个游戏中,川虎扮演……",
+        "description": "类似 AutoGPT,全自动解决你的问题",
+        "placeholder": {
+            "logo": "file=web_assets/icon/any-icon-512.png",
+            "logo_rounded": "false",
+            "slogan": "川虎今天能帮你做些什么?",
+            "question_1": "今天杭州天气如何?",
+            "question_2": "最近 Apple 发布了什么新品?",
+            "question_3": "现在显卡的价格如何?",
+            "question_4": "TikTok 上有什么新梗?",
+        }
+    },
+    "川虎助理 Pro": {
+        "model_name": "川虎助理 Pro",
+        "description": "类似 AutoGPT,全自动解决你的问题",
         "placeholder": {
             "logo": "file=web_assets/icon/any-icon-512.png",
             "logo_rounded": "false",
-            "slogon": "我川虎今天能帮你做什么?",
-            "question_1": "你好",
-            "question_2": "你是谁",
-            "question_3": "你会做什么",
-            "question_4": "你会说什么语言",
+            "slogan": "川虎Pro今天能帮你做些什么?",
+            "question_1": "今天杭州天气如何?",
+            "question_2": "最近 Apple 发布了什么新品?",
+            "question_3": "现在显卡的价格如何?",
+            "question_4": "TikTok 上有什么新梗?",
         }
     },
-    "川虎助理 Pro": {"model_name": "川虎助理 Pro"},
     "DALL-E 3": {"model_name": "dall-e-3"},
     "ERNIE-Bot-turbo": {
         "model_name": "ERNIE-Bot-turbo",
@@ -240,10 +302,18 @@ MODEL_METADATA = {
     "Gemini Pro": {
         "model_name": "gemini-pro",
         "token_limit": 30720,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/gemini.svg",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "Gemini Pro Vision": {
         "model_name": "gemini-pro-vision",
         "token_limit": 30720,
+        "placeholder": {
+            "logo": "file=web_assets/model_logos/gemini.svg",
+            "slogan": i18n("gpt_default_slogan"),
+        }
     },
     "Ollama": {
         "model_name": "ollama",
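Because DEFAULT_METADATA now ships a placeholder carrying the GPT slogan, a model whose MODEL_METADATA entry defines no placeholder of its own still shows a slogan in the empty chat area. A rough sketch of that layering, assuming per-model metadata is applied on top of the defaults (the actual resolution code lives elsewhere in presets.py and is not part of this diff):

    # Illustrative only -- the key names mirror the diff above, but the merge
    # strategy ("defaults first, per-model overrides second") is an assumption.
    DEFAULT_METADATA = {
        "multimodal": False,
        "placeholder": {"slogan": "gpt_default_slogan"},  # resolved via i18n at display time
    }

    MODEL_METADATA = {
        "Ollama": {"model_name": "ollama"},  # defines no placeholder of its own
        "Claude 3 Opus": {
            "model_name": "claude-3-opus-20240229",
            "placeholder": {"logo": "file=web_assets/model_logos/claude-3.jpg",
                            "slogan": "claude_default_slogan"},
        },
    }

    def resolved(name: str) -> dict:
        merged = dict(DEFAULT_METADATA)
        merged.update(MODEL_METADATA.get(name, {}))
        return merged

    print(resolved("Ollama")["placeholder"]["slogan"])         # gpt_default_slogan (inherited)
    print(resolved("Claude 3 Opus")["placeholder"]["slogan"])  # claude_default_slogan (override)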
modules/utils.py CHANGED
@@ -1422,8 +1422,8 @@ def reboot_chuanhu():
 from .models.base_model import BaseLLMModel
 def setPlaceholder(model_name: str | None = "", model: BaseLLMModel | None = None):
     from .webui import get_html
-    logo_class, slogon_class, question_class = "", "", ""
-    model_logo, model_logo_round, model_slogon, model_question_1, model_question_2, model_question_3, model_question_4 = "", "", "", "", "", "", ""
+    logo_class, slogan_class, question_class = "", "", ""
+    model_logo, model_logo_round, model_slogan, model_question_1, model_question_2, model_question_3, model_question_4 = "", "", "", "", "", "", ""

     if model is None:
         try:
@@ -1435,9 +1435,9 @@ def setPlaceholder(model_name: str | None = "", model: BaseLLMModel | None = Non
         except:
             pass
         try:
-            model_slogon = i18n(MODEL_METADATA[model_name]["placeholder"]["slogon"])
+            model_slogan = i18n(MODEL_METADATA[model_name]["placeholder"]["slogan"])
         except:
-            slogon_class = "hideK"
+            slogan_class = "hideK"
         try:
             model_question_1 = i18n(MODEL_METADATA[model_name]["placeholder"]["question_1"])
             model_question_2 = i18n(MODEL_METADATA[model_name]["placeholder"]["question_2"])
@@ -1455,9 +1455,9 @@ def setPlaceholder(model_name: str | None = "", model: BaseLLMModel | None = Non
         except:
             pass
         try:
-            model_slogon = i18n(model.placeholder["slogon"])
+            model_slogan = i18n(model.placeholder["slogan"])
         except:
-            slogon_class = "hideK"
+            slogan_class = "hideK"
         try:
             model_question_1 = i18n(model.placeholder["question_1"])
             model_question_2 = i18n(model.placeholder["question_2"])
@@ -1466,7 +1466,7 @@ def setPlaceholder(model_name: str | None = "", model: BaseLLMModel | None = Non
         except:
             question_class = "hideK"

-    if logo_class == "hideK" and slogon_class == "hideK" and question_class == "hideK":
+    if logo_class == "hideK" and slogan_class == "hideK" and question_class == "hideK":
         return ""
     else:
         # Unless explicitly set to squared, false, etc., default to rounded corners
@@ -1474,13 +1474,13 @@ def setPlaceholder(model_name: str | None = "", model: BaseLLMModel | None = Non
             logo_class += " rounded"
         return get_html("chatbot_placeholder.html").format(
             chatbot_ph_logo = model_logo,
-            chatbot_ph_slogon = model_slogon,
+            chatbot_ph_slogan = model_slogan,
             chatbot_ph_question_1 = model_question_1,
             chatbot_ph_question_2 = model_question_2,
             chatbot_ph_question_3 = model_question_3,
             chatbot_ph_question_4 = model_question_4,
             chatbot_ph_logo_class = logo_class,
-            chatbot_ph_slogon_class = slogon_class,
+            chatbot_ph_slogan_class = slogan_class,
             chatbot_ph_question_class = question_class
         )
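The rename here is mechanical, but the pattern it touches is easy to miss in the noise: every placeholder field is read inside its own try/except, and a failed lookup hides the matching element by setting its class to "hideK". A condensed sketch of just the slogan branch (simplified from the function above; the broad except mirrors the original code):

    def resolve_slogan(model_name, MODEL_METADATA, i18n):
        """Condensed slogan branch of setPlaceholder, for illustration."""
        model_slogan, slogan_class = "", ""
        try:
            model_slogan = i18n(MODEL_METADATA[model_name]["placeholder"]["slogan"])
        except:
            slogan_class = "hideK"  # no slogan configured -> hide the <h1> in the template
        return model_slogan, slogan_class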
web_assets/html/chatbot_placeholder.html CHANGED
@@ -1,7 +1,7 @@
 <div id="chatbot-placeholder-pl">
     <div id="chatbot-placeholder-header">
         <img src="{chatbot_ph_logo}" alt="avatar" class="{chatbot_ph_logo_class}" />
-        <h1 class="{chatbot_ph_slogon_class}">{chatbot_ph_slogon}</h1>
+        <h1 class="{chatbot_ph_slogan_class}">{chatbot_ph_slogan}</h1>
     </div>

     <div id="chatbot-placeholder-options" class="{chatbot_ph_question_class}">
web_assets/model_logos/claude-3.jpg ADDED
web_assets/model_logos/gemini.svg ADDED
web_assets/model_logos/meta.webp ADDED
web_assets/model_logos/openai-black.webp ADDED
web_assets/model_logos/openai-green.webp ADDED
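The logos added above are what the new placeholder "logo" values point at, and the header template two sections up is filled with str.format, so the {chatbot_ph_slogan} / {chatbot_ph_slogan_class} fields in the HTML must match the keyword arguments built in setPlaceholder exactly; leaving the old spelling on either side would make the .format call raise KeyError for the unknown field. A small sketch with the relevant fragment inlined (the real code loads the full template through get_html, and the "file=" logo prefix is assumed to be resolved by the web UI's static-file handling):

    # Inlined fragment of chatbot_placeholder.html, for illustration only.
    fragment = (
        '<img src="{chatbot_ph_logo}" alt="avatar" class="{chatbot_ph_logo_class}" />'
        '<h1 class="{chatbot_ph_slogan_class}">{chatbot_ph_slogan}</h1>'
    )

    html = fragment.format(
        chatbot_ph_logo="file=web_assets/model_logos/openai-green.webp",
        chatbot_ph_logo_class="rounded",
        chatbot_ph_slogan="How can I help you today?",
        chatbot_ph_slogan_class="",
    )
    print(html)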
web_assets/stylesheet/chatbot.css CHANGED
@@ -414,13 +414,17 @@ img.avatar-image {
     display: block;
 }
 #chatbot-placeholder-header img {
-    width: 100px;
-    height: 100px;
+    width: 72px;
+    height: 72px;
     margin: 20px auto;
+    border-radius: 8px;
 }
 .rounded {
     border-radius: 50%;
 }
+#chatbot-placeholder-header img.rounded {
+    border-radius: 50% !important;
+}

 #chatbot-placeholder-header h1 {
     font-size: 1.5em;
@@ -448,6 +452,9 @@ img.avatar-image {
     flex: 0 1 320px;
     opacity: 0.65;
     height: 48px;
+    text-overflow: ellipsis;
+    overflow: hidden;
+    white-space: nowrap;
 }

 #chatbot-placeholder-options button:hover {