shenyunhang committed
Commit f9a86ee · 1 Parent(s): 54658f2

6a0e740d98a0@2025-02-26_11-00-48:

Files changed (2):
  1. app.py +2 -1
  2. requirements.txt +3 -3
app.py CHANGED
@@ -977,7 +977,7 @@ def bot(history: list):
 with gr.Blocks(title=model_name_or_path.split('/')[-1] + "🔥🚀🔥", theme=gr.themes.Ocean()) as demo:
     gr.HTML(html)
     with gr.Row():
-        chatbot = gr.Chatbot(type="messages", elem_id="chatbot", bubble_full_width=False, height=800)
+        chatbot = gr.Chatbot(type="messages", elem_id="chatbot", bubble_full_width=False, height=600)

     with gr.Row():
         chat_input = gr.MultimodalTextbox(
@@ -998,3 +998,4 @@ with gr.Blocks(title=model_name_or_path.split('/')[-1] + "🔥🚀🔥", theme=gr.themes.Ocean()) as demo:
     bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

 demo.launch()
+
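For context, the hunk above only changes the height of a gr.Chatbot that is wired to a gr.MultimodalTextbox through a submit → bot → re-enable chain. Below is a minimal, self-contained sketch of that same pattern, not the Space's code: the echo bot and the add_message handler are placeholders for the Space's model call and upload handling, the html banner and model_name_or_path title are omitted, bubble_full_width is left out, and it assumes the unpinned gradio from requirements.txt resolves to a recent release that supports gr.themes.Ocean and type="messages".

import gradio as gr

def add_message(message: dict, history: list):
    # Queue the user's text and any uploaded files, then lock the textbox while the bot replies.
    for path in message.get("files") or []:
        history.append({"role": "user", "content": {"path": path}})
    if message.get("text"):
        history.append({"role": "user", "content": message["text"]})
    return gr.MultimodalTextbox(value=None, interactive=False), history

def bot(history: list):
    # Placeholder for the Space's model call: just echo the most recent user turn.
    last = history[-1]["content"] if history else ""
    history.append({"role": "assistant", "content": f"echo: {last}"})
    return history

with gr.Blocks(theme=gr.themes.Ocean()) as demo:
    with gr.Row():
        chatbot = gr.Chatbot(type="messages", elem_id="chatbot", height=600)
    with gr.Row():
        chat_input = gr.MultimodalTextbox(interactive=True, placeholder="Enter a message or upload a file...")

    chat_msg = chat_input.submit(add_message, [chat_input, chatbot], [chat_input, chatbot])
    bot_msg = chat_msg.then(bot, chatbot, chatbot)
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

demo.launch()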
requirements.txt CHANGED
@@ -1,7 +1,7 @@
-torch==2.4.0
+torch
 torchvision
 #flash-attn
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
 deepspeed
 accelerate
 transformers
@@ -12,4 +12,4 @@ pillow
 opencv-python
 filetype
 natsort
-gradio
+gradio