Spaces:
Running
on
Zero
Running
on
Zero
Kendamarron
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -7,7 +7,7 @@ import gradio as gr
|
|
7 |
from gradio import FileData
|
8 |
import time
|
9 |
import spaces
|
10 |
-
ckpt = "
|
11 |
model = MllamaForConditionalGeneration.from_pretrained(ckpt,
|
12 |
torch_dtype=torch.bfloat16).to("cuda")
|
13 |
processor = AutoProcessor.from_pretrained(ckpt)
|
@@ -71,15 +71,15 @@ def bot_streaming(message, history, max_new_tokens=250):
|
|
71 |
|
72 |
|
73 |
demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
|
74 |
-
[{"text": "
|
75 |
200],
|
76 |
-
[{"text": "
|
77 |
250],
|
78 |
-
[{"text": "
|
79 |
250],
|
80 |
-
[{"text": "
|
81 |
250],
|
82 |
-
[{"text": "
|
83 |
250],
|
84 |
],
|
85 |
textbox=gr.MultimodalTextbox(),
|
@@ -92,7 +92,7 @@ demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
|
|
92 |
)
|
93 |
],
|
94 |
cache_examples=False,
|
95 |
-
description="
|
96 |
stop_btn="Stop Generation",
|
97 |
fill_height=True,
|
98 |
multimodal=True)
|
|
|
7 |
from gradio import FileData
|
8 |
import time
|
9 |
import spaces
|
10 |
+
ckpt = "Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge"
|
11 |
model = MllamaForConditionalGeneration.from_pretrained(ckpt,
|
12 |
torch_dtype=torch.bfloat16).to("cuda")
|
13 |
processor = AutoProcessor.from_pretrained(ckpt)
|
|
|
71 |
|
72 |
|
73 |
demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
|
74 |
+
[{"text": "これはどの時代のものですか?時代について詳しく教えてください。", "files":["./examples/rococo.jpg"]},
|
75 |
200],
|
76 |
+
[{"text": "この図によると、干ばつはどこで起こるのでしょうか?", "files":["./examples/weather_events.png"]},
|
77 |
250],
|
78 |
+
[{"text": "このチェーンから白猫を外すとどうなるのか?", "files":["./examples/ai2d_test.jpg"]},
|
79 |
250],
|
80 |
+
[{"text": "請求書発行日から支払期日までの期間は?短く簡潔に。", "files":["./examples/invoice.png"]},
|
81 |
250],
|
82 |
+
[{"text": "このモニュメントはどこにありますか?また、その周辺でお勧めの場所を教えてください。", "files":["./examples/wat_arun.jpg"]},
|
83 |
250],
|
84 |
],
|
85 |
textbox=gr.MultimodalTextbox(),
|
|
|
92 |
)
|
93 |
],
|
94 |
cache_examples=False,
|
95 |
+
description="[Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge](https://huggingface.co/Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge)のデモ",
|
96 |
stop_btn="Stop Generation",
|
97 |
fill_height=True,
|
98 |
multimodal=True)
|