Kendamarron committed (verified)
Commit 817d69b · Parent(s): 11ec7bf

Update app.py

Files changed (1): app.py +7 -7
app.py CHANGED
@@ -7,7 +7,7 @@ import gradio as gr
 from gradio import FileData
 import time
 import spaces
-ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+ckpt = "Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge"
 model = MllamaForConditionalGeneration.from_pretrained(ckpt,
 torch_dtype=torch.bfloat16).to("cuda")
 processor = AutoProcessor.from_pretrained(ckpt)
@@ -71,15 +71,15 @@ def bot_streaming(message, history, max_new_tokens=250):
 
 
 demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
-[{"text": "Which era does this piece belong to? Give details about the era.", "files":["./examples/rococo.jpg"]},
+[{"text": "これはどの時代のものですか?時代について詳しく教えてください。", "files":["./examples/rococo.jpg"]},
 200],
-[{"text": "Where do the droughts happen according to this diagram?", "files":["./examples/weather_events.png"]},
+[{"text": "この図によると、干ばつはどこで起こるのでしょうか?", "files":["./examples/weather_events.png"]},
 250],
-[{"text": "What happens when you take out white cat from this chain?", "files":["./examples/ai2d_test.jpg"]},
+[{"text": "このチェーンから白猫を外すとどうなるのか?", "files":["./examples/ai2d_test.jpg"]},
 250],
-[{"text": "How long does it take from invoice date to due date? Be short and concise.", "files":["./examples/invoice.png"]},
+[{"text": "請求書発行日から支払期日までの期間は?短く簡潔に。", "files":["./examples/invoice.png"]},
 250],
-[{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files":["./examples/wat_arun.jpg"]},
+[{"text": "このモニュメントはどこにありますか?また、その周辺でお勧めの場所を教えてください。", "files":["./examples/wat_arun.jpg"]},
 250],
 ],
 textbox=gr.MultimodalTextbox(),
@@ -92,7 +92,7 @@ demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
 )
 ],
 cache_examples=False,
-description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
+description="[Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge](https://huggingface.co/Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge)のデモ",
 stop_btn="Stop Generation",
 fill_height=True,
 multimodal=True)
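
For reference, a minimal sketch of loading the swapped-in checkpoint and running one image-grounded generation. It reuses the classes the Space already imports (MllamaForConditionalGeneration, AutoProcessor); the chat-template call, example prompt, and generation settings are illustrative assumptions, not part of this commit.

import torch
from PIL import Image
from transformers import AutoProcessor, MllamaForConditionalGeneration

ckpt = "Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge"
model = MllamaForConditionalGeneration.from_pretrained(
    ckpt, torch_dtype=torch.bfloat16).to("cuda")
processor = AutoProcessor.from_pretrained(ckpt)

# Chat-formatted prompt with one image placeholder (assumed usage, following
# the upstream Llama 3.2 Vision examples rather than this Space's own code).
messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "Where do the droughts happen according to this diagram?"},
]}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

image = Image.open("./examples/weather_events.png")
inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=250)
# Decode only the newly generated tokens.
print(processor.decode(output[0][inputs["input_ids"].shape[1]:],
                       skip_special_tokens=True))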
 
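Each entry in examples pairs a MultimodalTextbox payload ({"text": ..., "files": [...]}) with a value for an additional max_new_tokens input, of which the diff shows only the closing bracket. Below is a stripped-down sketch of that wiring with a placeholder handler; the slider is an assumption inferred from bot_streaming's max_new_tokens parameter, not code visible in this commit.

import gradio as gr

def bot(message, history, max_new_tokens=250):
    # Placeholder handler: a real Space would stream model output here.
    yield f"Got {len(message['files'])} file(s); prompt was: {message['text']}"

demo = gr.ChatInterface(
    fn=bot,
    title="Multimodal Llama",
    multimodal=True,                 # the textbox accepts text plus file uploads
    textbox=gr.MultimodalTextbox(),
    additional_inputs=[
        # Assumed: the extra input that the example values (200/250) feed into.
        gr.Slider(minimum=10, maximum=500, value=250, label="max_new_tokens"),
    ],
    examples=[
        # [MultimodalTextbox value, value for the additional input]
        [{"text": "Where do the droughts happen according to this diagram?",
          "files": ["./examples/weather_events.png"]}, 250],
    ],
    cache_examples=False,
    stop_btn="Stop Generation",
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()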