import os
import shutil
from collections.abc import Generator
from pathlib import Path

import gradio as gr
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from gradio.chat_interface import ChatInterface
from openai import OpenAI
USERNAME = "ahmedheakl"
SPACE_NAME = "arabic-vlm-app"
TITLE = "AIN Arabic VLM"
DESCRIPTION = "Welcome to the AIN Arabic VLM chatbot. The best Arabic-English VLM developed by MBZUAI."
PUBLIC_DIR = Path("static")
TOP_N_HISTORY = 2
LOGO_PATH = "./logo.jpeg"

os.makedirs(PUBLIC_DIR, exist_ok=True)

app = FastAPI()
app.mount("/static", StaticFiles(directory=PUBLIC_DIR), name="static")

# Copy the logo into the public directory so it is served under /static.
shutil.copy(LOGO_PATH, PUBLIC_DIR / Path(LOGO_PATH).name)
logo_path = f"/static/{Path(LOGO_PATH).name}"
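# Anything placed in PUBLIC_DIR is served by the StaticFiles mount above, so
# once deployed the logo is reachable at
# https://{USERNAME}-{SPACE_NAME}.hf.space/static/logo.jpeg -- the same URL
# pattern the chat handler below uses to expose uploaded images to the model.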
def load_chat(
    base_url: str,
    model: str,
    token: str | None = None,
    *,
    system_message: str | None = None,
    **kwargs,
) -> gr.ChatInterface:
    client = OpenAI(api_key=token, base_url=base_url)
    start_message = (
        [{"role": "system", "content": system_message}] if system_message else []
    )

    def open_api_stream(
        message: dict, history: list | None
    ) -> Generator[str, None, None]:
        history = history or start_message
        if len(history) > 0 and isinstance(history[0], (list, tuple)):
            # Legacy tuple-style history: keep only the most recent turns,
            # then convert to the OpenAI messages format. Note that
            # _tuples_to_messages is a private Gradio helper.
            history = history[:TOP_N_HISTORY]
            history = ChatInterface._tuples_to_messages(history)
        files = message.get("files", [])
        content = [
            {"type": "text", "text": message.get("text", "")}
        ]
        if files:
            # Move the uploaded image into the public /static mount so the
            # remote model can fetch it by URL.
            src_path = Path(files[0])
            dest_path = PUBLIC_DIR / src_path.name
            shutil.move(src_path, dest_path)
            image_url = f"https://{USERNAME}-{SPACE_NAME}.hf.space/static/{src_path.name}"
            content.append({"type": "image_url", "image_url": {"url": image_url}})
        stream = client.chat.completions.create(
            model=model,
            messages=history + [{"role": "user", "content": content}],
            stream=True,
        )
        # Accumulate streamed deltas and yield the running response so the
        # chat window updates token by token.
        response = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
                yield response

    return ChatInterface(open_api_stream, type="messages", **kwargs)
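# For reference, a sketch of the payload shape open_api_stream receives from
# a multimodal textbox (the values here are hypothetical):
#
#   example_message = {
#       "text": "What is in this image?",
#       "files": ["/tmp/gradio/uploads/photo.jpg"],
#   }
#
# "files" holds local paths to the uploads; the handler moves the first one
# into PUBLIC_DIR and sends its public URL to the model.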
# Custom CSS for the page. Passing it via gr.Blocks(css=...) is the supported
# way to inject styles; a <style> tag inside gr.Markdown may be sanitized away.
custom_css = """
.container { margin: 0 auto; max-width: 1200px; padding: 20px; }
.header { text-align: center; margin-bottom: 40px; }
"""

with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as gradio_interface:
    # chatbot = gr.Chatbot()
    # textbox = gr.MultimodalTextbox(file_count="single", file_types=["image"], sources=["upload"])
    load_chat(
        "https://0f21-5-195-0-150.ngrok-free.app/v1",
        model="test",
        token="ollama",
        multimodal=True,
        # chatbot=chatbot,
        # textbox=textbox,
    )
    # Do not call .launch() here: the interface is mounted into the FastAPI
    # app below, and launch() would block and start a second server.
app = gr.mount_gradio_app(app, gradio_interface, path="/")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
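# A quick smoke test for the deployed Space (a sketch, assuming the default
# *.hf.space URL pattern; run from a separate shell, not from this file):
#
#   import urllib.request
#   url = f"https://{USERNAME}-{SPACE_NAME}.hf.space/static/logo.jpeg"
#   with urllib.request.urlopen(url) as resp:
#       assert resp.status == 200  # static mount is serving files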