# gallKro/app.py
import torch
import gradio as gr
from transformers import AutoProcessor, AutoModelForImageTextToText
# Load Model and Processor
MODEL_NAME = "Qwen/Qwen2.5-VL-72B-Instruct"
processor = AutoProcessor.from_pretrained(MODEL_NAME)
model = AutoModelForImageTextToText.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")
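# Note: the 72B checkpoint needs several high-memory GPUs even in float16.
# For a lighter local test, a smaller sibling checkpoint (e.g. "Qwen/Qwen2.5-VL-7B-Instruct")
# can be swapped into MODEL_NAME above; the rest of the script stays the same.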
# Text-only chat handler; gr.ChatInterface calls it with (message, history)
def chatbot(message, history):
    messages = [{"role": "user", "content": [{"type": "text", "text": message}]}]
    # Build the prompt with the model's chat template, then tokenize
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[text], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=512)
    # Drop the prompt tokens so only the newly generated reply is decoded
    new_tokens = outputs[:, inputs["input_ids"].shape[1]:]
    return processor.batch_decode(new_tokens, skip_special_tokens=True)[0]
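# Optional local sanity check (hypothetical; leave commented out on the Space):
# print(chatbot("Introduce yourself in one sentence.", []))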
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 AI Chatbot using Qwen2.5-VL-72B-Instruct")
    gr.ChatInterface(fn=chatbot)
# Launch Gradio App
demo.launch()