import gradio as gr
from PIL import Image
import spaces
from transformers import pipeline

# Initialize model
get_completion = pipeline(
    "image-to-text", model="Salesforce/blip-image-captioning-base", device=0
)


@spaces.GPU(duration=120)
def captioner(input: Image.Image) -> str:
    """
    Generate a caption for the given image using the BLIP-IMAGE-CAPTIONING-BASE model.

    Args:
        input (Image.Image): The input image for which to generate a caption.

    Returns:
        str: The generated caption text.
    """
    output = get_completion(input)
    return output[0]['generated_text']


####### GRADIO APP #######
title = """