import gradio as gr
from transformers import pipeline

# Load the BLIP image-captioning pipeline from Hugging Face Transformers
get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

def captioner(input):
    # Run the pipeline on the uploaded image and return the generated caption text
    output = get_completion(input)
    return output[0]['generated_text']

# Close any previously launched Gradio apps, then build and launch the interface
gr.close_all()
demo = gr.Interface(fn=captioner,
                    inputs=[gr.Image(label="Upload image", type="pil")],
                    outputs=[gr.Textbox(label="Caption")],
                    title="Image Captioning with BLIP model",
                    description="Caption any image using the `Salesforce/blip-image-captioning-base` model",
                    allow_flagging="never")
demo.launch()
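
# Optional sanity check (a minimal sketch): call captioner() directly on a local
# image before relying on the UI. The "example.jpg" path is hypothetical; replace
# it with any image file on disk, then uncomment the two lines below.
#
# from PIL import Image
# print(captioner(Image.open("example.jpg")))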