Initial upload of the application and requirements files
- app.py +38 -0
- requirements.txt +7 -0
app.py
ADDED
@@ -0,0 +1,38 @@
+import gradio as gr
+from transformers import CLIPProcessor, CLIPModel
+
+model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+
+def inference(input_img, captions):
+    captions_list = captions.split(",")
+    inputs = processor(text=captions_list, images=input_img, return_tensors="pt", padding=True)
+    outputs = model(**inputs)
+    # this is the image-text similarity score
+    logits_per_image = outputs.logits_per_image
+    probs = logits_per_image.softmax(dim=1)
+    probabilities_percentages = ', '.join(['{:.2f}%'.format(prob.item() * 100) for prob in probs[0]])
+    return probabilities_percentages
+
+title = "CLIP Inference: Application using a pretrained CLIP model"
+description = "An application using Gradio interface that accepts an image and some captions, and displays a probability score with which each caption describes the image"
+
+examples = [["12863.jpg", "photo of water, a photo of pizza, photo of a smiling lady, photo of luggage, photo of bank"],
+            ["12659.jpg", "person riding bicycle, person driving car, photo of traffic lights, photo of vehicle, photo of light"],
+            ["12291.jpg", "photo of a cat, a photo of a cat sleeping on keyboard, photo of desktop monitor, photo of typing keyboard, photo of computer mouse, photo of water glass"],
+            ["12272.jpg", "person playing base ball, person holding bat, photo of a net, photo of audience behind net, photo of currency"],
+            ["9309.jpg", "a photo of cows, a photo of grass, group of cows grazing grass, photo of electric pole, photo of trees"],
+            ["3805.jpg", "a photo of water, Zebras drinking water, photo of a bird swimming, photo of grass"],
+            ["2788.jpg", "a photo of a man dropping pigeon feed, a photo of pigeons, photo of a man feeding pigeons, photo of water, people walking, a photo of old building in the background"]
+            ]
+
+demo = gr.Interface(
+    inference,
+    inputs = [gr.Image(shape=(416, 416), label="Input Image"), gr.Textbox(placeholder="Enter different captions for image, separated by comma")],
+    outputs = [gr.Textbox(label="Probability score of captions")],
+    title = title,
+    description = description,
+    examples = examples,
+)
+demo.launch()
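For a quick sanity check outside the Gradio UI, the inference function above can also be called directly. The short sketch below is not part of the commit: it assumes the definitions from app.py are already in scope (for instance, in the same Python session) and that "example.jpg" exists locally; the filename is only a placeholder.

# Sketch, not part of the commit: exercising inference() without the Gradio UI.
# Assumes model, processor and inference() from app.py are already defined,
# and that "example.jpg" (a placeholder name) is a local image file.
from PIL import Image

img = Image.open("example.jpg")
captions = "a photo of a cat, a photo of a dog, a photo of a car"
print(inference(img, captions))  # prints one percentage per caption, comma-separated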
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+torch
+torchvision
+pillow
+numpy
+gradio
+pytorch-lightning
+transformers
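One hedged observation on the dependency list: every package is unpinned, while app.py uses the Gradio 3.x signature of gr.Image (the shape argument). If reproducibility matters, pinning Gradio is one option; the version below is an assumption for illustration, not something stated in the commit.

# Hypothetical pinned entry (the version number is an assumption, not from the commit)
gradio==3.50.2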