dmayboroda committed · Commit f5527e2
Parent(s): 6c0c633

space code

Files changed:
- app.py +60 -0
- requirements.txt +9 -0
app.py
ADDED
@@ -0,0 +1,60 @@
import os
import torch
import clip
import transformers
import numpy as np
import gradio as gr
from PIL import Image
from multilingual_clip import pt_multilingual_clip
from torch.utils.data import DataLoader
from datasets import load_dataset
from usearch.index import Index

dataset = load_dataset("dmayboroda/sk-test_1")

device = "cuda" if torch.cuda.is_available() else "cpu"
clipmodel, preprocess = clip.load("ViT-L/14", device=device)

# Multilingual text encoder whose 768-dim output matches ViT-L/14 image features.
model_name = 'M-CLIP/LABSE-Vit-L-14'
model = pt_multilingual_clip.MultilingualCLIP.from_pretrained(model_name)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
# Note: M-CLIP's forward() tokenizes on CPU internally, so the text model
# stays on CPU rather than being moved to `device`.

index = Index(ndim=768, metric='cos', dtype='f32')

# Encode every image once; position i in `images` doubles as the index key,
# so search hits can be mapped back to the original PIL images.
images = []
print('Encoding images...')
for i, img in enumerate(dataset['train']):
    image = preprocess(img['image']).unsqueeze(0).to(device)
    with torch.no_grad():
        image_features = clipmodel.encode_image(image)
    index.add(i, image_features.squeeze(0).float().cpu().numpy())
    images.append(img['image'])


def get_similar(text, num_sim):
    # Encode the query with the multilingual text model; the English-only
    # clip.tokenize path would defeat the point of loading M-CLIP.
    with torch.no_grad():
        text_features = model.forward([text], tokenizer)
    search = text_features.squeeze(0).cpu().numpy()
    matches = index.search(search, int(num_sim))  # gr.Number yields a float
    return [images[int(match.key)] for match in matches]


iface = gr.Interface(
    fn=get_similar,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter Text Here..."),
        gr.Number(label="Number of Images", value=15),
    ],
    # get_similar returns a list of images, so a Gallery (rather than a
    # single "image" output) is needed to display them all.
    outputs=gr.Gallery(label="Similar Images"),
    title="Model Testing",
)

iface.launch()
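app.py imports torch.utils.data.DataLoader but the encoding loop above processes one image at a time, so batching was presumably the intent. A minimal sketch of a batched encoding pass, reusing the objects defined above; the batch size and collate function are assumptions, not part of the commit:

# Hedged sketch: batch the image-encoding pass using the otherwise unused
# DataLoader import. Assumes `dataset`, `preprocess`, `clipmodel`, `device`,
# `index`, and `images` from app.py; batch_size=32 is an arbitrary choice.
def collate(batch):
    pixels = torch.stack([preprocess(item['image']) for item in batch])
    pils = [item['image'] for item in batch]  # keep originals for display
    return pixels, pils

loader = DataLoader(dataset['train'], batch_size=32, collate_fn=collate)
key = 0
for pixels, pils in loader:
    with torch.no_grad():
        feats = clipmodel.encode_image(pixels.to(device))
    for feat, pil in zip(feats, pils):
        index.add(key, feat.float().cpu().numpy())
        images.append(pil)
        key += 1

Encoding in batches keeps the GPU busy instead of paying per-image dispatch overhead; on CPU the gain is smaller, but the loop stays equivalent to the per-image version.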
requirements.txt
ADDED
@@ -0,0 +1,9 @@
torch
usearch
multilingual-clip
git+https://github.com/openai/CLIP.git
Pillow
transformers
numpy
datasets
gradio
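With the dependencies installed, the retrieval path can be smoke-tested without the UI; a minimal check, assuming app.py has been run up to (but not including) iface.launch(), with an arbitrary example query:

# Quick smoke test of the retrieval path, bypassing the Gradio interface.
# The query string is an arbitrary example, not from the commit.
results = get_similar("a dog playing in the snow", 5)
print(f"retrieved {len(results)} images")
results[0].save("top_match.png")  # results are PIL images from the dataset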