Snehil-Shah committed
Commit 2a4ddda · 1 parent: c212e6b

Create Gradio Interface


Signed-off-by: Snehil Shah <[email protected]>

Files changed (4)
  1. app.py +67 -0
  2. images.ipynb → encoding.ipynb +0 -0
  3. models.txt +0 -3
  4. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,67 @@
+import gradio as gr
+from qdrant_client import QdrantClient
+from sentence_transformers import SentenceTransformer
+
+# CLIP embeds text and images into a shared vector space,
+# so one model serves both query modalities.
+model = SentenceTransformer("clip-ViT-B-32")
+
+qdrant_client = QdrantClient(
+    url="url",
+    port=443,
+    api_key="key",
+)
+
+def search_images(modality, count, input_text, input_image):
+    query = str(input_text) if modality == "Text" else input_image
+
+    results = qdrant_client.search(
+        collection_name="images",
+        query_vector=model.encode(query).tolist(),
+        with_payload=True,
+        limit=count,
+    )
+
+    # Update the heading, fill one image slot per result, and hide
+    # the rest of the 40 slots created in the output grid below.
+    return (
+        [gr.update(value="## Results")]
+        + [gr.update(value=result.payload["url"], visible=True) for result in results]
+        + [gr.update(visible=False)] * (40 - count)
+    )
+
+def input_interface(choice):
+    # Toggle between the textbox and the image uploader.
+    if choice == "Text":
+        return [gr.update(visible=True), gr.update(visible=False)]
+    else:
+        return [gr.update(visible=False), gr.update(visible=True)]
+
+with gr.Blocks() as interface:
+    gr.Markdown("# Multi-Modal Image Search Engine\nSemantically search over 15k images using text or image inputs. The image data is limited, so don't expect to find everything!")
+
+    # Input Interface
+    with gr.Column(variant="compact"):
+        input_type = gr.Radio(choices=["Text", "Image"], type="value", label="Modality", value="Text")
+        with gr.Column() as text_area:
+            text_input = gr.Textbox(label="Text", lines=1, placeholder="Try 'Golden Retriever'")
+        with gr.Column(visible=False) as image_uploader:
+            image_input = gr.Image(type="pil")
+        input_type.change(input_interface, input_type, [text_area, image_uploader])
+
+    # Search Controls
+    with gr.Column(variant="panel"):
+        count = gr.Slider(minimum=1, maximum=40, step=1, value=8, label="No. of Results")
+        images_btn = gr.Button(value="Search Images", variant="primary")
+
+    # Output Interface: a results heading plus a 10x4 grid of image slots
+    images = []
+    images.append(gr.Markdown())
+    with gr.Column() as output_images:
+        for i in range(10):
+            with gr.Row():
+                for j in range(4):
+                    images.append(gr.Image(visible=False))
+    images_btn.click(search_images, inputs=[input_type, count, text_input, image_input], outputs=images)
+
+interface.launch()
images.ipynb → encoding.ipynb RENAMED
The diff for this file is too large to render.
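The renamed notebook presumably builds the `images` collection that app.py searches. As a rough sketch of compatible indexing code (the collection name, the `url` payload field, and the CLIP model come from app.py; the cosine distance, 512-dim vector size, point IDs, and the example URL list are assumptions, not the notebook's actual code):

# Hypothetical indexing sketch -- inferred from app.py, not taken from encoding.ipynb.
from io import BytesIO

import requests
from PIL import Image
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, PointStruct, VectorParams
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("clip-ViT-B-32")  # same model app.py encodes queries with
client = QdrantClient(url="url", port=443, api_key="key")  # placeholder credentials, as in app.py

# clip-ViT-B-32 produces 512-dimensional embeddings; cosine distance is assumed.
client.recreate_collection(
    collection_name="images",
    vectors_config=VectorParams(size=512, distance=Distance.COSINE),
)

image_urls = ["https://example.com/dog.jpg"]  # stand-in for the ~15k image URLs

points = []
for idx, url in enumerate(image_urls):
    image = Image.open(BytesIO(requests.get(url).content))
    points.append(PointStruct(
        id=idx,
        vector=model.encode(image).tolist(),
        payload={"url": url},  # app.py reads result.payload["url"] to display each hit
    ))

client.upsert(collection_name="images", points=points)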
 
models.txt DELETED
@@ -1,3 +0,0 @@
-torchvision
-resnet
-OpenAI CLIP

requirements.txt ADDED
@@ -0,0 +1,3 @@
+gradio==4.13.0
+qdrant-client==1.7.0
+sentence-transformers==2.2.2