Spaces:
Runtime error
Jason Wu
committed on
Commit • aac647d
1 Parent(s): 31fe217
add initial files
- app.py +32 -0
- res/example.jpg +0 -0
- res/example_pair1.jpg +0 -0
- res/example_pair2.jpg +0 -0
app.py
ADDED
@@ -0,0 +1,32 @@
import torch
import gradio as gr
from torchvision import transforms

# TorchScript export of the screen-similarity embedding model.
TORCHSCRIPT_PATH = "res/screensim-resnet-uda+web350k.torchscript"
IMG_SIZE = (256, 128)

model = torch.jit.load(TORCHSCRIPT_PATH)

# Resize screenshots to the model's input size and scale pixels to [-1, 1].
img_transforms = transforms.Compose([
    transforms.Resize(IMG_SIZE),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

def predict(img1, img2, thresh=0.35):
    img_input1 = img_transforms(img1).unsqueeze(0)
    img_input2 = img_transforms(img2).unsqueeze(0)
    # L2 distance between the two embeddings; smaller means more similar.
    with torch.no_grad():
        diff = torch.linalg.norm(model(img_input1) - model(img_input2))

    return "{:.3f}".format(diff), "same screen" if float(diff) < thresh else "different screens"

example_imgs = [
    ["res/example_pair1.jpg", "res/example_pair2.jpg", 0.35],
    ["res/example_pair1.jpg", "res/example.jpg", 0.35]
]

interface = gr.Interface(
    fn=predict,
    inputs=[gr.Image(type="pil"), gr.Image(type="pil"), gr.Slider(0.2, 0.5, step=0.05, value=0.35)],
    outputs=["text", "text"],
    examples=example_imgs
)

interface.launch()
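For reference, predict can also be exercised outside the Gradio UI. The sketch below is not part of this commit: it assumes Pillow is installed and that predict is defined as above, then scores the example pair shipped in res/. Distances below the threshold (default 0.35) are reported as "same screen".

from PIL import Image

# Load the example pair added in this commit; predict expects PIL images.
img_a = Image.open("res/example_pair1.jpg").convert("RGB")
img_b = Image.open("res/example_pair2.jpg").convert("RGB")

# Returns the formatted embedding distance and the same/different verdict.
distance, verdict = predict(img_a, img_b, thresh=0.35)
print(distance, verdict)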
res/example.jpg
ADDED
res/example_pair1.jpg
ADDED
res/example_pair2.jpg
ADDED