update app.py

- app.py +103 -14
- requirements.txt +3 -1
- samples/augreg_base/{a_2.png → hummingbird.png} +0 -0
- samples/augreg_base/{a_3.png → hummingbird2.png} +0 -0
- samples/augreg_base/{3_in.png → lizard.png} +0 -0
- samples/augreg_base/{2_in.png → storck.png} +0 -0
- samples/augreg_base/{1_in.png → sundial.png} +0 -0
- samples/augreg_base/{a.png → tank.png} +0 -0
app.py CHANGED
@@ -1,23 +1,112 @@
(The removed side of this hunk was not fully captured in this view; the surviving fragments of the old 23-line file are a model-setup block ending in `model_finetuned.eval()`, a dangling `)`, an empty string literal `""`, and the previous stub body of `image_classifier`.)
 import torch
 import timm
 import gradio as gr
+from huggingface_hub import hf_hub_download
+import os
 from ViT.ViT_new import vit_base_patch16_224 as vit
+import torchvision.transforms as transforms
+import requests
+from PIL import Image
+import numpy as np
+import cv2
+
+
+# create heatmap from mask on image
+def show_cam_on_image(img, mask):
+    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
+    heatmap = np.float32(heatmap) / 255
+    cam = heatmap + np.float32(img)
+    cam = cam / np.max(cam)
+    return cam
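
A minimal smoke test of the blend (hypothetical arrays, not part of the commit): `show_cam_on_image` assumes a float RGB image and a mask both scaled to [0, 1], and returns a float overlay renormalized to [0, 1].

    import numpy as np

    # Hypothetical inputs with the shapes the app produces (224x224 RGB).
    img = np.random.rand(224, 224, 3).astype(np.float32)   # image in [0, 1]
    mask = np.random.rand(224, 224).astype(np.float32)     # relevance in [0, 1]
    overlay = show_cam_on_image(img, mask)
    assert overlay.shape == (224, 224, 3) and overlay.max() <= 1.0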
+
+start_layer = 0
+
+# rule 5 from paper
+def avg_heads(cam, grad):
+    cam = cam.reshape(-1, cam.shape[-2], cam.shape[-1])
+    grad = grad.reshape(-1, grad.shape[-2], grad.shape[-1])
+    cam = grad * cam
+    cam = cam.clamp(min=0).mean(dim=0)
+    return cam
+
+# rule 6 from paper
+def apply_self_attention_rules(R_ss, cam_ss):
+    R_ss_addition = torch.matmul(cam_ss, R_ss)
+    return R_ss_addition
+
+def generate_relevance(model, input, index=None):
+    output = model(input, register_hook=True)
+    if index == None:
+        index = np.argmax(output.cpu().data.numpy(), axis=-1)
+
+    one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
+    one_hot[0, index] = 1
+    one_hot_vector = one_hot
+    one_hot = torch.from_numpy(one_hot).requires_grad_(True)
+    one_hot = torch.sum(one_hot * output)
+    model.zero_grad()
+    one_hot.backward(retain_graph=True)
+
+    num_tokens = model.blocks[0].attn.get_attention_map().shape[-1]
+    R = torch.eye(num_tokens, num_tokens)
+    for i, blk in enumerate(model.blocks):
+        if i < start_layer:
+            continue
+        grad = blk.attn.get_attn_gradients()
+        cam = blk.attn.get_attention_map()
+        cam = avg_heads(cam, grad)
+        R += apply_self_attention_rules(R, cam)
+    return R[0, 1:]
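
The two rules compose into one per-block update: Ā is the head-average of the positive gradient-weighted attention (rule 5), and the running relevance accumulates R ← R + Ā·R (rule 6). A toy sketch with random tensors (hypothetical shapes for ViT-Base, not part of the commit):

    import torch

    heads, tokens = 12, 197                      # 12 heads, 196 patches + CLS
    cam = torch.rand(1, heads, tokens, tokens)   # attention maps
    grad = torch.rand(1, heads, tokens, tokens)  # their gradients
    A_bar = avg_heads(cam, grad)                 # rule 5: mean over heads of (grad * cam)+
    R = torch.eye(tokens)
    R = R + apply_self_attention_rules(R, A_bar) # rule 6: R <- R + A_bar @ R
    assert R.shape == (tokens, tokens)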
+
+def generate_visualization(model, original_image, class_index=None):
+    with torch.enable_grad():
+        transformer_attribution = generate_relevance(model, original_image.unsqueeze(0), index=class_index).detach()
+    transformer_attribution = transformer_attribution.reshape(1, 1, 14, 14)
+    transformer_attribution = torch.nn.functional.interpolate(transformer_attribution, scale_factor=16, mode='bilinear')
+    transformer_attribution = transformer_attribution.reshape(224, 224).data.cpu().numpy()
+    transformer_attribution = (transformer_attribution - transformer_attribution.min()) / (transformer_attribution.max() - transformer_attribution.min())
+
+    image_transformer_attribution = original_image.permute(1, 2, 0).data.cpu().numpy()
+    image_transformer_attribution = (image_transformer_attribution - image_transformer_attribution.min()) / (image_transformer_attribution.max() - image_transformer_attribution.min())
+    vis = show_cam_on_image(image_transformer_attribution, transformer_attribution)
+    vis = np.uint8(255 * vis)
+    vis = cv2.cvtColor(np.array(vis), cv2.COLOR_RGB2BGR)
+    return vis
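
The hard-coded 14 and 16 encode the patch arithmetic: a 224×224 input with 16×16 patches yields a 14×14 grid of 196 tokens, which is exactly what `R[0, 1:]` returns once the CLS token is dropped. Illustrative bookkeeping (not part of the commit):

    import torch

    patch = 16
    side = 224 // patch                # 14 patches per side
    rel = torch.rand(side * side)      # 196 patch relevances from R[0, 1:]
    up = torch.nn.functional.interpolate(rel.reshape(1, 1, side, side),
                                         scale_factor=patch, mode='bilinear')
    assert up.shape == (1, 1, 224, 224)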
+
+model_finetuned = None

+normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+transform_224 = transforms.Compose([
+    transforms.ToTensor(),
+    normalize,
+])

+# Download human-readable labels for ImageNet.
+response = requests.get("https://git.io/JJkYN")
+labels = response.text.split("\n")

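
A hedged sanity check (not in the commit): `image_classifier` below indexes `labels[i]` for i in range(1000), so the downloaded file must supply at least the 1000 ImageNet class names.

    # Hypothetical guard; split("\n") may also leave a trailing empty entry.
    assert len(labels) >= 1000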
 def image_classifier(inp):
+    image = transform_224(inp)
+    print(image.shape)
+    #return model_finetuned(image.unsqueeze(0))
+    with torch.no_grad():
+        prediction = torch.nn.functional.softmax(model_finetuned(image.unsqueeze(0))[0], dim=0)
+        confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+    heatmap = generate_visualization(model_finetuned, image)
+    return confidences, heatmap

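
A hypothetical local invocation, mirroring what Gradio does per request (assumes `_load_model` below has already populated `model_finetuned`):

    from PIL import Image

    img = Image.open("samples/augreg_base/tank.png").convert("RGB").resize((224, 224))
    confidences, heatmap = image_classifier(img)
    print(sorted(confidences, key=confidences.get, reverse=True)[:3])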
+def _load_model(model_name: str):
+    global model_finetuned
+    path = hf_hub_download('Hila/RobustViT',
+                           f'{model_name}')
+
+    model = vit(pretrained=True)
+    model.eval()
+    model_finetuned = vit()
+    checkpoint = torch.load(path, map_location='cpu')
+    model_finetuned.load_state_dict(checkpoint['state_dict'])
+    model_finetuned.eval()
+
+_load_model('ar_base.tar')
+demo = gr.Interface(image_classifier, gr.inputs.Image(shape=(224,224)), [gr.outputs.Label(num_top_classes=3), gr.Image(shape=(224,224))], examples=["samples/augreg_base/tank.png", "samples/augreg_base/sundial.png", "samples/augreg_base/lizard.png", "samples/augreg_base/storck.png", "samples/augreg_base/hummingbird2.png", "samples/augreg_base/hummingbird.png"], capture_session=True)
+demo.launch(debug=True)
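
`gr.inputs.Image`, `gr.outputs.Label`, and `capture_session` belong to the legacy pre-3.x Gradio interface API, consistent with the era of the torch==1.7.1 pin. Under Gradio 3+, a rough equivalent (a sketch, not what the commit ships) would drop the namespaced classes:

    # Hypothetical Gradio 3+ form of the same interface.
    demo = gr.Interface(
        fn=image_classifier,
        inputs=gr.Image(type="pil"),
        outputs=[gr.Label(num_top_classes=3), gr.Image()],
        examples=["samples/augreg_base/tank.png"],
    )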
requirements.txt CHANGED
@@ -1,2 +1,4 @@
 torch==1.7.1
-timm
+timm
+torchvision==0.8.2
+opencv-python
samples/augreg_base/{a_2.png → hummingbird.png} RENAMED (file without changes)
samples/augreg_base/{a_3.png → hummingbird2.png} RENAMED (file without changes)
samples/augreg_base/{3_in.png → lizard.png} RENAMED (file without changes)
samples/augreg_base/{2_in.png → storck.png} RENAMED (file without changes)
samples/augreg_base/{1_in.png → sundial.png} RENAMED (file without changes)
samples/augreg_base/{a.png → tank.png} RENAMED (file without changes)