Spaces:
Runtime error
Runtime error
Commit
·
b059e33
1
Parent(s):
64c2f52
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import tensorflow as tf
|
2 |
+
import matplotlib.pyplot as plt
|
3 |
+
from matplotlib.pyplot import imread
|
4 |
+
import numpy as np
|
5 |
+
from tensorflow import keras
|
6 |
+
from keras.layers import Input, Lambda, Dense, Flatten, Rescaling
|
7 |
+
from keras.models import Model
|
8 |
+
from sklearn.metrics import confusion_matrix
|
9 |
+
from sklearn.metrics import ConfusionMatrixDisplay
|
10 |
+
from sklearn.metrics import auc
|
11 |
+
from sklearn.metrics import precision_score
|
12 |
+
from sklearn.metrics import recall_score
|
13 |
+
from sklearn.metrics import classification_report
|
14 |
+
from sklearn.metrics import roc_auc_score
|
15 |
+
from sklearn.metrics import roc_curve
|
16 |
+
from sklearn.metrics import RocCurveDisplay
|
17 |
+
from os import listdir, path
|
18 |
+
from skimage.util import random_noise
|
19 |
+
from skimage import img_as_float
|
20 |
+
import math
|
21 |
+
import cv2
|
22 |
+
import os
|
23 |
+
import PIL
|
24 |
+
import shutil
|
25 |
+
import matplotlib.cm as cm
|
26 |
+
|
27 |
+
|
28 |
+
#function for creating model
|
29 |
+
#returns model, its inputs, Xception's last conv output, the whole model's outputs
|
30 |
+
def create_model_mod(backbone = None):
    """Build and compile the binary classifier head on a conv backbone.

    Args:
        backbone: optional Keras model used as the feature extractor.
            Defaults to the module-level ``base_model``.
            NOTE(review): ``base_model`` is not defined anywhere in this
            file — it must be created (e.g. a pretrained Xception, per the
            comments) before this function is called; confirm upstream.

    Returns:
        (model, inputs, conv_features, outputs): the compiled model, its
        input tensor, the backbone's feature-map output (used later to
        build the Grad-CAM model), and the 1-unit logit output.
    """
    if backbone is None:
        backbone = base_model  # falls back to the module-level global

    inputs = keras.Input(shape = (160,160,3))
    # Normalize raw pixel values from [0, 255] into [0, 1].
    r = Rescaling(scale = 1./255)(inputs)
    # Run the backbone in inference mode (keeps BatchNorm statistics frozen).
    x = backbone(r, training = False)
    gap = keras.layers.GlobalAveragePooling2D()(x)
    # Linear activation: the head emits a raw logit, matching
    # BinaryCrossentropy(from_logits=True) below.
    outputs = keras.layers.Dense(1,activation = 'linear')(gap)
    model = keras.Model(inputs, outputs)

    model.compile(
        loss = keras.losses.BinaryCrossentropy(from_logits = True),
        optimizer = keras.optimizers.Adam(0.001),
        metrics = ["accuracy"]
    )

    return model, inputs, x, outputs
|
46 |
+
|
47 |
+
def create_heatmap(model, imgs):
    """Compute a Grad-CAM heatmap for a batch of images.

    Args:
        model: a Keras Model mapping images -> (conv feature maps,
            predictions), e.g. the grad_model built in gen_grad_img_single.
        imgs: batch of input images — assumed (N, 160, 160, 3) to match
            create_model_mod's input spec; TODO confirm against callers.

    Returns:
        (heatmap, preds): the ReLU'd, max-normalized class-activation map
        with values in [0, 1], and the raw model predictions (logits) as a
        NumPy array.
    """
    # Record the forward pass so gradients of the predictions with respect
    # to the conv feature maps can be taken.
    with tf.GradientTape() as tape:
        maps, preds = model(imgs)

    # Gradients of the predictions w.r.t. the feature maps.
    grads = tape.gradient(preds, maps)

    # Global average pooling: one importance weight per feature channel.
    gap_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # Weighted sum of the feature maps by their pooled gradients.
    heatmap = maps @ gap_grads[..., tf.newaxis]

    # Remove the trailing singleton channel dimension.
    heatmap = tf.squeeze(heatmap)

    # Keep only positive contributions to the prediction.
    heatmap = tf.keras.activations.relu(heatmap)

    # Fix: normalize to [0, 1]. Downstream code scales by 255 and casts to
    # uint8, which wraps around on unnormalized activations.
    max_val = tf.reduce_max(heatmap)
    if max_val > 0:
        heatmap = heatmap / max_val

    return heatmap, preds.numpy()
|
69 |
+
|
70 |
+
def superimpose_single(heatmap, img, alpha = 0.4):
    """Overlay a Grad-CAM heatmap onto an image using the jet colormap.

    Args:
        heatmap: 2-D activation map, expected in [0, 1] (clamped here as a
            safeguard).
        img: the original image array the heatmap is blended onto —
            presumably 160x160x3 to match the resize below; confirm caller.
        alpha: blend weight of the colorized heatmap.

    Returns:
        The superimposed image as a float array (not cast back to uint8).
    """
    # Fix: clamp to [0, 1] before scaling — values outside that range would
    # wrap around when cast to uint8.
    heatmap = np.uint8(255 * np.clip(heatmap, 0, 1))

    # Fix: matplotlib.cm.get_cmap was deprecated in 3.7 and removed in 3.9;
    # plt.get_cmap is the supported accessor.
    jet = plt.get_cmap("jet")

    # Use only the RGB channels of the colormap (drop alpha).
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    # Convert to a PIL image so it can be resized to the model input size.
    jet_heatmap = keras.utils.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((160,160))
    jet_heatmap = keras.utils.img_to_array(jet_heatmap)

    # Blend the colorized heatmap over the original image.
    superimposed_img = jet_heatmap * alpha + img

    return superimposed_img
|
90 |
+
|
91 |
+
def gen_grad_img_single(weights, img, alpha = 0.4):
    """Load weights, run Grad-CAM on a batch, and return the overlay.

    Args:
        weights: path to the saved model weights (e.g. "weights.h5").
        img: batch of input images — the code below indexes img[0], so a
            leading batch dimension is assumed; TODO confirm the Gradio
            caller supplies one.
        alpha: blend weight (currently unused here; superimpose_single's
            default is applied).

    Returns:
        (overlay, y_pred): the uint8 superimposed image for the first
        input, and the batch of 0/1 class decisions.
    """
    model_mod, model_input, conv_out, model_output = create_model_mod()
    model_mod.load_weights(weights)
    # Model that exposes both the conv feature maps and the final logit.
    grad_model = Model(model_input, [conv_out, model_output])
    heatmaps, y_pred = create_heatmap(grad_model, img)

    # Fix: the head is linear and trained with from_logits=True, so the
    # predictions are raw logits — the decision boundary is 0 (which is
    # sigmoid's 0.5 point), not 0.5.
    for i in range(len(y_pred)):
        y_pred[i] = 1 if y_pred[i] > 0 else 0

    img = superimpose_single(heatmaps, img[0])
    return np.array(img).astype('uint8'), y_pred
|
103 |
+
|
104 |
+
|
105 |
+
import gradio as gr  # fix: `gr` was used below but never imported

# Path to the trained classifier weights; loaded inside the callback.
weights = "weights.h5"


def _classify(image):
    """Gradio callback binding the fixed weights path.

    The original passed gen_grad_img_single directly as `fn`, which made
    Gradio feed the uploaded image into the `weights` parameter; it also
    called gen_grad_img_single at module level with an undefined `img`
    (NameError) — both removed here.
    """
    grad_img, y_pred = gen_grad_img_single(weights, image)
    return grad_img, str(y_pred)


demo = gr.Interface(
    fn = _classify,
    inputs = gr.Image(type = "pil", shape = (224,224)),
    outputs = ["image", gr.Textbox('y_pred', label = 'Prediction')]
)
demo.launch()
|
115 |
+
|