"""Gradio demo: classify MedNIST images with a MONAI DenseNet121 (6 classes)."""

import glob
import os

import gradio as gr
import torch
from monai.networks.nets import DenseNet121
from monai.transforms import (
    Compose,
    EnsureChannelFirst,
    LoadImage,
    ScaleIntensity,
)

# from PIL import Image

# 2D DenseNet121 classifier: single-channel (grayscale) inputs, 6 MedNIST classes.
# Weights are loaded onto the CPU so the demo also runs without a GPU.
model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=6)
model.load_state_dict(
    torch.load('weights/mednist_model.pth', map_location=torch.device('cpu'))
)

# Preprocessing: load the image from disk, move channels first, scale intensities to [0, 1].
test_transforms = Compose(
    [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity()]
)

# Class labels, in the index order produced by the model's output layer.
class_names = [
    'AbdomenCT', 'BreastMRI', 'CXR', 'ChestCT', 'Hand', 'HeadCT'
]

# examples_dir = './samples'
# example_files = glob.glob(os.path.join(examples_dir, '*.jpg'))

def classify_image(image_filepath):
    """Classify a single image file and return per-class probabilities."""
    image = test_transforms(image_filepath)

    model.eval()
    with torch.no_grad():
        pred = model(image.unsqueeze(dim=0))

    prob = torch.nn.functional.softmax(pred[0], dim=0)

    confidences = {class_names[i]: float(prob[i]) for i in range(len(class_names))}
    print(confidences)

    return confidences
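
# Quick sanity check outside the UI (a hypothetical standalone call; assumes the
# sample image below exists under ./samples):
#     classify_image('./samples/mednist_HeadCT07.png')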


# Gradio UI: grayscale image input on the left, top-3 class probabilities on the right.
with gr.Blocks(
    title="Medical Image Classification - ClassCat",
    css=".gradio-container {background:mintcream;}",
) as demo:
    gr.HTML("""<div style="font-family:'Times New Roman', 'Serif'; font-size:16pt; font-weight:bold; text-align:center; color:royalblue;">Medical Image Classification with MONAI</div>""")

    with gr.Row():
        input_image = gr.Image(type="filepath", image_mode="L", shape=(64, 64))
        output_label = gr.Label(label="Probabilities", num_top_classes=3)

    send_btn = gr.Button("Infer")
    send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)

    # Clickable sample thumbnails; selecting one loads that file into the image input.
    with gr.Row():
        gr.Examples(['./samples/mednist_AbdomenCT00.png'], label='Sample images: AbdomenCT', inputs=input_image)
        gr.Examples(['./samples/mednist_CXR02.png'], label='CXR', inputs=input_image)
        gr.Examples(['./samples/mednist_ChestCT08.png'], label='ChestCT', inputs=input_image)
        gr.Examples(['./samples/mednist_Hand01.png'], label='Hand', inputs=input_image)
        gr.Examples(['./samples/mednist_HeadCT07.png'], label='HeadCT', inputs=input_image)

#demo.queue(concurrency_count=3)
demo.launch(debug=True)
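
# To try the app locally (a minimal sketch; assumes this script is saved as app.py
# with the weights/ and samples/ folders alongside it):
#     python app.py
# Gradio then prints a local URL to open in a browser.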