import numpy as np
import gradio as gr
import torch
from huggingface_hub import hf_hub_download

def inference(repo_id, model_name, img):
    # Download the TorchScript ensemble from the Hugging Face Hub
    model_path = hf_hub_download(repo_id=repo_id, filename=model_name)
    model = torch.jit.load(model_path, map_location='cpu')
    n_channels = len(model.norm.mean)

    # Keep only the channels the model was trained on (e.g., drop an alpha channel)
    img = img[..., :n_channels]
    inp = torch.from_numpy(img).float()

    argmax, softmax, stdeviation = model(inp)

    # Scale class indices to the 0-255 range for display; return the uncertainty map as-is
    return argmax*255, stdeviation
 
    
title="deepflash2"
description='deepflash2 is a deep-learning pipeline for the segmentation of ambiguous microscopic images.\n deepflash2 uses deep model ensembles to achieve more accurate and reliable results; because of this, a prediction in this Space can take more than a minute.'
examples=[['matjesg/deepflash2_demo', 'cFOS_in_HC_ensemble.pt', 'cFOS_example.png'],
          ['matjesg/deepflash2_demo', 'YFP_in_CTX_ensemble.pt', 'YFP_example.png']
          ]
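
# A minimal local usage sketch, kept as comments so it does not run on Space
# startup: it shows how `inference` could be called with the first example above.
# Reading the image with imageio is an assumption; any reader that yields a
# NumPy array works.
#
# import imageio
# example_img = np.asarray(imageio.imread('cFOS_example.png'))
# mask, uncertainty = inference('matjesg/deepflash2_demo',
#                               'cFOS_in_HC_ensemble.pt', example_img)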

gr.Interface(inference,
             [gr.inputs.Textbox(placeholder='e.g., matjesg/cFOS_in_HC', label='repo_id'), 
              gr.inputs.Textbox(placeholder='e.g., cFOS_in_HC_ensemble.pt', label='model_name'),
              gr.inputs.Image(type='numpy', label='Input image')
             ],
             [gr.outputs.Image(label='Segmentation Mask'),
              gr.outputs.Image(label='Uncertainty Map')],
             title=title,
             description=description,
             examples=examples,
             ).launch()