Started setting up the app + docker builds [no ci]
Browse files- .dockerignore +1 -0
- .github/workflows/deploy.yml +20 -0
- .github/workflows/filesize.yml +16 -0
- .gitignore +1 -0
- Dockerfile +70 -0
- README.md +28 -1
- app.py +19 -0
- neukit/__init__.py +0 -0
- neukit/gui.py +94 -0
- neukit/utils.py +61 -0
- requirements.txt +3 -0
.dockerignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
venv/
|
.github/workflows/deploy.yml
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Mirrors the main branch to the Hugging Face Space on every push.
name: Sync to Hugging Face hub
on:
  push:
    branches: [ main ]

  # to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      # full history + LFS objects are required for a clean push to the Space
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true
      - name: Push to hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: git push https://andreped:[email protected]/spaces/andreped/neukit main
|
.github/workflows/filesize.yml
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Blocks PRs that add files too large to sync to Hugging Face Spaces.
name: Check file size
on: # or directly `on: [push]` to run the action on every push on any branch
  pull_request:
    branches: [ main ]

  # to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  # renamed from "sync-to-hub" (copy-paste leftover from deploy.yml);
  # the job id is internal to this workflow, so the rename is behavior-neutral
  check-file-size:
    runs-on: ubuntu-latest
    steps:
      - name: Check large files
        uses: ActionsDesk/[email protected]
        with:
          filesizelimit: 10485760 # this is 10MB so we can sync to HF Spaces
|
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
venv/
|
Dockerfile
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

# creates virtual ubuntu in docker image
FROM ubuntu:22.04

# set language, format and stuff
ENV LANG=C.UTF-8 LC_ALL=C.UTF-8

# NOTE: using -y is convenient to automatically answer yes to all the questions
# install python3.7 from the deadsnakes PPA and make it the default python3
# (consolidated into fewer layers; consistently use apt-get, which is the
# stable CLI intended for scripts, instead of mixing apt and apt-get)
RUN apt-get update -y && apt-get upgrade -y && \
    apt-get install -y software-properties-common && \
    add-apt-repository ppa:deadsnakes/ppa -y && \
    apt-get update -y && \
    apt-get install -y python3.7 python3.7-distutils && \
    update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 1

# installing other libraries: pip, tools, and native build deps for the wheels
RUN apt-get update && apt-get install -y \
    python3-pip sudo curl nano git \
    libblas-dev liblapack-dev gfortran libpng-dev python3-dev \
    wget unzip
# RUN apt-get -y install cmake curl

WORKDIR /code

# install dependencies
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# resolve issue with tf==2.4 and gradio dependency collision issue
RUN pip install --force-reinstall typing_extensions==4.0.0

# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user

# Switch to the "user" user
USER user

# Set home to the user's home directory
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Copy the current directory contents into the container at $HOME/app setting the owner to the user
COPY --chown=user . $HOME/app

# Download pretrained meningioma model (the URL ships MRI_Meningioma, not parenchyma)
RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-MRI_Meningioma-ONNX-v12.zip" && \
    unzip "Raidionics-MRI_Meningioma-ONNX-v12.zip" && mkdir -p resources/models/ && mv MRI_Meningioma/ resources/models/MRI_Meningioma/

# Download test sample
RUN pip install gdown && gdown "https://drive.google.com/uc?id=1shjSrFjS4PHE5sTku30PZTLPZpGu24o3"

# CMD ["/bin/bash"]
# BUGFIX: app.py is copied to $HOME/app (the current WORKDIR), not to demo/,
# so the old "demo/app.py" path would fail at container start
CMD ["python3", "app.py"]
|
README.md
CHANGED
@@ -1 +1,28 @@
|
|
1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: 'neukit: automatic meningioma segmentation from T1-weighted MRI'
|
3 |
+
colorFrom: indigo
|
4 |
+
colorTo: indigo
|
5 |
+
sdk: docker
|
6 |
+
app_port: 7860
|
7 |
+
emoji: 🔎
|
8 |
+
pinned: false
|
9 |
+
license: mit
|
10 |
+
app_file: app.py
|
11 |
+
---
|
12 |
+
|
13 |
+
# neukit
|
14 |
+
|
15 |
+
## Usage
|
16 |
+
|
17 |
+
The software will be made openly available on Hugging Face spaces very soon. Stay tuned for more!
|
18 |
+
|
19 |
+
## Setup
|
20 |
+
|
21 |
+
For development of this software, follow these steps to build the docker image and run the app through it:
|
22 |
+
|
23 |
+
```
|
24 |
+
docker build -t neukit .
|
25 |
+
docker run -it -p 7860:7860 neukit
|
26 |
+
```
|
27 |
+
|
28 |
+
Then open `http://127.0.0.1:7860` in your favourite internet browser to view the demo.
|
app.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from neukit.gui import WebUI
|
2 |
+
|
3 |
+
|
4 |
+
def main():
    """Entry point: configure paths for the container and launch the web demo."""
    print("Launching demo...")

    # production path inside the docker container
    # (for local macOS testing use e.g. "/Users/andreped/workspace/livermask/")
    cwd = "/home/user/app/"

    model_name = "model.h5"  # assumed to lie in `cwd` directory
    class_name = "parenchyma"

    # build the gradio UI and start serving
    web_app = WebUI(model_name=model_name, class_name=class_name, cwd=cwd)
    web_app.run()


if __name__ == "__main__":
    main()
neukit/__init__.py
ADDED
File without changes
|
neukit/gui.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from .utils import load_ct_to_numpy, load_pred_volume_to_numpy
|
3 |
+
from .compute import run_model
|
4 |
+
from .convert import nifti_to_glb
|
5 |
+
|
6 |
+
|
7 |
+
class WebUI:
    """Gradio interface: upload a NIfTI volume, run segmentation, and browse
    the result as annotated 2D slices plus a rendered 3D surface mesh.

    Args:
        model_name: file name of the model, assumed to lie in `cwd`.
        class_name: label shown for the segmented structure.
        cwd: working directory holding the model and example volume.
    """

    def __init__(self, model_name: str = None, class_name: str = None, cwd: str = None):
        # global states: per-slice CT images and prediction masks
        self.images = []
        self.pred_images = []

        # @TODO: This should be dynamically set based on chosen volume size
        self.nb_slider_items = 100

        self.model_name = model_name
        self.class_name = class_name
        self.cwd = cwd

        # define widgets not to be rendered immediately, but later on
        self.slider = gr.Slider(1, self.nb_slider_items, value=1, step=1, label="Which 2D slice to show")
        self.volume_renderer = gr.Model3D(
            clear_color=[0.0, 0.0, 0.0, 0.0],
            label="3D Model",
            visible=True
        ).style(height=512)

    def combine_ct_and_seg(self, img, pred):
        # pair a slice with its mask in the format gr.AnnotatedImage expects
        return (img, [(pred, self.class_name)])

    def upload_file(self, file):
        # gradio hands over a tempfile wrapper; expose its path
        return file.name

    def load_mesh(self, mesh_file_name, model_name):
        """Run the model on the uploaded volume, cache the 2D slices for the
        slice browser, and return the path of the extracted surface mesh."""
        path = mesh_file_name.name
        run_model(path, model_name)
        # NOTE(review): assumes run_model() writes "prediction-livermask.nii"
        # into the current working directory -- confirm against run_model
        nifti_to_glb("prediction-livermask.nii")
        self.images = load_ct_to_numpy(path)
        self.pred_images = load_pred_volume_to_numpy("./prediction-livermask.nii")
        self.slider = self.slider.update(value=2)
        return "./prediction.obj"

    def get_img_pred_pair(self, k):
        # slider is 1-based, the slice lists are 0-based
        k = int(k) - 1
        out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
        out[k] = gr.AnnotatedImage.update(self.combine_ct_and_seg(self.images[k], self.pred_images[k]), visible=True)
        return out

    def run(self):
        """Assemble the Blocks layout and launch the server on port 7860."""
        with gr.Blocks() as demo:

            with gr.Row().style(equal_height=True):
                # BUGFIX: ".nii.nz" was a typo -- compressed NIfTI is ".nii.gz"
                file_output = gr.File(
                    file_types=[".nii", ".nii.gz"],
                    file_count="single"
                ).style(full_width=False, size="sm")
                file_output.upload(self.upload_file, file_output, file_output)

                run_btn = gr.Button("Run analysis").style(full_width=False, size="sm")
                run_btn.click(
                    fn=lambda x: self.load_mesh(x, model_name=self.cwd + self.model_name),
                    inputs=file_output,
                    outputs=self.volume_renderer
                )

            with gr.Row().style(equal_height=True):
                gr.Examples(
                    examples=[self.cwd + "test-volume.nii"],
                    inputs=file_output,
                    outputs=file_output,
                    fn=self.upload_file,
                    cache_examples=True,
                )

            with gr.Row().style(equal_height=True):
                with gr.Box():
                    image_boxes = []
                    for i in range(self.nb_slider_items):
                        # BUGFIX: the slider default is 1, which maps to slice
                        # index 0 in get_img_pred_pair, so the first box
                        # (i == 0) must start visible (was i == 1)
                        t = gr.AnnotatedImage(visible=(i == 0))\
                            .style(color_map={self.class_name: "#ffae00"}, height=512, width=512)
                        image_boxes.append(t)

                    self.slider.change(self.get_img_pred_pair, self.slider, image_boxes)

                with gr.Box():
                    self.volume_renderer.render()

            with gr.Row():
                self.slider.render()

            # sharing app publicly -> share=True: https://gradio.app/sharing-your-app/
            # inference times > 60 seconds -> need queue(): https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
            demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)
|
neukit/utils.py
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import nibabel as nib
|
3 |
+
from nibabel.processing import resample_to_output
|
4 |
+
from skimage.measure import marching_cubes
|
5 |
+
|
6 |
+
|
7 |
+
def load_ct_to_numpy(data_path):
    """Load a CT volume from NIfTI, window it to [-150, 250] HU, rescale to
    uint8 [0, 255], and return the volume as a list of 2D slices.

    Args:
        data_path: path to the NIfTI file, or a file-like object carrying a
            ``.name`` attribute (e.g. a gradio upload).

    Returns:
        List of 2D uint8 numpy arrays, one per slice along the last axis.
    """
    # gradio passes a tempfile wrapper; extract the underlying path
    if not isinstance(data_path, str):
        data_path = data_path.name

    image = nib.load(data_path)
    data = image.get_fdata()

    # rotate in-plane to match the viewer orientation
    data = np.rot90(data, k=1, axes=(0, 1))

    # clip to a soft-tissue intensity window, then rescale to 8-bit
    data = np.clip(data, -150, 250)
    data = data - np.amin(data)
    data = data / np.amax(data) * 255
    data = data.astype("uint8")

    return [data[..., i] for i in range(data.shape[-1])]
|
25 |
+
|
26 |
+
|
27 |
+
def load_pred_volume_to_numpy(data_path):
    """Load a predicted segmentation from NIfTI, binarize it (any value > 0
    becomes 1), and return the volume as a list of 2D slices.

    Args:
        data_path: path to the NIfTI file, or a file-like object carrying a
            ``.name`` attribute (e.g. a gradio upload).

    Returns:
        List of 2D uint8 numpy arrays, one per slice along the last axis.
    """
    # gradio passes a tempfile wrapper; extract the underlying path
    if not isinstance(data_path, str):
        data_path = data_path.name

    image = nib.load(data_path)
    data = image.get_fdata()

    # rotate in-plane to match the viewer orientation
    data = np.rot90(data, k=1, axes=(0, 1))

    # collapse all foreground labels into a single binary mask
    data = (data > 0).astype("uint8")

    return [data[..., i] for i in range(data.shape[-1])]
|
41 |
+
|
42 |
+
|
43 |
+
def nifti_to_glb(path, output="prediction.obj"):
    """Extract a surface mesh from a binary NIfTI segmentation and write it
    as a Wavefront OBJ file.

    NOTE(review): despite the name, this writes .obj, not .glb -- consider
    renaming or switching format.

    Args:
        path: path to the NIfTI segmentation volume.
        output: path of the OBJ file to write.
    """
    # load NIfTI and resample to isotropic 1 mm spacing (trilinear)
    image = nib.load(path)
    resampled = resample_to_output(image, [1, 1, 1], order=1)
    data = resampled.get_fdata().astype("uint8")

    # extract surface at the 0/1 boundary; OBJ indices are 1-based
    verts, faces, normals, values = marching_cubes(data, 0)
    faces += 1

    with open(output, 'w') as thefile:
        thefile.writelines(f"v {v[0]} {v[1]} {v[2]}\n" for v in verts)
        thefile.writelines(f"vn {n[0]} {n[1]} {n[2]}\n" for n in normals)
        # "a//a" means vertex index a with normal index a (no texture coords)
        thefile.writelines(f"f {a}//{a} {b}//{b} {c}//{c}\n" for a, b, c in faces)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
raidionicsrads @ https://github.com/dbouget/raidionics_rads_lib/releases/download/v1.1.0/raidionicsrads-1.1.0-py3-none-manylinux1_x86_64.whl
|
2 |
+
onnxruntime-gpu==1.12.1
|
3 |
+
gradio==3.32.0
|