andreped committed on
Commit
9c90e37
1 Parent(s): b72a232

Working prototype - to be refactored

Browse files
Files changed (6) hide show
  1. .dockerignore +9 -0
  2. .gitignore +9 -0
  3. app.py +3 -6
  4. neukit/gui.py +25 -19
  5. neukit/inference.py +73 -0
  6. requirements.txt +4 -2
.dockerignore CHANGED
@@ -1 +1,10 @@
1
  venv/
 
 
 
 
 
 
 
 
 
 
1
  venv/
2
+ *__pycache__/
3
+ resources/
4
+ *.DS_Store
5
+ *.nii
6
+ *.nii.gz
7
+ *.nrrd
8
+ *.obj
9
+ *log.csv
10
+ *.ini
.gitignore CHANGED
@@ -1 +1,10 @@
1
  venv/
 
 
 
 
 
 
 
 
 
 
1
  venv/
2
+ *__pycache__/
3
+ resources/
4
+ *.DS_Store
5
+ *.nii
6
+ *.nii.gz
7
+ *.nrrd
8
+ *.obj
9
+ *log.csv
10
+ *.ini
app.py CHANGED
@@ -4,14 +4,11 @@ from neukit.gui import WebUI
4
  def main():
5
  print("Launching demo...")
6
 
7
- # cwd = "/Users/andreped/workspace/livermask/" # local testing -> macOS
8
- cwd = "/home/user/app/" # production -> docker
9
-
10
- model_name = "model.h5" # assumed to lie in `cwd` directory
11
- class_name = "parenchyma"
12
 
13
  # initialize and run app
14
- app = WebUI(model_name=model_name, class_name=class_name, cwd=cwd)
15
  app.run()
16
 
17
 
 
4
  def main():
5
  print("Launching demo...")
6
 
7
+ cwd = "/Users/andreped/workspace/neukit/" # local testing -> macOS
8
+ # cwd = "/home/user/app/" # production -> docker
 
 
 
9
 
10
  # initialize and run app
11
+ app = WebUI(cwd=cwd)
12
  app.run()
13
 
14
 
neukit/gui.py CHANGED
@@ -1,17 +1,16 @@
1
  import gradio as gr
2
- from .utils import load_ct_to_numpy, load_pred_volume_to_numpy
3
- from .compute import run_model
4
- from .convert import nifti_to_glb
5
 
6
 
7
  class WebUI:
8
- def __init__(self, model_name:str = None, class_name:str = None, cwd:str = None):
9
  # global states
10
  self.images = []
11
  self.pred_images = []
12
 
13
  # @TODO: This should be dynamically set based on chosen volume size
14
- self.nb_slider_items = 100
15
 
16
  self.model_name = model_name
17
  self.class_name = class_name
@@ -31,12 +30,19 @@ class WebUI:
31
  def upload_file(self, file):
32
  return file.name
33
 
34
- def load_mesh(self, mesh_file_name, model_name):
 
 
 
 
 
35
  path = mesh_file_name.name
36
- run_model(path, model_name)
37
- nifti_to_glb("prediction-livermask.nii")
 
 
38
  self.images = load_ct_to_numpy(path)
39
- self.pred_images = load_pred_volume_to_numpy("./prediction-livermask.nii")
40
  self.slider = self.slider.update(value=2)
41
  return "./prediction.obj"
42
 
@@ -51,26 +57,26 @@ class WebUI:
51
 
52
  with gr.Row().style(equal_height=True):
53
  file_output = gr.File(
54
- file_types=[".nii", ".nii.nz"],
55
  file_count="single"
56
  ).style(full_width=False, size="sm")
57
  file_output.upload(self.upload_file, file_output, file_output)
58
 
59
  run_btn = gr.Button("Run analysis").style(full_width=False, size="sm")
60
  run_btn.click(
61
- fn=lambda x: self.load_mesh(x, model_name=self.cwd + self.model_name),
62
  inputs=file_output,
63
  outputs=self.volume_renderer
64
  )
65
 
66
- with gr.Row().style(equal_height=True):
67
- gr.Examples(
68
- examples=[self.cwd + "test-volume.nii"],
69
- inputs=file_output,
70
- outputs=file_output,
71
- fn=self.upload_file,
72
- cache_examples=True,
73
- )
74
 
75
  with gr.Row().style(equal_height=True):
76
  with gr.Box():
 
1
  import gradio as gr
2
+ from .utils import load_ct_to_numpy, load_pred_volume_to_numpy, nifti_to_glb
3
+ from .inference import run_model
 
4
 
5
 
6
  class WebUI:
7
+ def __init__(self, model_name:str = None, class_name:str = "meningioma", cwd:str = "/home/user/app/"):
8
  # global states
9
  self.images = []
10
  self.pred_images = []
11
 
12
  # @TODO: This should be dynamically set based on chosen volume size
13
+ self.nb_slider_items = 150
14
 
15
  self.model_name = model_name
16
  self.class_name = class_name
 
30
  def upload_file(self, file):
31
  return file.name
32
 
33
def get_file_extension(self, file):
    """Return the (possibly compound) extension of *file*, e.g. 'nii.gz'.

    Everything after the first '.' in the basename counts as the
    extension; a name without a dot yields an empty string.
    """
    basename = file.rsplit("/", 1)[-1]
    # partition at the first dot: ("volume", ".", "nii.gz")
    _, _, extension = basename.partition(".")
    return extension
37
+
38
def load_mesh(self, mesh_file_name):
    """Load the uploaded volume and its prediction into the viewer state.

    Returns the path of the mesh rendered in the 3D viewer.
    """
    volume_path = mesh_file_name.name
    extension = self.get_file_extension(volume_path)
    prediction_path = "./prediction." + extension
    # Inference is disabled in this prototype:
    # run_model(volume_path, model_path=self.cwd + "resources/models/")
    # nifti_to_glb("prediction." + extension)

    self.images = load_ct_to_numpy(volume_path)
    self.pred_images = load_pred_volume_to_numpy(prediction_path)
    self.slider = self.slider.update(value=2)
    return "./prediction.obj"
48
 
 
57
 
58
  with gr.Row().style(equal_height=True):
59
  file_output = gr.File(
60
+ #file_types=[".nii", ".nii.gz"],
61
  file_count="single"
62
  ).style(full_width=False, size="sm")
63
  file_output.upload(self.upload_file, file_output, file_output)
64
 
65
  run_btn = gr.Button("Run analysis").style(full_width=False, size="sm")
66
  run_btn.click(
67
+ fn=lambda x: self.load_mesh(x),
68
  inputs=file_output,
69
  outputs=self.volume_renderer
70
  )
71
 
72
+ #with gr.Row().style(equal_height=True):
73
+ #gr.Examples(
74
+ # examples=[self.cwd + "test-volume.nii"],
75
+ # inputs=file_output,
76
+ # outputs=file_output,
77
+ # fn=self.upload_file,
78
+ # cache_examples=True,
79
+ #)
80
 
81
  with gr.Row().style(equal_height=True):
82
  with gr.Box():
neukit/inference.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import shutil
import configparser
import logging
import traceback


# Accepted values for the `verbose` argument, mapped to logging levels.
_LOG_LEVELS = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "error": logging.ERROR,
}


def run_model(input_path: str, model_path: str, verbose: str = "info", task: str = "MRI_Meningioma"):
    """Segment a single MRI volume with the Raidionics RADS pipeline.

    The input volume is moved into a temporary `./patient/T0/` directory
    (the layout `run_rads` expects), a `rads_config.ini` is generated, and
    the resulting annotation is renamed to `./prediction.<ext>` in the
    current working directory. Temporary directories are removed afterwards,
    even if inference fails.

    Args:
        input_path: path to the input volume (e.g. `.nii`, `.nii.gz`).
        model_path: directory containing the downloaded model folders.
        verbose: logging verbosity; one of "debug", "info", "error".
        task: model/task subfolder name inside `model_path`.

    Raises:
        ValueError: if `verbose` is not a supported value.
    """
    logging.basicConfig()
    if verbose not in _LOG_LEVELS:
        raise ValueError(f"Unsupported verbose value provided: {verbose}")
    logging.getLogger().setLevel(_LOG_LEVELS[verbose])

    # Split the filename into stem and (possibly compound) extension,
    # e.g. "volume.nii.gz" -> ("volume", "nii.gz").
    filename = os.path.basename(input_path)
    stem, _, extension = filename.partition(".")

    # Create sequence folder, rename patient, and add to temporary patient directory.
    patient_directory = "./patient/"
    renamed_input = patient_directory + "T0/" + stem + "-t1gd." + extension
    os.makedirs(patient_directory + "T0/", exist_ok=True)
    os.rename(input_path, renamed_input)

    # Define output directory to save results.
    output_path = "./result/prediction-" + stem + "/"
    os.makedirs(output_path, exist_ok=True)

    print("orig input:", input_path)
    print("updated input:", renamed_input)
    print("patient_dir:", patient_directory)
    print("output path:", output_path)
    print("model path:", model_path)

    # Setting up the configuration file consumed by run_rads.
    rads_config = configparser.ConfigParser()
    rads_config.add_section('Default')
    rads_config.set('Default', 'task', 'neuro_diagnosis')
    rads_config.set('Default', 'caller', '')
    rads_config.add_section('System')
    rads_config.set('System', 'gpu_id', "-1")  # -1 -> CPU-only inference
    rads_config.set('System', 'input_folder', patient_directory)
    rads_config.set('System', 'output_folder', output_path)
    rads_config.set('System', 'model_folder', model_path)
    rads_config.set('System', 'pipeline_filename', os.path.join(model_path, task, 'pipeline.json'))
    rads_config.add_section('Runtime')
    rads_config.set('Runtime', 'reconstruction_method', 'thresholding')  # thresholding, probabilities
    rads_config.set('Runtime', 'reconstruction_order', 'resample_first')
    rads_config.set('Runtime', 'use_preprocessed_data', 'False')

    with open("rads_config.ini", "w") as f:
        rads_config.write(f)

    try:
        # Imported lazily: the package is heavy and only needed for inference.
        from raidionicsrads.compute import run_rads

        run_rads(config_filename='rads_config.ini')
    except Exception:
        # Best-effort: log the full traceback instead of only str(e).
        print(traceback.format_exc())

    try:
        # Rename and move the final result next to the app.
        os.rename(
            output_path + "T0/" + stem + "-t1gd_annotation-Tumor." + extension,
            "./prediction." + extension,
        )
    finally:
        # Clean up temporary directories even if inference/rename failed,
        # so a failed run does not leak ./patient/ and ./result/ trees.
        if os.path.exists(patient_directory):
            shutil.rmtree(patient_directory)
        if os.path.exists(output_path):
            shutil.rmtree(output_path)
requirements.txt CHANGED
@@ -1,3 +1,5 @@
1
- raidionicsrads @ https://github.com/dbouget/raidionics_rads_lib/releases/download/v1.1.0/raidionicsrads-1.1.0-py3-none-manylinux1_x86_64.whl
2
- onnxruntime-gpu==1.12.1
 
 
3
  gradio==3.32.0
 
1
+ #raidionicsrads @ https://github.com/dbouget/raidionics_rads_lib/releases/download/v1.1.0/raidionicsrads-1.1.0-py3-none-manylinux1_x86_64.whl
2
+ raidionicsrads @ https://github.com/dbouget/raidionics_rads_lib/releases/download/v1.1.0/raidionicsrads-1.1.0-py3-none-macosx_10_15_x86_64.whl
3
+ #onnxruntime-gpu==1.12.1
4
+ #onnxruntime==1.12.1
5
  gradio==3.32.0