andreped committed
Commit a41b9f5
Parent: b472795

Refactored code base according to linting style

Files changed (3):
  1. neukit/gui.py +54 -22
  2. neukit/inference.py +51 -28
  3. neukit/utils.py +9 -5
neukit/gui.py CHANGED
@@ -1,10 +1,13 @@
 import gradio as gr
-from .utils import load_ct_to_numpy, load_pred_volume_to_numpy, nifti_to_glb
+
 from .inference import run_model
+from .utils import load_ct_to_numpy
+from .utils import load_pred_volume_to_numpy
+from .utils import nifti_to_glb
 
 
 class WebUI:
-    def __init__(self, model_name:str = None, cwd:str = "/home/user/app/"):
+    def __init__(self, model_name: str = None, cwd: str = "/home/user/app/"):
         # global states
         self.images = []
         self.pred_images = []
@@ -15,7 +18,7 @@ class WebUI:
         self.model_name = model_name
         self.cwd = cwd
 
-        self.class_name = "meningioma"  # default - but can be updated based on which task is chosen from dropdown
+        self.class_name = "meningioma"  # default
         self.class_names = {
             "meningioma": "MRI_Meningioma",
             "low-grade": "MRI_LGGlioma",
@@ -33,41 +36,55 @@ class WebUI:
         }
 
         # define widgets not to be rendered immediantly, but later on
-        self.slider = gr.Slider(1, self.nb_slider_items, value=1, step=1, label="Which 2D slice to show")
+        self.slider = gr.Slider(
+            1,
+            self.nb_slider_items,
+            value=1,
+            step=1,
+            label="Which 2D slice to show",
+        )
         self.volume_renderer = gr.Model3D(
             clear_color=[0.0, 0.0, 0.0, 0.0],
             label="3D Model",
             visible=True,
             elem_id="model-3d",
         ).style(height=512)
-
+
     def set_class_name(self, value):
         print("Changed task to:", value)
         self.class_name = value
 
     def combine_ct_and_seg(self, img, pred):
         return (img, [(pred, self.class_name)])
-
+
     def upload_file(self, file):
         return file.name
-
+
     def process(self, mesh_file_name):
         path = mesh_file_name.name
-        run_model(path, model_path=self.cwd + "resources/models/", task=self.class_names[self.class_name], name=self.result_names[self.class_name])
+        run_model(
+            path,
+            model_path=self.cwd + "resources/models/",
+            task=self.class_names[self.class_name],
+            name=self.result_names[self.class_name],
+        )
         nifti_to_glb("prediction.nii.gz")
 
         self.images = load_ct_to_numpy(path)
         self.pred_images = load_pred_volume_to_numpy("./prediction.nii.gz")
         return "./prediction.obj"
-
+
     def get_img_pred_pair(self, k):
         k = int(k) - 1
         out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
-        out[k] = gr.AnnotatedImage.update(self.combine_ct_and_seg(self.images[k], self.pred_images[k]), visible=True)
+        out[k] = gr.AnnotatedImage.update(
+            self.combine_ct_and_seg(self.images[k], self.pred_images[k]),
+            visible=True,
+        )
         return out
 
     def run(self):
-        css="""
+        css = """
         #model-3d {
         height: 512px;
         }
@@ -87,7 +104,8 @@ class WebUI:
                 model_selector = gr.Dropdown(
                     list(self.class_names.keys()),
                     label="Task",
-                    info="Which task to perform - one model for each brain tumor type and brain extraction",
+                    info="Which task to perform - one model for"
+                    "each brain tumor type and brain extraction",
                     multiselect=False,
                     size="sm",
                 )
@@ -97,39 +115,53 @@ class WebUI:
                     outputs=None,
                 )
 
-                run_btn = gr.Button("Run analysis").style(full_width=False, size="lg")
+                run_btn = gr.Button("Run analysis").style(
+                    full_width=False, size="lg"
+                )
                 run_btn.click(
                     fn=lambda x: self.process(x),
                     inputs=file_output,
                     outputs=self.volume_renderer,
                 )
-
+
             with gr.Row():
                 gr.Examples(
-                    examples=[self.cwd + "RegLib_C01_1.nii", self.cwd + "RegLib_C01_2.nii"],
+                    examples=[
+                        self.cwd + "RegLib_C01_1.nii",
+                        self.cwd + "RegLib_C01_2.nii",
+                    ],
                     inputs=file_output,
                     outputs=file_output,
                     fn=self.upload_file,
                     cache_examples=True,
                 )
-
+
             with gr.Row():
                 with gr.Box():
                     with gr.Column():
                         image_boxes = []
                         for i in range(self.nb_slider_items):
                             visibility = True if i == 1 else False
-                            t = gr.AnnotatedImage(visible=visibility, elem_id="model-2d")\
-                                .style(color_map={self.class_name: "#ffae00"}, height=512, width=512)
+                            t = gr.AnnotatedImage(
+                                visible=visibility, elem_id="model-2d"
+                            ).style(
+                                color_map={self.class_name: "#ffae00"},
+                                height=512,
+                                width=512,
+                            )
                             image_boxes.append(t)
 
-                        self.slider.input(self.get_img_pred_pair, self.slider, image_boxes)
+                        self.slider.input(
+                            self.get_img_pred_pair, self.slider, image_boxes
+                        )
 
                         self.slider.render()
-
+
                 with gr.Box():
                     self.volume_renderer.render()
 
-        # sharing app publicly -> share=True: https://gradio.app/sharing-your-app/
-        # inference times > 60 seconds -> need queue(): https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
+        # sharing app publicly -> share=True:
+        # https://gradio.app/sharing-your-app/
+        # inference times > 60 seconds -> need queue():
+        # https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
        demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)
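
For context, a minimal sketch of how the refactored WebUI class could be driven from an entry-point script. The launcher file itself is not part of this commit, so the wiring below is an assumption based only on the constructor and run() shown above.

# hypothetical launcher, not included in this commit
from neukit.gui import WebUI

def main():
    # cwd must point at the app root that contains resources/models/
    ui = WebUI(model_name=None, cwd="/home/user/app/")
    ui.run()  # builds the gr.Blocks layout and serves on 0.0.0.0:7860

if __name__ == "__main__":
    main()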
neukit/inference.py CHANGED
@@ -1,23 +1,28 @@
-import os
-import shutil
 import configparser
 import logging
-import traceback
+import os
+import shutil
 
 
-def run_model(input_path: str, model_path: str, verbose: str = "info", task: str = "MRI_Meningioma", name: str = "Tumor"):
+def run_model(
+    input_path: str,
+    model_path: str,
+    verbose: str = "info",
+    task: str = "MRI_Meningioma",
+    name: str = "Tumor",
+):
     logging.basicConfig()
     logging.getLogger().setLevel(logging.WARNING)
 
-    if verbose == 'debug':
+    if verbose == "debug":
         logging.getLogger().setLevel(logging.DEBUG)
-    elif verbose == 'info':
+    elif verbose == "info":
         logging.getLogger().setLevel(logging.INFO)
-    elif verbose == 'error':
+    elif verbose == "error":
         logging.getLogger().setLevel(logging.ERROR)
     else:
         raise ValueError("Unsupported verbose value provided:", verbose)
-
+
     # delete patient/result folder if they exist
     if os.path.exists("./patient/"):
         shutil.rmtree("./patient/")
@@ -25,33 +30,42 @@ def run_model(input_path: str, model_path: str, verbose: str = "info", task: str
         shutil.rmtree("./result/")
 
     try:
-        # create sequence folder, rename patient, and add to temporary patient directory
+        # setup temporary patient directory
        filename = input_path.split("/")[-1]
         splits = filename.split(".")
         extension = ".".join(splits[1:])
         patient_directory = "./patient/"
         os.makedirs(patient_directory + "T0/", exist_ok=True)
-        shutil.copy(input_path, patient_directory + "T0/" + splits[0] + "-t1gd." + extension)
-
+        shutil.copy(
+            input_path,
+            patient_directory + "T0/" + splits[0] + "-t1gd." + extension,
+        )
+
         # define output directory to save results
         output_path = "./result/prediction-" + splits[0] + "/"
         os.makedirs(output_path, exist_ok=True)
 
         # Setting up the configuration file
         rads_config = configparser.ConfigParser()
-        rads_config.add_section('Default')
-        rads_config.set('Default', 'task', 'neuro_diagnosis')
-        rads_config.set('Default', 'caller', '')
-        rads_config.add_section('System')
-        rads_config.set('System', 'gpu_id', "-1")
-        rads_config.set('System', 'input_folder', patient_directory)
-        rads_config.set('System', 'output_folder', output_path)
-        rads_config.set('System', 'model_folder', model_path)
-        rads_config.set('System', 'pipeline_filename', os.path.join(model_path, task, 'pipeline.json'))
-        rads_config.add_section('Runtime')
-        rads_config.set('Runtime', 'reconstruction_method', 'thresholding')  # thresholding, probabilities
-        rads_config.set('Runtime', 'reconstruction_order', 'resample_first')
-        rads_config.set('Runtime', 'use_preprocessed_data', 'False')
+        rads_config.add_section("Default")
+        rads_config.set("Default", "task", "neuro_diagnosis")
+        rads_config.set("Default", "caller", "")
+        rads_config.add_section("System")
+        rads_config.set("System", "gpu_id", "-1")
+        rads_config.set("System", "input_folder", patient_directory)
+        rads_config.set("System", "output_folder", output_path)
+        rads_config.set("System", "model_folder", model_path)
+        rads_config.set(
+            "System",
+            "pipeline_filename",
+            os.path.join(model_path, task, "pipeline.json"),
+        )
+        rads_config.add_section("Runtime")
+        rads_config.set(
+            "Runtime", "reconstruction_method", "thresholding"
+        )  # thresholding, probabilities
+        rads_config.set("Runtime", "reconstruction_order", "resample_first")
+        rads_config.set("Runtime", "use_preprocessed_data", "False")
 
         with open("rads_config.ini", "w") as f:
             rads_config.write(f)
@@ -59,11 +73,20 @@ def run_model(input_path: str, model_path: str, verbose: str = "info", task: str
         # finally, run inference
         from raidionicsrads.compute import run_rads
 
-        run_rads(config_filename='rads_config.ini')
-
+        run_rads(config_filename="rads_config.ini")
+
         # rename and move final result
-        os.rename("./result/prediction-" + splits[0] + "/T0/" + splits[0] + "-t1gd_annotation-" + name + ".nii.gz", "./prediction.nii.gz")
-
+        os.rename(
+            "./result/prediction-"
+            + splits[0]
+            + "/T0/"
+            + splits[0]
+            + "-t1gd_annotation-"
+            + name
+            + ".nii.gz",
+            "./prediction.nii.gz",
+        )
+
     except Exception as e:
         print(e)
 
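
For reference, run_model() can in principle be called on its own with the signature introduced above; the input path below reuses one of the example files referenced in gui.py but is otherwise a placeholder, and raidionicsrads must be installed for the in-function import to succeed.

# standalone use of run_model() - paths are placeholders
from neukit.inference import run_model

run_model(
    input_path="/home/user/app/RegLib_C01_1.nii",
    model_path="/home/user/app/resources/models/",
    verbose="info",
    task="MRI_Meningioma",  # must match a model folder under model_path
    name="Tumor",  # suffix of the annotation file to collect
)
# on success, the segmentation is written to ./prediction.nii.gz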
 
neukit/utils.py CHANGED
@@ -1,5 +1,5 @@
-import numpy as np
 import nibabel as nib
+import numpy as np
 from nibabel.processing import resample_to_output
 from skimage.measure import marching_cubes
 
@@ -52,12 +52,16 @@ def nifti_to_glb(path, output="prediction.obj"):
     verts, faces, normals, values = marching_cubes(data, 0)
     faces += 1
 
-    with open(output, 'w') as thefile:
+    with open(output, "w") as thefile:
         for item in verts:
-            thefile.write("v {0} {1} {2}\n".format(item[0],item[1],item[2]))
+            thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))
 
         for item in normals:
-            thefile.write("vn {0} {1} {2}\n".format(item[0],item[1],item[2]))
+            thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))
 
         for item in faces:
-            thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0],item[1],item[2]))
+            thefile.write(
+                "f {0}//{0} {1}//{1} {2}//{2}\n".format(
+                    item[0], item[1], item[2]
+                )
+            )
+ )