yuxindu committed on
Commit
1ed116c
·
1 Parent(s): d812c73

add config

Browse files
Files changed (1) hide show
  1. model_segvol_single.py +14 -26
model_segvol_single.py CHANGED
@@ -126,7 +126,6 @@ class SegVolProcessor():
126
  self.img_loader = transforms.LoadImage()
127
  self.transform4test = transforms.Compose(
128
  [
129
- ForegroundNormalization(keys=["image"]),
130
  DimTranspose(keys=["image", "label"]),
131
  MinMaxNormalization(),
132
  transforms.CropForegroundd(keys=["image", "label"], source_key="image"),
@@ -174,6 +173,7 @@ class SegVolProcessor():
174
  ct_voxel_ndarray = np.array(ct_voxel_ndarray).squeeze()
175
  ct_shape = ct_voxel_ndarray.shape
176
  ct_voxel_ndarray = np.expand_dims(ct_voxel_ndarray, axis=0)
 
177
  item['image'] = ct_voxel_ndarray
178
 
179
  # generate gt_voxel_ndarray
@@ -199,6 +199,19 @@ class SegVolProcessor():
199
  # transform
200
  return item['image'], item['label']
201
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
  def zoom_transform(self, ct_npy, gt_npy):
203
  item = {
204
  'image': ct_npy,
@@ -282,31 +295,6 @@ class DimTranspose(transforms.Transform):
282
  d[key] = np.swapaxes(d[key], -1, -3)
283
  return d
284
 
285
class ForegroundNormalization(transforms.Transform):
    """Dictionary transform: clip to foreground percentiles and z-score.

    For each configured key, intensities above the global mean are taken
    as foreground; their 0.05th/99.95th percentiles bound a clip, and
    their mean/std drive the standardization.
    """

    def __init__(self, keys):
        # Names of the entries in the data dict to normalize (e.g. ["image"]).
        self.keys = keys

    def __call__(self, data):
        out = dict(data)
        out.update({key: self.normalize(out[key]) for key in self.keys})
        return out

    def normalize(self, ct_narray):
        """Return *ct_narray* clipped to foreground bounds and standardized."""
        flat = ct_narray.copy().flatten()
        threshold = np.mean(flat)
        foreground = flat[flat > threshold]
        hi = np.percentile(foreground, 99.95)
        lo = np.percentile(foreground, 0.05)
        mu = np.mean(foreground)
        sigma = np.std(foreground)
        clipped = np.clip(ct_narray, lo, hi)
        # Guard against a zero std so the division stays finite.
        return (clipped - mu) / max(sigma, 1e-8)
309
-
310
  # prompts
311
  def generate_box(pred_pre, bbox_shift=None):
312
  meaning_post_label = pred_pre # [h, w, d]
 
126
  self.img_loader = transforms.LoadImage()
127
  self.transform4test = transforms.Compose(
128
  [
 
129
  DimTranspose(keys=["image", "label"]),
130
  MinMaxNormalization(),
131
  transforms.CropForegroundd(keys=["image", "label"], source_key="image"),
 
173
  ct_voxel_ndarray = np.array(ct_voxel_ndarray).squeeze()
174
  ct_shape = ct_voxel_ndarray.shape
175
  ct_voxel_ndarray = np.expand_dims(ct_voxel_ndarray, axis=0)
176
+ ct_voxel_ndarray = self.ForegroundNorm(ct_voxel_ndarray)
177
  item['image'] = ct_voxel_ndarray
178
 
179
  # generate gt_voxel_ndarray
 
199
  # transform
200
  return item['image'], item['label']
201
 
202
def ForegroundNorm(self, ct_narray):
    """Clip to foreground intensity percentiles and z-score normalize.

    Foreground voxels are those strictly above the global mean intensity;
    their 0.05th/99.95th percentiles bound the clip, and their mean/std
    drive the standardization.

    Args:
        ct_narray: numpy array of CT intensities (any shape).

    Returns:
        Array of the same shape, clipped and standardized. A constant
        input (no voxel above the mean) is returned unchanged, since the
        foreground statistics are undefined in that case.
    """
    flat = ct_narray.copy().flatten()
    threshold = np.mean(flat)
    foreground = flat[flat > threshold]
    if foreground.size == 0:
        # Degenerate constant volume: np.percentile raises IndexError on
        # an empty array, so leave the input untouched instead of crashing.
        return ct_narray
    upper_bound = np.percentile(foreground, 99.95)
    lower_bound = np.percentile(foreground, 0.05)
    mean = np.mean(foreground)
    std = np.std(foreground)
    ct_narray = np.clip(ct_narray, lower_bound, upper_bound)
    # Guard against a zero std so the division stays finite.
    return (ct_narray - mean) / max(std, 1e-8)
214
+
215
  def zoom_transform(self, ct_npy, gt_npy):
216
  item = {
217
  'image': ct_npy,
 
295
  d[key] = np.swapaxes(d[key], -1, -3)
296
  return d
297
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
298
  # prompts
299
  def generate_box(pred_pre, bbox_shift=None):
300
  meaning_post_label = pred_pre # [h, w, d]