not-lain committed
Commit 16f833a · 1 Parent(s): 767912f

put sam2 cuda utils in its tab

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -20,12 +20,6 @@ def float32_high_matmul_precision():
     finally:
         torch.set_float32_matmul_precision("highest")
 
-# use bfloat16 for the entire notebook
-torch.autocast("cuda", dtype=torch.bfloat16).__enter__()
-# turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
-if torch.cuda.get_device_properties(0).major >= 8:
-    torch.backends.cuda.matmul.allow_tf32 = True
-    torch.backends.cudnn.allow_tf32 = True
 
 pipe = FluxFillPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
@@ -140,6 +134,12 @@ def rmbg(image=None, url=None):
 
 
 def mask_generation(image=None, d=None):
+    # use bfloat16 for the entire notebook
+    # torch.autocast("cuda", dtype=torch.bfloat16).__enter__()
+    # # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
+    # if torch.cuda.get_device_properties(0).major >= 8:
+    #     torch.backends.cuda.matmul.allow_tf32 = True
+    #     torch.backends.cudnn.allow_tf32 = True
     d = eval(d)  # convert this to dictionary
     with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
         predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2.1-hiera-large")
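
Note: the finally: block in the first hunk is the tail of the float32_high_matmul_precision helper. A minimal sketch of the presumed full context manager, assuming it switches float32 matmul precision to "high" on entry; only the restore to "highest" is visible in the diff, so the entry value is an inference from the helper's name:

import contextlib
import torch

@contextlib.contextmanager
def float32_high_matmul_precision():
    # Entry value "high" is an assumption inferred from the helper's name;
    # only the restore in the finally: block appears in the diff above.
    torch.set_float32_matmul_precision("high")
    try:
        yield
    finally:
        torch.set_float32_matmul_precision("highest")

Under that assumption, callers would wrap precision-sensitive work, e.g. with float32_high_matmul_precision(): pipe(...).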
 
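The second hunk moves the notebook-level CUDA settings out of module scope and into the SAM2 tab's handler, but leaves them commented out. A hedged sketch of mask_generation with that block re-enabled and scoped to the function; the torch.cuda.is_available() guard and the ast.literal_eval remark are suggestions, not part of the commit:

import torch
from sam2.sam2_image_predictor import SAM2ImagePredictor

def mask_generation(image=None, d=None):
    # TF32 only applies on Ampere-or-newer GPUs (compute capability >= 8);
    # the availability guard is an addition not present in the commit.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
    d = eval(d)  # convert this to dictionary (ast.literal_eval would be safer on untrusted input)
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
        predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2.1-hiera-large")
        ...  # rest of the handler as in app.py

Scoping the settings to the handler keeps the bfloat16/TF32 behavior from leaking into the other tabs, which matches the intent of the commit message.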