remove sam2
app.py CHANGED

@@ -6,8 +6,9 @@ from torchvision import transforms
 from transformers import AutoModelForImageSegmentation
 from diffusers import FluxFillPipeline
 from PIL import Image, ImageOps
-from sam2.sam2_image_predictor import SAM2ImagePredictor
-import numpy as np
+
+# from sam2.sam2_image_predictor import SAM2ImagePredictor
+# import numpy as np
 from simple_lama_inpainting import SimpleLama
 from contextlib import contextmanager

@@ -134,35 +135,36 @@ def rmbg(image=None, url=None):


 def mask_generation(image=None, d=None):
-    # use bfloat16 for the entire notebook
-    # torch.autocast("cuda", dtype=torch.bfloat16).__enter__()
-    # # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
-    # if torch.cuda.get_device_properties(0).major >= 8:
-    #     torch.backends.cuda.matmul.allow_tf32 = True
-    #     torch.backends.cudnn.allow_tf32 = True
-    d = eval(d) # convert this to dictionary
-    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
-        predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2.1-hiera-large")
-        predictor.set_image(image)
-        input_point = np.array(d["input_points"])
-        input_label = np.array(d["input_labels"])
-        masks, scores, logits = predictor.predict(
-            point_coords=input_point,
-            point_labels=input_label,
-            multimask_output=True,
-        )
-        sorted_ind = np.argsort(scores)[::-1]
-        masks = masks[sorted_ind]
-        scores = scores[sorted_ind]
-        logits = logits[sorted_ind]
-
-        out = []
-        for i in range(len(masks)):
-            m = Image.fromarray(masks[i] * 255).convert("L")
-            comp = Image.composite(image, m, m)
-            out.append((comp, f"image {i}"))
-
-        return out
+    return image
+    # # use bfloat16 for the entire notebook
+    # # torch.autocast("cuda", dtype=torch.bfloat16).__enter__()
+    # # # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
+    # # if torch.cuda.get_device_properties(0).major >= 8:
+    # #     torch.backends.cuda.matmul.allow_tf32 = True
+    # #     torch.backends.cudnn.allow_tf32 = True
+    # d = eval(d) # convert this to dictionary
+    # with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
+    #     predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2.1-hiera-large")
+    #     predictor.set_image(image)
+    #     input_point = np.array(d["input_points"])
+    #     input_label = np.array(d["input_labels"])
+    #     masks, scores, logits = predictor.predict(
+    #         point_coords=input_point,
+    #         point_labels=input_label,
+    #         multimask_output=True,
+    #     )
+    #     sorted_ind = np.argsort(scores)[::-1]
+    #     masks = masks[sorted_ind]
+    #     scores = scores[sorted_ind]
+    #     logits = logits[sorted_ind]
+
+    #     out = []
+    #     for i in range(len(masks)):
+    #         m = Image.fromarray(masks[i] * 255).convert("L")
+    #         comp = Image.composite(image, m, m)
+    #         out.append((comp, f"image {i}"))
+
+    #     return out


 def erase(image=None, mask=None):
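For reference, the path this commit disables is a standard SAM 2 point-prompt flow: parse the point prompts, run SAM2ImagePredictor, sort the candidate masks by predicted score, and composite each mask over the input image. The sketch below restates the commented-out body as a standalone function, assuming the sam2 package and the facebook/sam2.1-hiera-large checkpoint are available; the name sam2_masks, the prompt parameter, the ast.literal_eval call (in place of the app's eval) and the dropped bfloat16 autocast are illustrative simplifications, not code from app.py.

# Sketch only: mirrors the mask_generation body that this commit comments out.
import ast

import numpy as np
import torch
from PIL import Image
from sam2.sam2_image_predictor import SAM2ImagePredictor


def sam2_masks(image: Image.Image, prompt: str):
    # The app parsed the prompt string with eval(); literal_eval is the safer stand-in here.
    d = ast.literal_eval(prompt)
    predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2.1-hiera-large")
    with torch.inference_mode():
        predictor.set_image(np.array(image.convert("RGB")))
        masks, scores, _ = predictor.predict(
            point_coords=np.array(d["input_points"]),
            point_labels=np.array(d["input_labels"]),
            multimask_output=True,
        )
    out = []
    for i in np.argsort(scores)[::-1]:  # best-scoring mask first
        m = Image.fromarray((masks[i] * 255).astype(np.uint8)).convert("L")
        # Overlay the image through its own mask, as the original code did.
        out.append((Image.composite(image, m, m), f"image {i}"))
    return out

After this commit, mask_generation simply returns the image it was given, which is why the sam2 and numpy imports at the top of app.py could be commented out as well.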