jiachenl committed on
Commit
faeeb7e
1 Parent(s): 87d838c

update page

Files changed (1)
  1. app.py +17 -28
app.py CHANGED
@@ -48,10 +48,6 @@ output_dir="outputs"
  device = 'cuda'
  background_list = os.listdir('assets/backgrounds')
 
- #groundingdino_model = None
- #mam_predictor = None
- #generator = None
-
  # initialize MAM
  mam_model = networks.get_generator_m2m(seg='sam', m2m='sam_decoder_deep')
  mam_model.to(device)
@@ -68,31 +64,9 @@ generator.to(device)
 
  def run_grounded_sam(input_image, text_prompt, task_type, background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode):
 
- #global groundingdino_model, sam_predictor, generator
-
  # make dir
  os.makedirs(output_dir, exist_ok=True)
 
- #if mam_predictor is None:
- # initialize MAM
- # build model
- # mam_model = networks.get_generator_m2m(seg='sam', m2m='sam_decoder_deep')
- # mam_model.to(device)
-
- # load checkpoint
- # checkpoint = torch.load(mam_checkpoint, map_location=device)
- # mam_model.load_state_dict(utils.remove_prefix_state_dict(checkpoint['state_dict']), strict=True)
-
- # inference
- # mam_model = mam_model.eval()
-
- #if groundingdino_model is None:
- # grounding_dino_model = Model(model_config_path=GROUNDING_DINO_CONFIG_PATH, model_checkpoint_path=GROUNDING_DINO_CHECKPOINT_PATH, device=device)
-
- #if generator is None:
- # generator = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
- # generator.to(device)
-
  # load image
  image_ori = input_image["image"]
  scribble = input_image["mask"]
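The commented-out lazy initialization deleted in this hunk mirrors the module-level setup that now runs once at import time (the `# initialize MAM` block in the first hunk and the `generator.to(device)` context in this hunk header). A rough, hedged sketch of that eager setup is below; the checkpoint and config paths are placeholders, and the `groundingdino.util.inference.Model` import is an assumption about where the `Model` wrapper comes from.

```python
# Sketch of the eager, module-level initialization app.py relies on after this
# commit. Checkpoint/config paths below are placeholders, not the repo's values.
import torch
from diffusers import StableDiffusionPipeline
from groundingdino.util.inference import Model  # assumed import path for the Model wrapper

import networks  # local module from the Matting-Anything repo
import utils     # local module from the Matting-Anything repo

device = 'cuda'
mam_checkpoint = 'checkpoints/mam.pth'                          # placeholder path
GROUNDING_DINO_CONFIG_PATH = 'GroundingDINO_SwinT_OGC.py'       # placeholder path
GROUNDING_DINO_CHECKPOINT_PATH = 'groundingdino_swint_ogc.pth'  # placeholder path

# MAM: build the SAM-based matting model and load its checkpoint once at import time.
mam_model = networks.get_generator_m2m(seg='sam', m2m='sam_decoder_deep')
mam_model.to(device)
checkpoint = torch.load(mam_checkpoint, map_location=device)
mam_model.load_state_dict(utils.remove_prefix_state_dict(checkpoint['state_dict']), strict=True)
mam_model = mam_model.eval()

# GroundingDINO: detector used to turn text prompts into boxes.
grounding_dino_model = Model(
    model_config_path=GROUNDING_DINO_CONFIG_PATH,
    model_checkpoint_path=GROUNDING_DINO_CHECKPOINT_PATH,
    device=device,
)

# Stable Diffusion: background generator for the generated_by_text option.
generator = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
generator.to(device)
```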
@@ -245,8 +219,20 @@ if __name__ == "__main__":
  with block:
  gr.Markdown(
  """
- # Matting Anything Demo
- Welcome to the Matting Anything demo and upload your image to get started <br/> You may select different prompt types to get the alpha matte of target instance, and select different backgrounds for image composition.
+ # Matting Anything
+
+ [Jiachen Li](https://chrisjuniorli.github.io/),
+ [Jitesh Jain](https://praeclarumjj3.github.io/),
+ [Humphrey Shi](https://www.humphreyshi.com/home)
+
+ [[`Project page`](https://chrisjuniorli.github.io/project/Matting-Anything/)]
+ [[`ArXiv`](https://arxiv.org/abs/2306.05399)]
+ [[`Code`](https://github.com/SHI-Labs/Matting-Anything)]
+ [[`Video`](https://www.youtube.com/watch?v=XY2Q0HATGOk)]
+
+ Welcome to the Matting Anything demo; upload your image to get started. <br/>
+ You may select different prompt types to get the alpha matte of the target instance, and select different backgrounds for image composition. Local setup instructions for the demo are available at: https://github.com/SHI-Labs/Matting-Anything
+
  ## Usage
  You may check the <a href='https://www.youtube.com/watch?v=XY2Q0HATGOk'>video</a> to see how to play with the demo, or check the details below.
  <details>
@@ -263,6 +249,9 @@ if __name__ == "__main__":
  **real_world_sample**: Randomly select a real-world image from `assets/backgrounds` for composition.
 
  **generated_by_text**: Send background text prompt to create a background image with stable diffusion model in the `Background prompt` box.
+
+ **guidance_mode**: Try mask guidance if alpha guidance didn't return satisfying outputs.
+
  </details>
  """)
 
 
 
257