SkalskiP committed
Commit aa009f7
Parent: ec0b3c1

points per side argument added
Files changed (2):

1. README.md (+3 −3)
2. app.py (+27 −8)
README.md CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
-title: Segment Anything Model 2
-emoji: 💻
-colorFrom: blue
+title: Segment Anything 2
+emoji: 🦄
+colorFrom: pink
 colorTo: red
 sdk: gradio
 sdk_version: 4.39.0
```
app.py CHANGED

```diff
@@ -36,9 +36,10 @@ dataset to date. SAM 2, trained on this extensive dataset, delivers robust perfo
 across diverse tasks and visual domains.
 """
 EXAMPLES = [
-    ["tiny", "https://media.roboflow.com/notebooks/examples/dog-2.jpeg"],
-    ["small", "https://media.roboflow.com/notebooks/examples/dog-3.jpeg"],
-    ["large", "https://media.roboflow.com/notebooks/examples/dog-3.jpeg"],
+    ["tiny", "https://media.roboflow.com/notebooks/examples/dog-2.jpeg", 16],
+    ["small", "https://media.roboflow.com/notebooks/examples/dog-3.jpeg", 16],
+    ["large", "https://media.roboflow.com/notebooks/examples/dog-3.jpeg", 16],
+    ["large", "https://media.roboflow.com/notebooks/examples/dog-3.jpeg", 64],
 ]
 
 DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -46,9 +47,11 @@ MASK_ANNOTATOR = sv.MaskAnnotator(color_lookup=sv.ColorLookup.INDEX)
 MODELS = load_models(device=DEVICE)
 
 
-def process(checkpoint_dropdown, image_input) -> Optional[Image.Image]:
-    sam2_model = MODELS[checkpoint_dropdown]
-    mask_generator = SAM2AutomaticMaskGenerator(sam2_model)
+def process(checkpoint_dropdown, image_input, points_per_side) -> Optional[Image.Image]:
+    model = MODELS[checkpoint_dropdown]
+    mask_generator = SAM2AutomaticMaskGenerator(
+        model=model,
+        points_per_side=points_per_side)
     image = np.array(image_input.convert("RGB"))
     sam_result = mask_generator.generate(image)
     detections = sv.Detections.from_sam(sam_result=sam_result)
@@ -64,6 +67,14 @@ with gr.Blocks() as demo:
         label="Checkpoint", info="Select a SAM2 checkpoint to use.",
         interactive=True
     )
+    points_per_side_component = gr.Slider(
+        minimum=16,
+        maximum=64,
+        value=16,
+        step=16,
+        label="Points per side",
+        info="the number of points to be sampled along one side of the image."
+    )
     with gr.Row():
         with gr.Column():
             image_input_component = gr.Image(type='pil', label='Upload image')
@@ -74,14 +85,22 @@
     gr.Examples(
         fn=process,
         examples=EXAMPLES,
-        inputs=[checkpoint_dropdown_component, image_input_component],
+        inputs=[
+            checkpoint_dropdown_component,
+            image_input_component,
+            points_per_side_component
+        ],
         outputs=[image_output_component],
         run_on_click=True
     )
 
     submit_button_component.click(
         fn=process,
-        inputs=[checkpoint_dropdown_component, image_input_component],
+        inputs=[
+            checkpoint_dropdown_component,
+            image_input_component,
+            points_per_side_component
+        ],
         outputs=[image_output_component]
     )
```
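
For reference, `points_per_side` sets the density of the point-prompt grid that `SAM2AutomaticMaskGenerator` sweeps over the image: it samples `points_per_side ** 2` evenly spaced prompts, so the slider's endpoints of 16 and 64 correspond to 256 and 4096 prompts respectively. Below is a minimal standalone sketch of the same call path, assuming the `facebookresearch/sam2` package layout; the config and checkpoint paths are placeholders, not whatever this Space's `load_models()` resolves to:

```python
import numpy as np
import torch
from PIL import Image

from sam2.build_sam import build_sam2
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hypothetical config/checkpoint paths -- substitute whichever SAM2
# variant you have downloaded.
sam2_model = build_sam2(
    "sam2_hiera_l.yaml",
    "checkpoints/sam2_hiera_large.pt",
    device=DEVICE
)

image = np.array(Image.open("dog-2.jpeg").convert("RGB"))

# A denser grid (higher points_per_side) tends to find more and smaller
# masks, at the cost of proportionally more prompt evaluations.
for points_per_side in (16, 64):
    mask_generator = SAM2AutomaticMaskGenerator(
        model=sam2_model,
        points_per_side=points_per_side
    )
    sam_result = mask_generator.generate(image)
    print(f"points_per_side={points_per_side}: {len(sam_result)} masks")
```

The slider's `step=16` over the 16–64 range limits the UI to four grid densities, and the two densities exercised by the new `EXAMPLES` rows (16 and 64) are its endpoints.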
106