MakiAi committed on
Commit
666dde7
1 Parent(s): 6b42d57

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -26
app.py CHANGED
@@ -2,25 +2,26 @@ import torch
2
  import spaces
3
  import gradio as gr
4
  from diffusers import FluxInpaintPipeline
 
 
5
 
6
  MARKDOWN = """
7
- # FLUX.1 Inpainting 🔥
8
  Shoutout to [Black Forest Labs](https://huggingface.co/black-forest-labs) team for
9
  creating this amazing model, and a big thanks to [Gothos](https://github.com/Gothos)
10
  for taking it to the next level by enabling inpainting with the FLUX.
11
  """
12
 
 
13
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
14
 
15
  pipe = FluxInpaintPipeline.from_pretrained(
16
  "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
17
 
18
-
19
  @spaces.GPU()
20
- def process(input_image_editor, uploaded_mask, input_text, strength, progress=gr.Progress(track_tqdm=True)):
21
  if not input_text:
22
- gr.Info("Please enter a text prompt.")
23
- return None
24
 
25
  image = input_image_editor['background']
26
  if uploaded_mask is None:
@@ -29,29 +30,34 @@ def process(input_image_editor, uploaded_mask, input_text, strength, progress=gr
29
  mask_image = uploaded_mask
30
 
31
  if not image:
32
- gr.Info("Please upload an image.")
33
- return None
34
 
35
  if not mask_image:
36
- gr.Info("Please draw or upload a mask on the image.")
37
- return None
38
 
39
  width, height = image.size
40
 
41
- return pipe(
 
 
 
 
42
  prompt=input_text,
43
  image=image,
44
  mask_image=mask_image,
45
  width=width,
46
  height=height,
47
- strength=strength
 
 
48
  ).images[0]
49
 
 
50
 
51
- with gr.Blocks() as demo:
52
  gr.Markdown(MARKDOWN)
53
  with gr.Row():
54
- with gr.Column():
55
  input_image_editor_component = gr.ImageEditor(
56
  label='Image',
57
  type='pil',
@@ -66,20 +72,38 @@ with gr.Blocks() as demo:
66
  placeholder="Enter your prompt",
67
  container=False,
68
  )
69
- strength_slider = gr.Slider(
70
- minimum=0.0,
71
- maximum=1.0,
72
- value=0.7,
73
- step=0.01,
74
- label="Strength"
75
- )
76
- with gr.Accordion("Upload a mask", open = False):
77
- uploaded_mask_component = gr.Image(label = "Already made mask (black pixels will be preserved, white pixels will be redrawn)", sources = ["upload"], type = "pil")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  submit_button_component = gr.Button(
79
- value='Submit', variant='primary')
80
- with gr.Column():
81
  output_image_component = gr.Image(
82
  type='pil', image_mode='RGB', label='Generated image')
 
 
 
 
83
 
84
  submit_button_component.click(
85
  fn=process,
@@ -87,10 +111,15 @@ with gr.Blocks() as demo:
87
  input_image_editor_component,
88
  uploaded_mask_component,
89
  input_text_component,
90
- strength_slider
 
 
 
91
  ],
92
  outputs=[
93
- output_image_component
 
 
94
  ]
95
  )
96
 
 
2
  import spaces
3
  import gradio as gr
4
  from diffusers import FluxInpaintPipeline
5
+ import random
6
+ import numpy as np
7
 
8
  MARKDOWN = """
9
+ # FLUX.1 Inpainting 🎨
10
  Shoutout to [Black Forest Labs](https://huggingface.co/black-forest-labs) team for
11
  creating this amazing model, and a big thanks to [Gothos](https://github.com/Gothos)
12
  for taking it to the next level by enabling inpainting with the FLUX.
13
  """
14
 
15
+ MAX_SEED = np.iinfo(np.int32).max
16
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
17
 
18
  pipe = FluxInpaintPipeline.from_pretrained(
19
  "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
20
 
 
21
  @spaces.GPU()
22
+ def process(input_image_editor, uploaded_mask, input_text, strength, seed, randomize_seed, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
23
  if not input_text:
24
+ raise gr.Error("Please enter a text prompt.")
 
25
 
26
  image = input_image_editor['background']
27
  if uploaded_mask is None:
 
30
  mask_image = uploaded_mask
31
 
32
  if not image:
33
+ raise gr.Error("Please upload an image.")
 
34
 
35
  if not mask_image:
36
+ raise gr.Error("Please draw or upload a mask on the image.")
 
37
 
38
  width, height = image.size
39
 
40
+ if randomize_seed:
41
+ seed = random.randint(0, MAX_SEED)
42
+ generator = torch.Generator(device=DEVICE).manual_seed(seed)
43
+
44
+ result = pipe(
45
  prompt=input_text,
46
  image=image,
47
  mask_image=mask_image,
48
  width=width,
49
  height=height,
50
+ strength=strength,
51
+ num_inference_steps=num_inference_steps,
52
+ generator=generator
53
  ).images[0]
54
 
55
+ return result, mask_image, seed
56
 
57
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
58
  gr.Markdown(MARKDOWN)
59
  with gr.Row():
60
+ with gr.Column(scale=1):
61
  input_image_editor_component = gr.ImageEditor(
62
  label='Image',
63
  type='pil',
 
72
  placeholder="Enter your prompt",
73
  container=False,
74
  )
75
+ with gr.Accordion("Advanced Settings", open=False):
76
+ strength_slider = gr.Slider(
77
+ minimum=0.0,
78
+ maximum=1.0,
79
+ value=0.7,
80
+ step=0.01,
81
+ label="Strength"
82
+ )
83
+ seed_number = gr.Number(
84
+ label="Seed",
85
+ value=42,
86
+ precision=0
87
+ )
88
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
89
+ num_inference_steps = gr.Slider(
90
+ minimum=1,
91
+ maximum=100,
92
+ value=30,
93
+ step=1,
94
+ label="Number of inference steps"
95
+ )
96
+ with gr.Accordion("Upload a mask", open=False):
97
+ uploaded_mask_component = gr.Image(label="Already made mask (black pixels will be preserved, white pixels will be redrawn)", sources=["upload"], type="pil")
98
  submit_button_component = gr.Button(
99
+ value='Generate', variant='primary')
100
+ with gr.Column(scale=1):
101
  output_image_component = gr.Image(
102
  type='pil', image_mode='RGB', label='Generated image')
103
+ with gr.Accordion("Debug Info", open=False):
104
+ output_mask_component = gr.Image(
105
+ type='pil', image_mode='RGB', label='Input mask')
106
+ output_seed = gr.Number(label="Used Seed")
107
 
108
  submit_button_component.click(
109
  fn=process,
 
111
  input_image_editor_component,
112
  uploaded_mask_component,
113
  input_text_component,
114
+ strength_slider,
115
+ seed_number,
116
+ randomize_seed,
117
+ num_inference_steps
118
  ],
119
  outputs=[
120
+ output_image_component,
121
+ output_mask_component,
122
+ output_seed
123
  ]
124
  )
125