Spaces:
openfree
/
Running on Zero

openfree committed on
Commit
275b762
•
1 Parent(s): b9cdb0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -46
app.py CHANGED
@@ -74,7 +74,7 @@ def generate_image(
74
 
75
  if opts.seed is None:
76
  opts.seed = torch.Generator(device="cpu").seed()
77
- print(f"Generating '{opts.prompt}' with seed {opts.seed}")
78
  t0 = time.perf_counter()
79
 
80
  use_true_cfg = abs(true_cfg - 1.0) > 1e-2
@@ -86,8 +86,6 @@ def generate_image(
86
  id_embeddings = None
87
  uncond_id_embeddings = None
88
 
89
- print(id_embeddings)
90
-
91
  # prepare input
92
  x = get_noise(
93
  1,
@@ -97,7 +95,6 @@ def generate_image(
97
  dtype=torch.bfloat16,
98
  seed=opts.seed,
99
  )
100
- print(x)
101
  timesteps = get_schedule(
102
  opts.num_steps,
103
  x.shape[-1] * x.shape[-2] // 4,
@@ -142,10 +139,8 @@ def generate_image(
142
 
143
  t1 = time.perf_counter()
144
 
145
- print(f"Done in {t1 - t0:.1f}s.")
146
  # bring into PIL format
147
  x = x.clamp(-1, 1)
148
- # x = embed_watermark(x.float())
149
  x = rearrange(x[0], "c h w -> h w c")
150
 
151
  img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
@@ -167,55 +162,31 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
167
  with gr.Column():
168
  prompt = gr.Textbox(label="Prompt", value="portrait, color, cinematic")
169
  id_image = gr.Image(label="ID Image")
170
- id_weight = gr.Slider(0.0, 3.0, 1, step=0.05, label="id weight")
171
-
172
- width = gr.Slider(256, 1536, 896, step=16, label="Width")
173
- height = gr.Slider(256, 1536, 1152, step=16, label="Height")
174
- num_steps = gr.Slider(1, 20, 20, step=1, label="Number of steps")
175
- start_step = gr.Slider(0, 10, 0, step=1, label="timestep to start inserting ID")
176
- guidance = gr.Slider(1.0, 10.0, 4, step=0.1, label="Guidance")
177
- seed = gr.Textbox(-1, label="Seed (-1 for random)")
178
- max_sequence_length = gr.Slider(128, 512, 128, step=128,
179
- label="max_sequence_length for prompt (T5), small will be faster")
180
-
181
- with gr.Accordion("Advanced Options (True CFG, true_cfg_scale=1 means use fake CFG, >1 means use true CFG, if using true CFG, we recommend set the guidance scale to 1)", open=False): # noqa E501
182
- neg_prompt = gr.Textbox(
183
- label="Negative Prompt",
184
- value="bad quality, worst quality, text, signature, watermark, extra limbs")
185
- true_cfg = gr.Slider(1.0, 10.0, 1, step=0.1, label="true CFG scale")
186
- timestep_to_start_cfg = gr.Slider(0, 20, 1, step=1, label="timestep to start cfg", visible=args.dev)
187
-
188
  generate_btn = gr.Button("Generate")
189
 
190
  with gr.Column():
191
  output_image = gr.Image(label="Generated Image")
192
- seed_output = gr.Textbox(label="Used Seed")
193
- intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
194
-
195
 
196
  with gr.Row():
197
  with gr.Column():
198
  gr.Markdown("## Examples")
199
 
200
- # λͺ¨λ“  μ˜ˆμ‹œλ₯Ό ν•˜λ‚˜μ˜ 리슀트둜 ν•©μΉ©λ‹ˆλ‹€
201
  all_examples = [
202
- ['a woman holding sign with glowing green text \"PuLID for FLUX\"', 'example_inputs/liuyifei.png', 4, 4, 2680261499100305976, 1],
203
- ['portrait, side view', 'example_inputs/liuyifei.png', 4, 4, 1205240166692517553, 1],
204
- ['white-haired woman with vr technology atmosphere, revolutionary exceptional magnum with remarkable details', 'example_inputs/liuyifei.png', 4, 4, 6349424134217931066, 1],
205
- ['a young child is eating Icecream', 'example_inputs/liuyifei.png', 4, 4, 10606046113565776207, 1],
206
- ['a man is holding a sign with text \"PuLID for FLUX\", winter, snowing, top of the mountain', 'example_inputs/pengwei.jpg', 4, 4, 2410129802683836089, 1],
207
- ['portrait, candle light', 'example_inputs/pengwei.jpg', 4, 4, 17522759474323955700, 1],
208
- ['profile shot dark photo of a 25-year-old male with smoke escaping from his mouth, the backlit smoke gives the image an ephemeral quality, natural face, natural eyebrows, natural skin texture, award winning photo, highly detailed face, atmospheric lighting, film grain, monochrome', 'example_inputs/pengwei.jpg', 4, 4, 17733156847328193625, 1],
209
- ['American Comics, 1boy', 'example_inputs/pengwei.jpg', 1, 4, 13223174453874179686, 1],
210
- ['portrait, pixar', 'example_inputs/pengwei.jpg', 1, 4, 9445036702517583939, 1],
211
- ['portrait, made of ice sculpture', 'example_inputs/lecun.jpg', 1, 1, 3811899118709451814, 5],
212
  ]
213
 
214
- # μ˜ˆμ‹œ 이미지와 μΊ‘μ…˜μ„ μ€€λΉ„ν•©λ‹ˆλ‹€
215
  example_images = [example[1] for example in all_examples]
216
  example_captions = [example[0] for example in all_examples]
217
 
218
- # Gallery μ»΄ν¬λ„ŒνŠΈλ₯Ό μ‚¬μš©ν•˜μ—¬ κ·Έλ¦¬λ“œ ν˜•νƒœλ‘œ μ˜ˆμ‹œλ“€μ„ ν‘œμ‹œν•©λ‹ˆλ‹€
219
  gallery = gr.Gallery(
220
  value=list(zip(example_images, example_captions)),
221
  label="Example Gallery",
@@ -227,21 +198,33 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
227
  height="auto"
228
  )
229
 
230
- # μ˜ˆμ‹œ 클릭 μ‹œ μž…λ ₯ ν•„λ“œμ— 값을 μ±„μš°λŠ” ν•¨μˆ˜
231
  def fill_example(evt: gr.SelectData):
232
- return [all_examples[evt.index][i] for i in [0, 1, 2, 3, 4, 5]]
233
 
234
  gallery.select(
235
  fill_example,
236
  None,
237
- [prompt, id_image, start_step, guidance, seed, true_cfg],
238
  )
239
 
240
  generate_btn.click(
241
  fn=generate_image,
242
- inputs=[width, height, num_steps, start_step, guidance, seed, prompt, id_image, id_weight, neg_prompt,
243
- true_cfg, timestep_to_start_cfg, max_sequence_length],
244
- outputs=[output_image, seed_output, intermediate_output],
 
 
 
 
 
 
 
 
 
 
 
 
 
245
  )
246
 
247
  return demo
 
74
 
75
  if opts.seed is None:
76
  opts.seed = torch.Generator(device="cpu").seed()
77
+
78
  t0 = time.perf_counter()
79
 
80
  use_true_cfg = abs(true_cfg - 1.0) > 1e-2
 
86
  id_embeddings = None
87
  uncond_id_embeddings = None
88
 
 
 
89
  # prepare input
90
  x = get_noise(
91
  1,
 
95
  dtype=torch.bfloat16,
96
  seed=opts.seed,
97
  )
 
98
  timesteps = get_schedule(
99
  opts.num_steps,
100
  x.shape[-1] * x.shape[-2] // 4,
 
139
 
140
  t1 = time.perf_counter()
141
 
 
142
  # bring into PIL format
143
  x = x.clamp(-1, 1)
 
144
  x = rearrange(x[0], "c h w -> h w c")
145
 
146
  img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
 
162
  with gr.Column():
163
  prompt = gr.Textbox(label="Prompt", value="portrait, color, cinematic")
164
  id_image = gr.Image(label="ID Image")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  generate_btn = gr.Button("Generate")
166
 
167
  with gr.Column():
168
  output_image = gr.Image(label="Generated Image")
 
 
 
169
 
170
  with gr.Row():
171
  with gr.Column():
172
  gr.Markdown("## Examples")
173
 
 
174
  all_examples = [
175
+ ['a woman holding sign with glowing green text \"PuLID for FLUX\"', 'example_inputs/liuyifei.png'],
176
+ ['portrait, side view', 'example_inputs/liuyifei.png'],
177
+ ['white-haired woman with vr technology atmosphere', 'example_inputs/liuyifei.png'],
178
+ ['a young child is eating Icecream', 'example_inputs/liuyifei.png'],
179
+ ['a man is holding a sign with text \"PuLID for FLUX\", winter, snowing', 'example_inputs/pengwei.jpg'],
180
+ ['portrait, candle light', 'example_inputs/pengwei.jpg'],
181
+ ['profile shot dark photo of a 25-year-old male with smoke', 'example_inputs/pengwei.jpg'],
182
+ ['American Comics, 1boy', 'example_inputs/pengwei.jpg'],
183
+ ['portrait, pixar', 'example_inputs/pengwei.jpg'],
184
+ ['portrait, made of ice sculpture', 'example_inputs/lecun.jpg'],
185
  ]
186
 
 
187
  example_images = [example[1] for example in all_examples]
188
  example_captions = [example[0] for example in all_examples]
189
 
 
190
  gallery = gr.Gallery(
191
  value=list(zip(example_images, example_captions)),
192
  label="Example Gallery",
 
198
  height="auto"
199
  )
200
 
 
201
  def fill_example(evt: gr.SelectData):
202
+ return [all_examples[evt.index][i] for i in [0, 1]]
203
 
204
  gallery.select(
205
  fill_example,
206
  None,
207
+ [prompt, id_image],
208
  )
209
 
210
  generate_btn.click(
211
  fn=generate_image,
212
+ inputs=[
213
+ gr.Slider(256, 1536, 896, step=16, visible=False), # width
214
+ gr.Slider(256, 1536, 1152, step=16, visible=False), # height
215
+ gr.Slider(1, 20, 20, step=1, visible=False), # num_steps
216
+ gr.Slider(0, 10, 0, step=1, visible=False), # start_step
217
+ gr.Slider(1.0, 10.0, 4, step=0.1, visible=False), # guidance
218
+ gr.Textbox(-1, visible=False), # seed
219
+ prompt,
220
+ id_image,
221
+ gr.Slider(0.0, 3.0, 1, step=0.05, visible=False), # id_weight
222
+ gr.Textbox("bad quality, worst quality, text, signature, watermark, extra limbs", visible=False), # neg_prompt
223
+ gr.Slider(1.0, 10.0, 1, step=0.1, visible=False), # true_cfg
224
+ gr.Slider(0, 20, 1, step=1, visible=False), # timestep_to_start_cfg
225
+ gr.Slider(128, 512, 128, step=128, visible=False), # max_sequence_length
226
+ ],
227
+ outputs=[output_image],
228
  )
229
 
230
  return demo