Spaces:
openfree
/
Running on Zero

openfree committed on
Commit
d559f02
β€’
1 Parent(s): 6a93034

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -12
app.py CHANGED
@@ -74,7 +74,7 @@ def generate_image(
74
 
75
  if opts.seed is None:
76
  opts.seed = torch.Generator(device="cpu").seed()
77
- print(f"Generating '{opts.prompt}' with seed {opts.seed}")
78
  t0 = time.perf_counter()
79
 
80
  use_true_cfg = abs(true_cfg - 1.0) > 1e-2
@@ -86,8 +86,6 @@ def generate_image(
86
  id_embeddings = None
87
  uncond_id_embeddings = None
88
 
89
- print(id_embeddings)
90
-
91
  # prepare input
92
  x = get_noise(
93
  1,
@@ -97,7 +95,6 @@ def generate_image(
97
  dtype=torch.bfloat16,
98
  seed=opts.seed,
99
  )
100
- print(x)
101
  timesteps = get_schedule(
102
  opts.num_steps,
103
  x.shape[-1] * x.shape[-2] // 4,
@@ -142,10 +139,8 @@ def generate_image(
142
 
143
  t1 = time.perf_counter()
144
 
145
- print(f"Done in {t1 - t0:.1f}s.")
146
  # bring into PIL format
147
  x = x.clamp(-1, 1)
148
- # x = embed_watermark(x.float())
149
  x = rearrange(x[0], "c h w -> h w c")
150
 
151
  img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
@@ -178,7 +173,7 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
178
  max_sequence_length = gr.Slider(128, 512, 128, step=128,
179
  label="max_sequence_length for prompt (T5), small will be faster")
180
 
181
- with gr.Accordion("Advanced Options (True CFG, true_cfg_scale=1 means use fake CFG, >1 means use true CFG, if using true CFG, we recommend set the guidance scale to 1)", open=False): # noqa E501
182
  neg_prompt = gr.Textbox(
183
  label="Negative Prompt",
184
  value="bad quality, worst quality, text, signature, watermark, extra limbs")
@@ -192,12 +187,10 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
192
  seed_output = gr.Textbox(label="Used Seed")
193
  intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
194
 
195
-
196
  with gr.Row():
197
  with gr.Column():
198
  gr.Markdown("## Examples")
199
 
200
- # λͺ¨λ“  μ˜ˆμ‹œλ₯Ό ν•˜λ‚˜μ˜ 리슀트둜 ν•©μΉ©λ‹ˆλ‹€
201
  all_examples = [
202
  ['a woman holding sign with glowing green text \"PuLID for FLUX\"', 'example_inputs/liuyifei.png', 4, 4, 2680261499100305976, 1],
203
  ['portrait, side view', 'example_inputs/liuyifei.png', 4, 4, 1205240166692517553, 1],
@@ -211,11 +204,9 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
211
  ['portrait, made of ice sculpture', 'example_inputs/lecun.jpg', 1, 1, 3811899118709451814, 5],
212
  ]
213
 
214
- # μ˜ˆμ‹œ 이미지와 μΊ‘μ…˜μ„ μ€€λΉ„ν•©λ‹ˆλ‹€
215
  example_images = [example[1] for example in all_examples]
216
  example_captions = [example[0] for example in all_examples]
217
 
218
- # Gallery μ»΄ν¬λ„ŒνŠΈλ₯Ό μ‚¬μš©ν•˜μ—¬ κ·Έλ¦¬λ“œ ν˜•νƒœλ‘œ μ˜ˆμ‹œλ“€μ„ ν‘œμ‹œν•©λ‹ˆλ‹€
219
  gallery = gr.Gallery(
220
  value=list(zip(example_images, example_captions)),
221
  label="Example Gallery",
@@ -227,7 +218,6 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
227
  height="auto"
228
  )
229
 
230
- # μ˜ˆμ‹œ 클릭 μ‹œ μž…λ ₯ ν•„λ“œμ— 값을 μ±„μš°λŠ” ν•¨μˆ˜
231
  def fill_example(evt: gr.SelectData):
232
  return [all_examples[evt.index][i] for i in [0, 1, 2, 3, 4, 5]]
233
 
 
74
 
75
  if opts.seed is None:
76
  opts.seed = torch.Generator(device="cpu").seed()
77
+
78
  t0 = time.perf_counter()
79
 
80
  use_true_cfg = abs(true_cfg - 1.0) > 1e-2
 
86
  id_embeddings = None
87
  uncond_id_embeddings = None
88
 
 
 
89
  # prepare input
90
  x = get_noise(
91
  1,
 
95
  dtype=torch.bfloat16,
96
  seed=opts.seed,
97
  )
 
98
  timesteps = get_schedule(
99
  opts.num_steps,
100
  x.shape[-1] * x.shape[-2] // 4,
 
139
 
140
  t1 = time.perf_counter()
141
 
 
142
  # bring into PIL format
143
  x = x.clamp(-1, 1)
 
144
  x = rearrange(x[0], "c h w -> h w c")
145
 
146
  img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
 
173
  max_sequence_length = gr.Slider(128, 512, 128, step=128,
174
  label="max_sequence_length for prompt (T5), small will be faster")
175
 
176
+ with gr.Accordion("Advanced Options", open=False):
177
  neg_prompt = gr.Textbox(
178
  label="Negative Prompt",
179
  value="bad quality, worst quality, text, signature, watermark, extra limbs")
 
187
  seed_output = gr.Textbox(label="Used Seed")
188
  intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
189
 
 
190
  with gr.Row():
191
  with gr.Column():
192
  gr.Markdown("## Examples")
193
 
 
194
  all_examples = [
195
  ['a woman holding sign with glowing green text \"PuLID for FLUX\"', 'example_inputs/liuyifei.png', 4, 4, 2680261499100305976, 1],
196
  ['portrait, side view', 'example_inputs/liuyifei.png', 4, 4, 1205240166692517553, 1],
 
204
  ['portrait, made of ice sculpture', 'example_inputs/lecun.jpg', 1, 1, 3811899118709451814, 5],
205
  ]
206
 
 
207
  example_images = [example[1] for example in all_examples]
208
  example_captions = [example[0] for example in all_examples]
209
 
 
210
  gallery = gr.Gallery(
211
  value=list(zip(example_images, example_captions)),
212
  label="Example Gallery",
 
218
  height="auto"
219
  )
220
 
 
221
  def fill_example(evt: gr.SelectData):
222
  return [all_examples[evt.index][i] for i in [0, 1, 2, 3, 4, 5]]
223