Update app.py
Browse files
app.py
CHANGED
@@ -74,7 +74,7 @@ def generate_image(
|
|
74 |
|
75 |
if opts.seed is None:
|
76 |
opts.seed = torch.Generator(device="cpu").seed()
|
77 |
-
|
78 |
t0 = time.perf_counter()
|
79 |
|
80 |
use_true_cfg = abs(true_cfg - 1.0) > 1e-2
|
@@ -86,8 +86,6 @@ def generate_image(
|
|
86 |
id_embeddings = None
|
87 |
uncond_id_embeddings = None
|
88 |
|
89 |
-
print(id_embeddings)
|
90 |
-
|
91 |
# prepare input
|
92 |
x = get_noise(
|
93 |
1,
|
@@ -97,7 +95,6 @@ def generate_image(
|
|
97 |
dtype=torch.bfloat16,
|
98 |
seed=opts.seed,
|
99 |
)
|
100 |
-
print(x)
|
101 |
timesteps = get_schedule(
|
102 |
opts.num_steps,
|
103 |
x.shape[-1] * x.shape[-2] // 4,
|
@@ -142,10 +139,8 @@ def generate_image(
|
|
142 |
|
143 |
t1 = time.perf_counter()
|
144 |
|
145 |
-
print(f"Done in {t1 - t0:.1f}s.")
|
146 |
# bring into PIL format
|
147 |
x = x.clamp(-1, 1)
|
148 |
-
# x = embed_watermark(x.float())
|
149 |
x = rearrange(x[0], "c h w -> h w c")
|
150 |
|
151 |
img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
|
@@ -178,7 +173,7 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
|
|
178 |
max_sequence_length = gr.Slider(128, 512, 128, step=128,
|
179 |
label="max_sequence_length for prompt (T5), small will be faster")
|
180 |
|
181 |
-
with gr.Accordion("Advanced Options
|
182 |
neg_prompt = gr.Textbox(
|
183 |
label="Negative Prompt",
|
184 |
value="bad quality, worst quality, text, signature, watermark, extra limbs")
|
@@ -192,12 +187,10 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
|
|
192 |
seed_output = gr.Textbox(label="Used Seed")
|
193 |
intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
|
194 |
|
195 |
-
|
196 |
with gr.Row():
|
197 |
with gr.Column():
|
198 |
gr.Markdown("## Examples")
|
199 |
|
200 |
-
# 모든 예시를 하나의 리스트로 합칩니다
|
201 |
all_examples = [
|
202 |
['a woman holding sign with glowing green text \"PuLID for FLUX\"', 'example_inputs/liuyifei.png', 4, 4, 2680261499100305976, 1],
|
203 |
['portrait, side view', 'example_inputs/liuyifei.png', 4, 4, 1205240166692517553, 1],
|
@@ -211,11 +204,9 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
|
|
211 |
['portrait, made of ice sculpture', 'example_inputs/lecun.jpg', 1, 1, 3811899118709451814, 5],
|
212 |
]
|
213 |
|
214 |
-
# 예시 이미지와 캡션을 준비합니다
|
215 |
example_images = [example[1] for example in all_examples]
|
216 |
example_captions = [example[0] for example in all_examples]
|
217 |
|
218 |
-
# Gallery 컴포넌트를 사용하여 그리드 형태로 예시들을 표시합니다
|
219 |
gallery = gr.Gallery(
|
220 |
value=list(zip(example_images, example_captions)),
|
221 |
label="Example Gallery",
|
@@ -227,7 +218,6 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
|
|
227 |
height="auto"
|
228 |
)
|
229 |
|
230 |
-
# 예시 클릭 시 입력 필드에 값을 채우는 함수
|
231 |
def fill_example(evt: gr.SelectData):
|
232 |
return [all_examples[evt.index][i] for i in [0, 1, 2, 3, 4, 5]]
|
233 |
|
|
|
74 |
|
75 |
if opts.seed is None:
|
76 |
opts.seed = torch.Generator(device="cpu").seed()
|
77 |
+
|
78 |
t0 = time.perf_counter()
|
79 |
|
80 |
use_true_cfg = abs(true_cfg - 1.0) > 1e-2
|
|
|
86 |
id_embeddings = None
|
87 |
uncond_id_embeddings = None
|
88 |
|
|
|
|
|
89 |
# prepare input
|
90 |
x = get_noise(
|
91 |
1,
|
|
|
95 |
dtype=torch.bfloat16,
|
96 |
seed=opts.seed,
|
97 |
)
|
|
|
98 |
timesteps = get_schedule(
|
99 |
opts.num_steps,
|
100 |
x.shape[-1] * x.shape[-2] // 4,
|
|
|
139 |
|
140 |
t1 = time.perf_counter()
|
141 |
|
|
|
142 |
# bring into PIL format
|
143 |
x = x.clamp(-1, 1)
|
|
|
144 |
x = rearrange(x[0], "c h w -> h w c")
|
145 |
|
146 |
img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
|
|
|
173 |
max_sequence_length = gr.Slider(128, 512, 128, step=128,
|
174 |
label="max_sequence_length for prompt (T5), small will be faster")
|
175 |
|
176 |
+
with gr.Accordion("Advanced Options", open=False):
|
177 |
neg_prompt = gr.Textbox(
|
178 |
label="Negative Prompt",
|
179 |
value="bad quality, worst quality, text, signature, watermark, extra limbs")
|
|
|
187 |
seed_output = gr.Textbox(label="Used Seed")
|
188 |
intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
|
189 |
|
|
|
190 |
with gr.Row():
|
191 |
with gr.Column():
|
192 |
gr.Markdown("## Examples")
|
193 |
|
|
|
194 |
all_examples = [
|
195 |
['a woman holding sign with glowing green text \"PuLID for FLUX\"', 'example_inputs/liuyifei.png', 4, 4, 2680261499100305976, 1],
|
196 |
['portrait, side view', 'example_inputs/liuyifei.png', 4, 4, 1205240166692517553, 1],
|
|
|
204 |
['portrait, made of ice sculpture', 'example_inputs/lecun.jpg', 1, 1, 3811899118709451814, 5],
|
205 |
]
|
206 |
|
|
|
207 |
example_images = [example[1] for example in all_examples]
|
208 |
example_captions = [example[0] for example in all_examples]
|
209 |
|
|
|
210 |
gallery = gr.Gallery(
|
211 |
value=list(zip(example_images, example_captions)),
|
212 |
label="Example Gallery",
|
|
|
218 |
height="auto"
|
219 |
)
|
220 |
|
|
|
221 |
def fill_example(evt: gr.SelectData):
|
222 |
return [all_examples[evt.index][i] for i in [0, 1, 2, 3, 4, 5]]
|
223 |
|