Spaces: Running on Zero

foivospar committed · cdf0274
Parent(s): 06c6140

add lcm-lora support

Files changed:
- app.py (+41 -6)
- requirements.txt (+2 -1)
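Background for this change: LCM-LoRA is a distilled "latent consistency" adapter that lets a Stable Diffusion pipeline sample in a handful of steps instead of dozens. The commit follows the standard diffusers pattern for it; a minimal sketch below (the base-model ID and prompt are illustrative assumptions, not taken from this Space, which builds its pipeline from Arc2Face weights):

import torch
from diffusers import StableDiffusionPipeline, LCMScheduler

# Illustrative base model, not the one this Space loads.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Swap in the LCM scheduler and attach the LCM-LoRA adapter (requires peft).
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

# LCM sampling: very few steps, guidance close to 1.0.
image = pipe("a portrait photo", num_inference_steps=4, guidance_scale=1.0).images[0]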
app.py
CHANGED

@@ -5,6 +5,7 @@ from diffusers import (
     StableDiffusionPipeline,
     UNet2DConditionModel,
     DPMSolverMultistepScheduler,
+    LCMScheduler
 )

 from arc2face import CLIPTextModelWrapper, project_face_embs
@@ -59,6 +60,22 @@ pipeline = StableDiffusionPipeline.from_pretrained(
 pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
 pipeline = pipeline.to(device)

+# load and disable LCM
+pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
+pipeline.disable_lora()
+
+def toggle_lcm_ui(value):
+    if value:
+        return (
+            gr.update(minimum=1, maximum=20, step=1, value=3),
+            gr.update(minimum=0.1, maximum=10.0, step=0.1, value=1.0),
+        )
+    else:
+        return (
+            gr.update(minimum=1, maximum=100, step=1, value=25),
+            gr.update(minimum=0.1, maximum=10.0, step=0.1, value=3.0),
+        )
+
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
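Why toggle_lcm_ui changes both sliders: LCM distillation bakes classifier-free guidance into the adapter, so at inference it expects very few steps (capped here at 20, default 3) and a guidance scale near 1.0, while the stock DPM-Solver++ path keeps 25 steps and CFG 3.0. A standalone restatement you can run to inspect the patches (assumes only that gr.update returns a property patch Gradio applies to the target components):

import gradio as gr

def toggle_lcm_ui(value):
    if value:  # LCM on: few steps, guidance ~1.0
        return (gr.update(minimum=1, maximum=20, step=1, value=3),
                gr.update(minimum=0.1, maximum=10.0, step=0.1, value=1.0))
    # LCM off: stock DPM-Solver++ defaults
    return (gr.update(minimum=1, maximum=100, step=1, value=25),
            gr.update(minimum=0.1, maximum=10.0, step=0.1, value=3.0))

steps_patch, cfg_patch = toggle_lcm_ui(True)
print(steps_patch, cfg_patch)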
@@ -88,10 +105,17 @@ def get_example():
     return case

 def run_example(img_file):
-    return generate_image(img_file, 25, 3, 23, 2)
+    return generate_image(img_file, 25, 3, 23, 2, False)

 @spaces.GPU
-def generate_image(image_path, num_steps, guidance_scale, seed, num_images, progress=gr.Progress(track_tqdm=True)):
+def generate_image(image_path, num_steps, guidance_scale, seed, num_images, use_lcm, progress=gr.Progress(track_tqdm=True)):
+
+    if use_lcm:
+        pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
+        pipeline.enable_lora()
+    else:
+        pipeline.disable_lora()
+        pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

     if image_path is None:
         raise gr.Error(f"Cannot find any input face image! Please upload a face image.")
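Note that the branch above mutates the module-level pipeline per request: the LoRA weights were loaded once at startup, so enable_lora()/disable_lora() just flip the adapter on or off, and from_config rebuilds each scheduler from the pipeline's current scheduler config. A minimal sketch of the same pattern in isolation (set_sampler is a hypothetical name; pipeline is assumed to be a diffusers pipeline with a LoRA already loaded):

from diffusers import DPMSolverMultistepScheduler, LCMScheduler

def set_sampler(pipeline, use_lcm: bool):
    # Cheap per-request switch: no weight reloading, only adapter + scheduler state.
    if use_lcm:
        pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
        pipeline.enable_lora()   # re-activate the LCM-LoRA loaded at startup
    else:
        pipeline.disable_lora()  # fall back to the base UNet behavior
        pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)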
@@ -168,11 +192,16 @@ with gr.Blocks(css=css) as demo:
                 img_file = gr.Image(label="Upload a photo with a face", type="filepath")

                 submit = gr.Button("Submit", variant="primary")
+
+                use_lcm = gr.Checkbox(
+                    label="Use LCM-LoRA to accelerate sampling", value=False,
+                    info="Reduces sampling steps significantly, but may decrease quality.",
+                )

                 with gr.Accordion(open=False, label="Advanced Options"):
                     num_steps = gr.Slider(
                         label="Number of sample steps",
-                        minimum=
+                        minimum=1,
                         maximum=100,
                         step=1,
                         value=25,
@@ -182,7 +211,7 @@ with gr.Blocks(css=css) as demo:
                         minimum=0.1,
                         maximum=10.0,
                         step=0.1,
-                        value=3,
+                        value=3.0,
                     )
                     num_images = gr.Slider(
                         label="Number of output images",
@@ -211,10 +240,16 @@ with gr.Blocks(css=css) as demo:
         api_name=False,
     ).then(
         fn=generate_image,
-        inputs=[img_file, num_steps, guidance_scale, seed, num_images],
+        inputs=[img_file, num_steps, guidance_scale, seed, num_images, use_lcm],
         outputs=[gallery]
     )
-
+
+    use_lcm.input(
+        fn=toggle_lcm_ui,
+        inputs=[use_lcm],
+        outputs=[num_steps, guidance_scale],
+        queue=False,
+    )

     gr.Examples(
         examples=get_example(),
requirements.txt
CHANGED

@@ -1,8 +1,9 @@
 numpy<1.24.0
 torch==2.0.1
 torchvision==0.15.2
-diffusers==0.
+diffusers==0.23.0
 transformers==4.34.1
+peft
 accelerate
 insightface
 onnxruntime-gpu
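The dependency changes track the code: diffusers is pinned to 0.23.0, a release that ships LCMScheduler and the LoRA enable/disable toggles, and peft is added because recent diffusers versions delegate LoRA adapter state to it. A quick sanity check under those assumptions:

# Sanity check for the pinned stack (versions taken from requirements.txt).
import diffusers
import peft

assert diffusers.__version__ == "0.23.0", diffusers.__version__
from diffusers import LCMScheduler  # would raise ImportError on older releases
print("peft backend available:", peft.__version__)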