Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -121,7 +121,7 @@ def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, st
     global young
     global pointy
     global wavy
-    global
+    global thick

     original_weights = network.proj.clone()

@@ -132,10 +132,10 @@ def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, st
     young_pad = torch.cat((young, padding), 1)
     pointy_pad = torch.cat((pointy, padding), 1)
     wavy_pad = torch.cat((wavy, padding), 1)
-
+    thick_pad = torch.cat((thick, padding), 1)


-    edited_weights = original_weights+a1*1e6*young_pad+a2*1e6*pointy_pad+a3*1e6*wavy_pad+a4*2e6*
+    edited_weights = original_weights+a1*1e6*young_pad+a2*1e6*pointy_pad+a3*1e6*wavy_pad+a4*2e6*thick_pad

     generator = generator.manual_seed(seed)
     latents = torch.randn(
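The two hunks above wire a fourth attribute slider into `edit_inference`: the new `thick` direction (built later in this diff from the CelebA "Bushy_Eyebrows" attribute) is zero-padded like the existing directions and folded into the weight edit as one more scaled term. The sketch below only illustrates that pattern; `apply_edits`, the tensor shapes, and the scale values are invented for the example and are not the app's actual code.

```python
import torch

# Minimal sketch of the weight-editing step, with assumed shapes:
# `original` is the flattened low-rank projection of one identity, and each
# `direction` is an attribute edit vector that is zero-padded to the same
# width, scaled by its slider value, and added on top.
def apply_edits(original, directions, scales):
    edited = original.clone()
    for direction, scale in zip(directions, scales):
        padding = torch.zeros(direction.shape[0],
                              original.shape[1] - direction.shape[1],
                              device=direction.device, dtype=direction.dtype)
        edited = edited + scale * torch.cat((direction, padding), 1)
    return edited

# Toy usage (the real app combines network.proj with slider values a1..a4):
original = torch.randn(1, 10000)
dirs = [torch.randn(1, 1000) for _ in range(4)]
edited = apply_edits(original, dirs, [0.5e6, 0.5e6, 0.5e6, 1.0e6])
```

The large 1e6/2e6 factors in the diff suggest the stored directions are small relative to the projection weights, so the slider values are rescaled before being applied.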
@@ -200,7 +200,7 @@ def sample_then_run():
     negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
     seed = 5
     cfg = 3.0
-    steps =
+    steps = 25
     image = inference( prompt, negative_prompt, cfg, steps, seed)
     torch.save(network.proj, "model.pt" )
     return image, "model.pt"
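This hunk sets the number of denoising steps used by `sample_then_run` to 25; the same `steps = 25` fix recurs in `run_inversion` and `file_upload` further down. The Space's own `inference(prompt, negative_prompt, cfg, steps, seed)` helper is not shown in this diff, but for orientation, here is roughly how those settings map onto a plain diffusers call. The model ID and prompt below are placeholders, not taken from the app.

```python
import torch
from diffusers import StableDiffusionPipeline

# Illustrative only: loads a placeholder Stable Diffusion checkpoint and passes
# the same kind of settings (cfg, steps, seed) the Space's inference() receives.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(5)   # seed = 5
image = pipe(
    "a photo of a person",                                  # placeholder prompt
    negative_prompt="low quality, blurry, unfinished, nudity, weapon",
    guidance_scale=3.0,                                     # cfg
    num_inference_steps=25,                                 # steps
    generator=generator,
).images[0]
```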
@@ -209,7 +209,7 @@ def sample_then_run():
 global young
 global pointy
 global wavy
-global
+global thick

 young = get_direction(df, "Young", pinverse, 1000, device)
 young = debias(young, "Male", df, pinverse, device)
@@ -235,20 +235,20 @@ wavy = debias(wavy, "Chubby", df, pinverse, device)
 wavy = debias(wavy, "Heavy_Makeup", df, pinverse, device)


-
-
-
-
-
-
-
-
-
-
-
-
-
-
+thick = get_direction(df, "Bushy_Eyebrows", pinverse, 1000, device)
+thick = debias(thick, "Male", df, pinverse, device)
+thick = debias(thick, "Young", df, pinverse, device)
+thick = debias(thick, "Pointy_Nose", df, pinverse, device)
+thick = debias(thick, "Wavy_Hair", df, pinverse, device)
+thick = debias(thick, "Mustache", df, pinverse, device)
+thick = debias(thick, "No_Beard", df, pinverse, device)
+thick = debias(thick, "Sideburns", df, pinverse, device)
+thick = debias(thick, "Big_Nose", df, pinverse, device)
+thick = debias(thick, "Big_Lips", df, pinverse, device)
+thick = debias(thick, "Black_Hair", df, pinverse, device)
+thick = debias(thick, "Brown_Hair", df, pinverse, device)
+thick = debias(thick, "Pale_Skin", df, pinverse, device)
+thick = debias(thick, "Heavy_Makeup", df, pinverse, device)



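The block above builds the new `thick` edit direction from the "Bushy_Eyebrows" attribute and then debiases it against a long list of correlated attributes (gender, age, facial hair, hair colour, makeup, and so on), mirroring how the existing `young`, `pointy`, and `wavy` directions are prepared. The repo's `get_direction` and `debias` implementations are not part of this diff; the sketch below only illustrates one plausible reading of the debias step, namely removing the component of the new direction that lies along another attribute's direction. All tensors and names here are hypothetical.

```python
import torch

# Plausible reading of "debias" as orthogonal-projection removal: subtract the
# component of the new direction that points along another attribute's direction.
# The real debias(direction, attribute, df, pinverse, device) works from labelled
# data, which is not shown in this diff.
def remove_component(direction, other):
    other = other / other.norm()
    return direction - (direction @ other.T) * other

thick = torch.randn(1, 1000)                 # hypothetical raw "Bushy_Eyebrows" direction
correlated = {name: torch.randn(1, 1000)     # hypothetical directions it is entangled with
              for name in ["Male", "Young", "Heavy_Makeup"]}

for other in correlated.values():
    thick = remove_component(thick, other)
```

Repeating this for every attribute in the list should leave a direction that, ideally, changes the bushy-eyebrows attribute without dragging the other listed attributes along with it.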
@@ -343,7 +343,7 @@ def run_inversion(dict, pcs, epochs, weight_decay,lr):
     negative_prompt = "low quality, blurry, unfinished, nudity"
     seed = 5
     cfg = 3.0
-    steps =
+    steps = 25
     image = inference( prompt, negative_prompt, cfg, steps, seed)
     torch.save(network.proj, "model.pt" )
     return image, "model.pt"
@@ -368,7 +368,7 @@ def file_upload(file):
     unet, _, _, _, _ = load_models(device)


-    network = LoRAw2w( proj, mean, std, v[:, :
+    network = LoRAw2w( proj, mean, std, v[:, :10000],
                     unet,
                     rank=1,
                     multiplier=1.0,
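When a user uploads a saved `model.pt`, `file_upload` rebuilds the `LoRAw2w` network around the uploaded projection, and this hunk keeps only the first 10,000 columns of the basis (`v[:, :10000]`). `LoRAw2w`, `proj`, `mean`, `std`, and `v` are all defined elsewhere in the repo; the toy sketch below only shows what slicing a principal-component basis like this does, with invented shapes so it runs quickly.

```python
import torch

# Sketch of the basis truncation only (the real app keeps 10,000 components).
# Assumed layout: v holds principal components as columns, proj holds the
# coordinates of one identity in that truncated basis.
d, k = 2_000, 100              # hypothetical flattened-weight dim and kept components
v = torch.randn(d, k + 50)     # basis with more columns than we keep
proj = torch.randn(1, k)       # low-dimensional representation of one identity

v_trunc = v[:, :k]             # analogous to v[:, :10000] in the diff
weights = proj @ v_trunc.T     # map coordinates back to flattened weight space, (1, d)
```

Truncating the basis presumably keeps the uploaded weights in the same low-dimensional subspace that the edit directions and sliders were computed in.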
@@ -381,7 +381,7 @@ def file_upload(file):
     negative_prompt = "low quality, blurry, unfinished, nudity"
     seed = 5
     cfg = 3.0
-    steps =
+    steps = 25
     image = inference( prompt, negative_prompt, cfg, steps, seed)
     return image
