Commit 0a8222c · 1 Parent(s): 165cd0f · update
app.py CHANGED
@@ -59,13 +59,13 @@ m1_model_path = 'JingyeChen22/textdiffuser2_layout_planner'
 
 m1_tokenizer = AutoTokenizer.from_pretrained(m1_model_path, use_fast=False)
 m1_model = AutoModelForCausalLM.from_pretrained(
-m1_model_path, low_cpu_mem_usage=True
+m1_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
 ).cuda()
 
 #### import diffusion models
 text_encoder = CLIPTextModel.from_pretrained(
 'JingyeChen22/textdiffuser2-full-ft', subfolder="text_encoder", ignore_mismatched_sizes=True
-).cuda()
+).cuda().half()
 tokenizer = CLIPTokenizer.from_pretrained(
 'runwayml/stable-diffusion-v1-5', subfolder="tokenizer"
 )
@@ -83,20 +83,20 @@ for c in alphabet:
 print(len(tokenizer))
 print('***************')
 
-vae = AutoencoderKL.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="vae").cuda()
+vae = AutoencoderKL.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="vae").half().cuda()
 unet = UNet2DConditionModel.from_pretrained(
 'JingyeChen22/textdiffuser2-full-ft', subfolder="unet"
-).cuda()
+).half().cuda()
 text_encoder.resize_token_embeddings(len(tokenizer))
 
 
-
-
-
-
-
-
-
+#### load lcm components
+model_id = "lambdalabs/sd-pokemon-diffusers"
+lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
+pipe = DiffusionPipeline.from_pretrained(model_id, unet=unet, tokenizer=tokenizer, text_encoder=text_encoder, torch_dtype=torch.float16)
+pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+pipe.load_lora_weights(lcm_lora_id)
+pipe.to(device="cuda")
 
 
 #### for interactive
@@ -339,11 +339,11 @@ def text_to_image(prompt,keywords,radio,slider_step,slider_guidance,slider_batch
 
 scheduler = DDPMScheduler.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="scheduler")
 scheduler.set_timesteps(slider_step)
-noise = torch.randn((slider_batch, 4, 64, 64)).to("cuda")
+noise = torch.randn((slider_batch, 4, 64, 64)).to("cuda").half()
 input = noise
 
-encoder_hidden_states_cond = text_encoder(prompts_cond)[0]
-encoder_hidden_states_nocond = text_encoder(prompts_nocond)[0]
+encoder_hidden_states_cond = text_encoder(prompts_cond)[0].half()
+encoder_hidden_states_nocond = text_encoder(prompts_nocond)[0].half()
 
 
 for t in tqdm(scheduler.timesteps):
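
Taken together, the first two hunks switch every model the Space puts on the GPU to half precision: the layout-planner LM via `torch_dtype=torch.float16`, and the CLIP text encoder, VAE, and UNet via `.half()`. A minimal sketch of that loading pattern, assuming the same checkpoints named in app.py and a CUDA device, looks roughly like this:

```python
# Sketch only: fp16 loading of the models touched by this commit.
# Assumes a CUDA GPU and the checkpoints referenced in app.py.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel

m1_model_path = 'JingyeChen22/textdiffuser2_layout_planner'
m1_tokenizer = AutoTokenizer.from_pretrained(m1_model_path, use_fast=False)
m1_model = AutoModelForCausalLM.from_pretrained(
    m1_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True  # load weights directly in fp16
).cuda()

text_encoder = CLIPTextModel.from_pretrained(
    'JingyeChen22/textdiffuser2-full-ft', subfolder="text_encoder", ignore_mismatched_sizes=True
).cuda().half()  # cast to fp16 after loading
tokenizer = CLIPTokenizer.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="tokenizer")

vae = AutoencoderKL.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="vae").half().cuda()
unet = UNet2DConditionModel.from_pretrained(
    'JingyeChen22/textdiffuser2-full-ft', subfolder="unet"
).half().cuda()
```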
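The new block in the second hunk wraps those components in a `DiffusionPipeline` (loading the `lambdalabs/sd-pokemon-diffusers` base but overriding its UNet, tokenizer, and text encoder), swaps in `LCMScheduler`, and attaches the `latent-consistency/lcm-lora-sdv1-5` adapter so sampling can run in very few steps. As a standalone illustration of that diffusers pattern, using plain Stable Diffusion 1.5 and an illustrative prompt rather than the Space's own components, one might write:

```python
# Sketch: fp16 Stable Diffusion 1.5 with the LCM-LoRA adapter.
# Model IDs other than the LCM-LoRA repo are illustrative, not the Space's setup.
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,  # load weights in half precision, as in the commit
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)  # swap in the LCM scheduler
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")      # attach the LCM-LoRA adapter
pipe.to(device="cuda")

# LCM-LoRA targets very few denoising steps and low classifier-free guidance.
image = pipe(
    "a storefront sign that reads 'TextDiffuser'",
    num_inference_steps=4,
    guidance_scale=1.0,
).images[0]
image.save("sample.png")
```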
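The last hunk casts the initial latent noise and both text-encoder outputs to fp16 so they match the half-precision UNet inside the hand-written sampling loop of `text_to_image`. A rough sketch of such a loop, assuming `unet`, `vae`, `text_encoder`, `prompts_cond`, and `prompts_nocond` already exist as in app.py, could look like this:

```python
# Sketch of a manual fp16 denoising loop with classifier-free guidance.
# Shapes follow SD 1.5 latents (4 x 64 x 64); the guidance scale is illustrative
# (the Space exposes it as a slider).
import torch
from tqdm import tqdm
from diffusers import DDPMScheduler

scheduler = DDPMScheduler.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="scheduler")
scheduler.set_timesteps(20)

latents = torch.randn((1, 4, 64, 64), device="cuda").half()  # fp16 starting noise
cond = text_encoder(prompts_cond)[0].half()                  # fp16 conditional embeddings
uncond = text_encoder(prompts_nocond)[0].half()              # fp16 unconditional embeddings
guidance_scale = 7.5

for t in tqdm(scheduler.timesteps):
    with torch.no_grad():
        noise_cond = unet(latents, t, encoder_hidden_states=cond).sample
        noise_uncond = unet(latents, t, encoder_hidden_states=uncond).sample
        noise_pred = noise_uncond + guidance_scale * (noise_cond - noise_uncond)
        latents = scheduler.step(noise_pred, t, latents).prev_sample  # one reverse-diffusion step

# Decode latents with the fp16 VAE (SD scales latents by 1/0.18215).
with torch.no_grad():
    image = vae.decode(latents / 0.18215).sample
```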