diffusers-benchmarking-bot committed
Upload folder using huggingface_hub

Files changed:
- main/README.md +5 -6
- main/README_community_scripts.md +84 -1
- main/matryoshka.py +16 -12
main/README.md
CHANGED
@@ -4336,19 +4336,19 @@ The Abstract of the paper:
 
 **64x64**
 :-------------------------:
-| <img src="https://github.com/user-attachments/assets/…
+| <img src="https://github.com/user-attachments/assets/032738eb-c6cd-4fd9-b4d7-a7317b4b6528" width="222" height="222" alt="bird_64_64"> |
 
 - `256×256, nesting_level=1`: 1.776 GiB. With `150` DDIM inference steps:
 
 **64x64** | **256x256**
 :-------------------------:|:-------------------------:
-| <img src="https://github.com/user-attachments/assets/…
+| <img src="https://github.com/user-attachments/assets/21b9ad8b-eea6-4603-80a2-31180f391589" width="222" height="222" alt="bird_256_64"> | <img src="https://github.com/user-attachments/assets/fc411682-8a36-422c-9488-395b77d4406e" width="222" height="222" alt="bird_256_256"> |
 
-- `1024×1024, nesting_level=2`: 1.792 GiB. As one can realize the cost of adding another layer is really negligible
+- `1024×1024, nesting_level=2`: 1.792 GiB. As one can realize the cost of adding another layer is really negligible in this context! With `250` DDIM inference steps:
 
 **64x64** | **256x256** | **1024x1024**
 :-------------------------:|:-------------------------:|:-------------------------:
-| <img src="https://github.com/user-attachments/assets/…
+| <img src="https://github.com/user-attachments/assets/febf4b98-3dee-4a8e-9946-fd42e1f232e6" width="222" height="222" alt="bird_1024_64"> | <img src="https://github.com/user-attachments/assets/c5f85b40-5d6d-4267-a92a-c89dff015b9b" width="222" height="222" alt="bird_1024_256"> | <img src="https://github.com/user-attachments/assets/ad66b913-4367-4cb9-889e-bc06f4d96148" width="222" height="222" alt="bird_1024_1024"> |
 
 ```py
 from diffusers import DiffusionPipeline
@@ -4362,8 +4362,7 @@ pipe = DiffusionPipeline.from_pretrained("tolgacangoz/matryoshka-diffusion-model
 
 prompt0 = "a blue jay stops on the top of a helmet of Japanese samurai, background with sakura tree"
 prompt = f"breathtaking {prompt0}. award-winning, professional, highly detailed"
-
-image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=50).images
+image = pipe(prompt, num_inference_steps=50).images
 make_image_grid(image, rows=1, cols=len(image))
 
 # pipe.change_nesting_level(<int>) # 0, 1, or 2
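For reference, the updated README example assembles into the following runnable snippet (a minimal sketch drawn from the hunks above; the `make_image_grid` import from `diffusers.utils` is an assumption, since the diff does not show the import lines):

```py
from diffusers import DiffusionPipeline
from diffusers.utils import make_image_grid  # assumed import; not shown in the diff

# nesting_level=0 -> 64x64; 1 -> adds 256x256; 2 -> adds 1024x1024
pipe = DiffusionPipeline.from_pretrained(
    "tolgacangoz/matryoshka-diffusion-models",
    nesting_level=0,
    trust_remote_code=False,  # One needs to give permission for this code to run
).to("cuda")

prompt0 = "a blue jay stops on the top of a helmet of Japanese samurai, background with sakura tree"
prompt = f"breathtaking {prompt0}. award-winning, professional, highly detailed"

# The pipeline returns one image per active nesting level.
image = pipe(prompt, num_inference_steps=50).images
make_image_grid(image, rows=1, cols=len(image))

# pipe.change_nesting_level(1)  # 0, 1, or 2; use 50+/100+/250+ steps respectively
```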
main/README_community_scripts.md
CHANGED
@@ -8,6 +8,7 @@ If a community script doesn't work as expected, please open an issue and ping th
 |:--------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------:|
 | Using IP-Adapter with negative noise | Using negative noise with IP-adapter to better control the generation (see the [original post](https://github.com/huggingface/diffusers/discussions/7167) on the forum for more details) | [IP-Adapter Negative Noise](#ip-adapter-negative-noise) | | [Álvaro Somoza](https://github.com/asomoza)|
 | asymmetric tiling |configure seamless image tiling independently for the X and Y axes | [Asymmetric Tiling](#asymmetric-tiling ) | | [alexisrolland](https://github.com/alexisrolland)|
+| Prompt scheduling callback |Allows changing prompts during a generation | [Prompt Scheduling](#prompt-scheduling ) | | [hlky](https://github.com/hlky)|
 
 
 ## Example usages
@@ -229,4 +230,86 @@ seamless_tiling(pipeline=pipeline, x_axis=False, y_axis=False)
 
 torch.cuda.empty_cache()
 image.save('image.png')
-```
+```
+
+### Prompt Scheduling callback
+
+Prompt scheduling callback allows changing prompts during a generation, like [prompt editing in A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-editing)
+
+```python
+from diffusers import StableDiffusionPipeline
+from diffusers.callbacks import PipelineCallback, MultiPipelineCallbacks
+from diffusers.configuration_utils import register_to_config
+import torch
+from typing import Any, Dict, Optional
+
+
+pipeline: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
+    "stable-diffusion-v1-5/stable-diffusion-v1-5",
+    torch_dtype=torch.float16,
+    variant="fp16",
+    use_safetensors=True,
+).to("cuda")
+pipeline.safety_checker = None
+pipeline.requires_safety_checker = False
+
+
+class SDPromptScheduleCallback(PipelineCallback):
+    @register_to_config
+    def __init__(
+        self,
+        prompt: str,
+        negative_prompt: Optional[str] = None,
+        num_images_per_prompt: int = 1,
+        cutoff_step_ratio=1.0,
+        cutoff_step_index=None,
+    ):
+        super().__init__(
+            cutoff_step_ratio=cutoff_step_ratio, cutoff_step_index=cutoff_step_index
+        )
+
+    tensor_inputs = ["prompt_embeds"]
+
+    def callback_fn(
+        self, pipeline, step_index, timestep, callback_kwargs
+    ) -> Dict[str, Any]:
+        cutoff_step_ratio = self.config.cutoff_step_ratio
+        cutoff_step_index = self.config.cutoff_step_index
+
+        # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio
+        cutoff_step = (
+            cutoff_step_index
+            if cutoff_step_index is not None
+            else int(pipeline.num_timesteps * cutoff_step_ratio)
+        )
+
+        if step_index == cutoff_step:
+            prompt_embeds, negative_prompt_embeds = pipeline.encode_prompt(
+                prompt=self.config.prompt,
+                negative_prompt=self.config.negative_prompt,
+                device=pipeline._execution_device,
+                num_images_per_prompt=self.config.num_images_per_prompt,
+                do_classifier_free_guidance=pipeline.do_classifier_free_guidance,
+            )
+            if pipeline.do_classifier_free_guidance:
+                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+            callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
+        return callback_kwargs
+
+callback = MultiPipelineCallbacks(
+    [
+        SDPromptScheduleCallback(
+            prompt="Official portrait of a smiling world war ii general, female, cheerful, happy, detailed face, 20th century, highly detailed, cinematic lighting, digital art painting by Greg Rutkowski",
+            negative_prompt="Deformed, ugly, bad anatomy",
+            cutoff_step_ratio=0.25,
+        )
+    ]
+)
+
+image = pipeline(
+    prompt="Official portrait of a smiling world war ii general, male, cheerful, happy, detailed face, 20th century, highly detailed, cinematic lighting, digital art painting by Greg Rutkowski",
+    negative_prompt="Deformed, ugly, bad anatomy",
+    callback_on_step_end=callback,
+    callback_on_step_end_tensor_inputs=["prompt_embeds"],
+).images[0]
+```
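Since `MultiPipelineCallbacks` simply fans out over a list, the same pattern extends to more than one prompt switch per generation. A minimal sketch reusing `SDPromptScheduleCallback` from the snippet above (the prompts and cutoff ratios here are purely illustrative):

```python
# Illustrative schedule: switch the prompt at 25% of the steps, then again at 60%.
callback = MultiPipelineCallbacks(
    [
        SDPromptScheduleCallback(prompt="a stone castle on a hill, oil painting", cutoff_step_ratio=0.25),
        SDPromptScheduleCallback(prompt="a stone castle on a hill at night, oil painting", cutoff_step_ratio=0.60),
    ]
)
```

Each callback re-encodes its prompt only when `step_index` hits its own cutoff step, so the overhead is one extra `encode_prompt` call per switch.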
main/matryoshka.py
CHANGED
@@ -107,15 +107,16 @@ EXAMPLE_DOC_STRING = """
 
         >>> # nesting_level=0 -> 64x64; nesting_level=1 -> 256x256 - 64x64; nesting_level=2 -> 1024x1024 - 256x256 - 64x64
         >>> pipe = DiffusionPipeline.from_pretrained("tolgacangoz/matryoshka-diffusion-models",
-        ...     …
+        ...     nesting_level=0,
+        ...     trust_remote_code=False,  # One needs to give permission for this code to run
+        ...     ).to("cuda")
 
         >>> prompt0 = "a blue jay stops on the top of a helmet of Japanese samurai, background with sakura tree"
         >>> prompt = f"breathtaking {prompt0}. award-winning, professional, highly detailed"
-        >>>
-        >>> image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=50).images
+        >>> image = pipe(prompt, num_inference_steps=50).images
         >>> make_image_grid(image, rows=1, cols=len(image))
 
-        >>> pipe.change_nesting_level(<int>) # 0, 1, or 2
+        >>> # pipe.change_nesting_level(<int>) # 0, 1, or 2
         >>> # 50+, 100+, and 250+ num_inference_steps are recommended for nesting levels 0, 1, and 2 respectively.
         ```
 """
@@ -420,6 +421,7 @@ class MatryoshkaDDIMScheduler(SchedulerMixin, ConfigMixin):
         self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
 
         self.scales = None
+        self.schedule_shifted_power = 1.0
 
     def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
         """
@@ -532,6 +534,7 @@ class MatryoshkaDDIMScheduler(SchedulerMixin, ConfigMixin):
 
     def get_schedule_shifted(self, alpha_prod, scale_factor=None):
         if (scale_factor is not None) and (scale_factor > 1):  # rescale noise schedule
+            scale_factor = scale_factor**self.schedule_shifted_power
            snr = alpha_prod / (1 - alpha_prod)
            scaled_snr = snr / scale_factor
            alpha_prod = 1 / (1 + 1 / scaled_snr)
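Taken together, these two hunks make the SNR shift steeper for the deepest model: `get_schedule_shifted` now raises the scale factor to `schedule_shifted_power`, which the pipeline sets to 2.0 for nesting level 2 (see the hunks below). A small numeric sketch of the arithmetic (the values are made up; the real code operates on schedule tensors, not scalars):

```python
# Sketch of get_schedule_shifted's SNR rescaling with illustrative scalars.
def schedule_shifted(alpha_prod, scale_factor, power=1.0):
    scale_factor = scale_factor**power   # the new line in the hunk above
    snr = alpha_prod / (1 - alpha_prod)  # signal-to-noise ratio
    scaled_snr = snr / scale_factor      # shift the schedule in SNR space
    return 1 / (1 + 1 / scaled_snr)

print(schedule_shifted(0.9, 4.0))             # power=1.0: SNR 9 -> 2.25, alpha' ~= 0.692
print(schedule_shifted(0.9, 4.0, power=2.0))  # power=2.0: SNR 9 -> 0.5625, alpha' = 0.36
```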
@@ -639,17 +642,14 @@ class MatryoshkaDDIMScheduler(SchedulerMixin, ConfigMixin):
         # 4. Clip or threshold "predicted x_0"
         if self.config.thresholding:
             if len(model_output) > 1:
-                pred_original_sample = [
-                    self._threshold_sample(p_o_s * scale) / scale
-                    for p_o_s, scale in zip(pred_original_sample, self.scales)
-                ]
+                pred_original_sample = [self._threshold_sample(p_o_s) for p_o_s in pred_original_sample]
             else:
                 pred_original_sample = self._threshold_sample(pred_original_sample)
         elif self.config.clip_sample:
             if len(model_output) > 1:
                 pred_original_sample = [
-                    …
-                    for p_o_s …
+                    p_o_s.clamp(-self.config.clip_sample_range, self.config.clip_sample_range)
+                    for p_o_s in pred_original_sample
                 ]
             else:
                 pred_original_sample = pred_original_sample.clamp(
@@ -3816,6 +3816,8 @@ class MatryoshkaPipeline(
 
         if hasattr(unet, "nest_ratio"):
             scheduler.scales = unet.nest_ratio + [1]
+            if nesting_level == 2:
+                scheduler.schedule_shifted_power = 2.0
 
         self.register_modules(
             text_encoder=text_encoder,
@@ -3842,12 +3844,14 @@ class MatryoshkaPipeline(
             ).to(self.device)
             self.config.nesting_level = 1
             self.scheduler.scales = self.unet.nest_ratio + [1]
+            self.scheduler.schedule_shifted_power = 1.0
         elif nesting_level == 2:
             self.unet = NestedUNet2DConditionModel.from_pretrained(
                 "tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_2"
             ).to(self.device)
             self.config.nesting_level = 2
             self.scheduler.scales = self.unet.nest_ratio + [1]
+            self.scheduler.schedule_shifted_power = 2.0
         else:
             raise ValueError("Currently, nesting levels 0, 1, and 2 are supported.")
 
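Both hunks keep the scheduler in sync with the UNet: `schedule_shifted_power` must follow the nesting level just as `scales` does, whether the pipeline is constructed at a given level or switched later. A hedged usage sketch (assuming `pipe` and `prompt` from the docstring example above):

```python
# Reload the UNet at the deepest nesting level; per the hunks above this also
# resets scheduler.scales and scheduler.schedule_shifted_power (to 2.0).
pipe.change_nesting_level(2)
image = pipe(prompt, num_inference_steps=250).images  # 250+ steps recommended for level 2
```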
@@ -4627,8 +4631,8 @@ class MatryoshkaPipeline(
             image = latents
 
         if self.scheduler.scales is not None:
-            for i, …
-                image[i] = self.image_processor.postprocess(img…
+            for i, img in enumerate(image):
+                image[i] = self.image_processor.postprocess(img, output_type=output_type)[0]
         else:
             image = self.image_processor.postprocess(image, output_type=output_type)
 
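For context on this last hunk: with nested levels active, `image` is a list holding one batch tensor per resolution, so each level is postprocessed on its own, and the `[0]` unwraps the single-image batch that `postprocess` returns. A minimal sketch with stand-in tensors (shapes are illustrative, and `VaeImageProcessor` is assumed as the processor class):

```python
import torch
from diffusers.image_processor import VaeImageProcessor

image_processor = VaeImageProcessor()

# Stand-ins for nesting_level=1 outputs: one single-image batch per resolution.
image = [torch.rand(1, 3, 256, 256), torch.rand(1, 3, 64, 64)]

# Mirrors the fixed loop: postprocess each level separately; postprocess
# returns one entry per batch item, hence the [0].
for i, img in enumerate(image):
    image[i] = image_processor.postprocess(img, output_type="pil")[0]
```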