kadirnar committed
Commit b89b677 · 1 Parent(s): e1cc700

Upload 4 files

deepcache_stable_diffusion.py ADDED
@@ -0,0 +1,1133 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import time
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ import numpy as np
20
+ from packaging import version
21
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
22
+
23
+ from diffusers.configuration_utils import FrozenDict
24
+ from diffusers.image_processor import VaeImageProcessor, PipelineImageInput
25
+ from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
+
27
+
28
+ from diffusers.models import AutoencoderKL
29
+ from diffusers.models.attention_processor import FusedAttnProcessor2_0
30
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
31
+ from diffusers.schedulers import KarrasDiffusionSchedulers
32
+ from diffusers.utils import (
33
+ USE_PEFT_BACKEND,
34
+ deprecate,
35
+ logging,
36
+ replace_example_docstring,
37
+ scale_lora_layers,
38
+ unscale_lora_layers,
39
+ )
40
+ from diffusers.utils.torch_utils import randn_tensor
41
+
42
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
43
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
44
+
45
+ from .unet_2d_condition import UNet2DConditionModel, ImageProjection
46
+ from .pipeline_utils import DiffusionPipeline
47
+
48
+
49
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
50
+
51
+ EXAMPLE_DOC_STRING = """
52
+ Examples:
53
+ ```py
54
+ >>> import torch
55
+ >>> from diffusers import StableDiffusionPipeline
56
+
57
+ >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
58
+ >>> pipe = pipe.to("cuda")
59
+
60
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
61
+ >>> image = pipe(prompt).images[0]
62
+ ```
63
+ """
64
+
65
+ def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100):
66
+ samples = []
67
+
68
+ while len(samples) < sample_size:
69
+ # Sample from a Gaussian centered at n/2
70
+ sample = int(np.random.normal(loc=n/2, scale=std_dev))
71
+
72
+ # Check if the sample is in bounds
73
+ if 1 <= sample < n and sample not in samples:
74
+ samples.append(sample)
75
+
76
+ return samples
77
+
78
+ def sample_from_quad(total_numbers, n_samples, pow=1.2):
79
+ while pow > 1:
80
+ # Generate linearly spaced values between 0 and a max value
81
+ x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1)
82
+
83
+ # Raise these values to the power of `pow` to get a non-linear distribution
84
+ indices = np.unique(np.int32(x_values**pow))[:-1]
85
+ if len(indices) == n_samples:
86
+ break
87
+ pow -= 0.02
88
+ if pow <= 1:
89
+ raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.")
90
+ return indices, pow
91
+
92
+ def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2):
93
+ while pow > 1:
94
+ # Generate linearly spaced values between 0 and a max value
95
+ x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1)
96
+ indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]]
97
+ if len(indices) == n_samples:
98
+ break
99
+ pow -= 0.02
100
+ if pow <= 1:
101
+ raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.")
102
+ return indices, pow
103
+
104
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
105
+ """
106
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
107
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
108
+ """
109
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
110
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
111
+ # rescale the results from guidance (fixes overexposure)
112
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
113
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
114
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
115
+ return noise_cfg
116
+
117
+
118
+ def retrieve_timesteps(
119
+ scheduler,
120
+ num_inference_steps: Optional[int] = None,
121
+ device: Optional[Union[str, torch.device]] = None,
122
+ timesteps: Optional[List[int]] = None,
123
+ **kwargs,
124
+ ):
125
+ """
126
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
127
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
128
+
129
+ Args:
130
+ scheduler (`SchedulerMixin`):
131
+ The scheduler to get timesteps from.
132
+ num_inference_steps (`int`):
133
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
134
+ `timesteps` must be `None`.
135
+ device (`str` or `torch.device`, *optional*):
136
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
137
+ timesteps (`List[int]`, *optional*):
138
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
139
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
140
+ must be `None`.
141
+
142
+ Returns:
143
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
144
+ second element is the number of inference steps.
145
+ """
146
+ if timesteps is not None:
147
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
148
+ if not accepts_timesteps:
149
+ raise ValueError(
150
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
151
+ f" timestep schedules. Please check whether you are using the correct scheduler."
152
+ )
153
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
154
+ timesteps = scheduler.timesteps
155
+ num_inference_steps = len(timesteps)
156
+ else:
157
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
158
+ timesteps = scheduler.timesteps
159
+ return timesteps, num_inference_steps
160
+
161
+ class StableDiffusionPipeline(
162
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
163
+ ):
164
+ r"""
165
+ Pipeline for text-to-image generation using Stable Diffusion.
166
+
167
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
168
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
169
+
170
+ The pipeline also inherits the following loading methods:
171
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
172
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
173
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
174
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
175
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
176
+
177
+ Args:
178
+ vae ([`AutoencoderKL`]):
179
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
180
+ text_encoder ([`~transformers.CLIPTextModel`]):
181
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
182
+ tokenizer ([`~transformers.CLIPTokenizer`]):
183
+ A `CLIPTokenizer` to tokenize text.
184
+ unet ([`UNet2DConditionModel`]):
185
+ A `UNet2DConditionModel` to denoise the encoded image latents.
186
+ scheduler ([`SchedulerMixin`]):
187
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
188
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
189
+ safety_checker ([`StableDiffusionSafetyChecker`]):
190
+ Classification module that estimates whether generated images could be considered offensive or harmful.
191
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
192
+ about a model's potential harms.
193
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
194
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
195
+ """
196
+
197
+ model_cpu_offload_seq = "text_encoder->unet->vae"
198
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
199
+ _exclude_from_cpu_offload = ["safety_checker"]
200
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
201
+
202
+ def __init__(
203
+ self,
204
+ vae: AutoencoderKL,
205
+ text_encoder: CLIPTextModel,
206
+ tokenizer: CLIPTokenizer,
207
+ unet: UNet2DConditionModel,
208
+ scheduler: KarrasDiffusionSchedulers,
209
+ safety_checker: StableDiffusionSafetyChecker,
210
+ feature_extractor: CLIPImageProcessor,
211
+ image_encoder: CLIPVisionModelWithProjection = None,
212
+ requires_safety_checker: bool = True,
213
+ ):
214
+ super().__init__()
215
+
216
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
217
+ deprecation_message = (
218
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
219
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
220
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
221
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
222
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
223
+ " file"
224
+ )
225
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
226
+ new_config = dict(scheduler.config)
227
+ new_config["steps_offset"] = 1
228
+ scheduler._internal_dict = FrozenDict(new_config)
229
+
230
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
231
+ deprecation_message = (
232
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
233
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
234
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
235
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
236
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
237
+ )
238
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
239
+ new_config = dict(scheduler.config)
240
+ new_config["clip_sample"] = False
241
+ scheduler._internal_dict = FrozenDict(new_config)
242
+
243
+ if safety_checker is None and requires_safety_checker:
244
+ logger.warning(
245
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
246
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
247
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
248
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
249
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
250
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
251
+ )
252
+
253
+ if safety_checker is not None and feature_extractor is None:
254
+ raise ValueError(
255
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
256
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
257
+ )
258
+
259
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
260
+ version.parse(unet.config._diffusers_version).base_version
261
+ ) < version.parse("0.9.0.dev0")
262
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
263
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
264
+ deprecation_message = (
265
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
266
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
267
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
268
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
269
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
270
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
271
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
272
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
273
+ " the `unet/config.json` file"
274
+ )
275
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
276
+ new_config = dict(unet.config)
277
+ new_config["sample_size"] = 64
278
+ unet._internal_dict = FrozenDict(new_config)
279
+
280
+ self.register_modules(
281
+ vae=vae,
282
+ text_encoder=text_encoder,
283
+ tokenizer=tokenizer,
284
+ unet=unet,
285
+ scheduler=scheduler,
286
+ safety_checker=safety_checker,
287
+ feature_extractor=feature_extractor,
288
+ image_encoder=image_encoder,
289
+ )
290
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
291
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
292
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
293
+
294
+ def enable_vae_slicing(self):
295
+ r"""
296
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
297
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
298
+ """
299
+ self.vae.enable_slicing()
300
+
301
+ def disable_vae_slicing(self):
302
+ r"""
303
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
304
+ computing decoding in one step.
305
+ """
306
+ self.vae.disable_slicing()
307
+
308
+ def enable_vae_tiling(self):
309
+ r"""
310
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
311
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
312
+ processing larger images.
313
+ """
314
+ self.vae.enable_tiling()
315
+
316
+ def disable_vae_tiling(self):
317
+ r"""
318
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
319
+ computing decoding in one step.
320
+ """
321
+ self.vae.disable_tiling()
322
+
323
+ def _encode_prompt(
324
+ self,
325
+ prompt,
326
+ device,
327
+ num_images_per_prompt,
328
+ do_classifier_free_guidance,
329
+ negative_prompt=None,
330
+ prompt_embeds: Optional[torch.FloatTensor] = None,
331
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
332
+ lora_scale: Optional[float] = None,
333
+ **kwargs,
334
+ ):
335
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
336
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
337
+
338
+ prompt_embeds_tuple = self.encode_prompt(
339
+ prompt=prompt,
340
+ device=device,
341
+ num_images_per_prompt=num_images_per_prompt,
342
+ do_classifier_free_guidance=do_classifier_free_guidance,
343
+ negative_prompt=negative_prompt,
344
+ prompt_embeds=prompt_embeds,
345
+ negative_prompt_embeds=negative_prompt_embeds,
346
+ lora_scale=lora_scale,
347
+ **kwargs,
348
+ )
349
+
350
+ # concatenate for backwards compatibility
351
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
352
+
353
+ return prompt_embeds
354
+
355
+ def encode_prompt(
356
+ self,
357
+ prompt,
358
+ device,
359
+ num_images_per_prompt,
360
+ do_classifier_free_guidance,
361
+ negative_prompt=None,
362
+ prompt_embeds: Optional[torch.FloatTensor] = None,
363
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
364
+ lora_scale: Optional[float] = None,
365
+ clip_skip: Optional[int] = None,
366
+ ):
367
+ r"""
368
+ Encodes the prompt into text encoder hidden states.
369
+
370
+ Args:
371
+ prompt (`str` or `List[str]`, *optional*):
372
+ prompt to be encoded
373
+ device: (`torch.device`):
374
+ torch device
375
+ num_images_per_prompt (`int`):
376
+ number of images that should be generated per prompt
377
+ do_classifier_free_guidance (`bool`):
378
+ whether to use classifier free guidance or not
379
+ negative_prompt (`str` or `List[str]`, *optional*):
380
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
381
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
382
+ less than `1`).
383
+ prompt_embeds (`torch.FloatTensor`, *optional*):
384
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
385
+ provided, text embeddings will be generated from `prompt` input argument.
386
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
387
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
388
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
389
+ argument.
390
+ lora_scale (`float`, *optional*):
391
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
392
+ clip_skip (`int`, *optional*):
393
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
394
+ the output of the pre-final layer will be used for computing the prompt embeddings.
395
+ """
396
+ # set lora scale so that monkey patched LoRA
397
+ # function of text encoder can correctly access it
398
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
399
+ self._lora_scale = lora_scale
400
+
401
+ # dynamically adjust the LoRA scale
402
+ if not USE_PEFT_BACKEND:
403
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
404
+ else:
405
+ scale_lora_layers(self.text_encoder, lora_scale)
406
+
407
+ if prompt is not None and isinstance(prompt, str):
408
+ batch_size = 1
409
+ elif prompt is not None and isinstance(prompt, list):
410
+ batch_size = len(prompt)
411
+ else:
412
+ batch_size = prompt_embeds.shape[0]
413
+
414
+ if prompt_embeds is None:
415
+ # textual inversion: process multi-vector tokens if necessary
416
+ if isinstance(self, TextualInversionLoaderMixin):
417
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
418
+
419
+ text_inputs = self.tokenizer(
420
+ prompt,
421
+ padding="max_length",
422
+ max_length=self.tokenizer.model_max_length,
423
+ truncation=True,
424
+ return_tensors="pt",
425
+ )
426
+ text_input_ids = text_inputs.input_ids
427
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
428
+
429
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
430
+ text_input_ids, untruncated_ids
431
+ ):
432
+ removed_text = self.tokenizer.batch_decode(
433
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
434
+ )
435
+ logger.warning(
436
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
437
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
438
+ )
439
+
440
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
441
+ attention_mask = text_inputs.attention_mask.to(device)
442
+ else:
443
+ attention_mask = None
444
+
445
+ if clip_skip is None:
446
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
447
+ prompt_embeds = prompt_embeds[0]
448
+ else:
449
+ prompt_embeds = self.text_encoder(
450
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
451
+ )
452
+ # Access the `hidden_states` first, that contains a tuple of
453
+ # all the hidden states from the encoder layers. Then index into
454
+ # the tuple to access the hidden states from the desired layer.
455
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
456
+ # We also need to apply the final LayerNorm here to not mess with the
457
+ # representations. The `last_hidden_states` that we typically use for
458
+ # obtaining the final prompt representations passes through the LayerNorm
459
+ # layer.
460
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
461
+
462
+ if self.text_encoder is not None:
463
+ prompt_embeds_dtype = self.text_encoder.dtype
464
+ elif self.unet is not None:
465
+ prompt_embeds_dtype = self.unet.dtype
466
+ else:
467
+ prompt_embeds_dtype = prompt_embeds.dtype
468
+
469
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
470
+
471
+ bs_embed, seq_len, _ = prompt_embeds.shape
472
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
473
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
474
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
475
+
476
+ # get unconditional embeddings for classifier free guidance
477
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
478
+ uncond_tokens: List[str]
479
+ if negative_prompt is None:
480
+ uncond_tokens = [""] * batch_size
481
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
482
+ raise TypeError(
483
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
484
+ f" {type(prompt)}."
485
+ )
486
+ elif isinstance(negative_prompt, str):
487
+ uncond_tokens = [negative_prompt]
488
+ elif batch_size != len(negative_prompt):
489
+ raise ValueError(
490
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
491
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
492
+ " the batch size of `prompt`."
493
+ )
494
+ else:
495
+ uncond_tokens = negative_prompt
496
+
497
+ # textual inversion: process multi-vector tokens if necessary
498
+ if isinstance(self, TextualInversionLoaderMixin):
499
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
500
+
501
+ max_length = prompt_embeds.shape[1]
502
+ uncond_input = self.tokenizer(
503
+ uncond_tokens,
504
+ padding="max_length",
505
+ max_length=max_length,
506
+ truncation=True,
507
+ return_tensors="pt",
508
+ )
509
+
510
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
511
+ attention_mask = uncond_input.attention_mask.to(device)
512
+ else:
513
+ attention_mask = None
514
+
515
+ negative_prompt_embeds = self.text_encoder(
516
+ uncond_input.input_ids.to(device),
517
+ attention_mask=attention_mask,
518
+ )
519
+ negative_prompt_embeds = negative_prompt_embeds[0]
520
+
521
+ if do_classifier_free_guidance:
522
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
523
+ seq_len = negative_prompt_embeds.shape[1]
524
+
525
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
526
+
527
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
528
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
529
+
530
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
531
+ # Retrieve the original scale by scaling back the LoRA layers
532
+ unscale_lora_layers(self.text_encoder, lora_scale)
533
+
534
+ return prompt_embeds, negative_prompt_embeds
535
+
536
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
537
+ dtype = next(self.image_encoder.parameters()).dtype
538
+
539
+ if not isinstance(image, torch.Tensor):
540
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
541
+
542
+ image = image.to(device=device, dtype=dtype)
543
+ if output_hidden_states:
544
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
545
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
546
+ uncond_image_enc_hidden_states = self.image_encoder(
547
+ torch.zeros_like(image), output_hidden_states=True
548
+ ).hidden_states[-2]
549
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
550
+ num_images_per_prompt, dim=0
551
+ )
552
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
553
+ else:
554
+ image_embeds = self.image_encoder(image).image_embeds
555
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
556
+ uncond_image_embeds = torch.zeros_like(image_embeds)
557
+
558
+ return image_embeds, uncond_image_embeds
559
+
560
+ def run_safety_checker(self, image, device, dtype):
561
+ if self.safety_checker is None:
562
+ has_nsfw_concept = None
563
+ else:
564
+ if torch.is_tensor(image):
565
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
566
+ else:
567
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
568
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
569
+ image, has_nsfw_concept = self.safety_checker(
570
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
571
+ )
572
+ return image, has_nsfw_concept
573
+
574
+ def decode_latents(self, latents):
575
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
576
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
577
+
578
+ latents = 1 / self.vae.config.scaling_factor * latents
579
+ image = self.vae.decode(latents, return_dict=False)[0]
580
+ image = (image / 2 + 0.5).clamp(0, 1)
581
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
582
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
583
+ return image
584
+
585
+ def prepare_extra_step_kwargs(self, generator, eta):
586
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
587
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
588
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
589
+ # and should be between [0, 1]
590
+
591
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
592
+ extra_step_kwargs = {}
593
+ if accepts_eta:
594
+ extra_step_kwargs["eta"] = eta
595
+
596
+ # check if the scheduler accepts generator
597
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
598
+ if accepts_generator:
599
+ extra_step_kwargs["generator"] = generator
600
+ return extra_step_kwargs
601
+
602
+ def check_inputs(
603
+ self,
604
+ prompt,
605
+ height,
606
+ width,
607
+ callback_steps,
608
+ negative_prompt=None,
609
+ prompt_embeds=None,
610
+ negative_prompt_embeds=None,
611
+ callback_on_step_end_tensor_inputs=None,
612
+ ):
613
+ if height % 8 != 0 or width % 8 != 0:
614
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
615
+
616
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
617
+ raise ValueError(
618
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
619
+ f" {type(callback_steps)}."
620
+ )
621
+ if callback_on_step_end_tensor_inputs is not None and not all(
622
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
623
+ ):
624
+ raise ValueError(
625
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
626
+ )
627
+
628
+ if prompt is not None and prompt_embeds is not None:
629
+ raise ValueError(
630
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
631
+ " only forward one of the two."
632
+ )
633
+ elif prompt is None and prompt_embeds is None:
634
+ raise ValueError(
635
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
636
+ )
637
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
638
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
639
+
640
+ if negative_prompt is not None and negative_prompt_embeds is not None:
641
+ raise ValueError(
642
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
643
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
644
+ )
645
+
646
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
647
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
648
+ raise ValueError(
649
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
650
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
651
+ f" {negative_prompt_embeds.shape}."
652
+ )
653
+
654
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
655
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
656
+ if isinstance(generator, list) and len(generator) != batch_size:
657
+ raise ValueError(
658
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
659
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
660
+ )
661
+
662
+ if latents is None:
663
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
664
+ else:
665
+ latents = latents.to(device)
666
+
667
+ # scale the initial noise by the standard deviation required by the scheduler
668
+ latents = latents * self.scheduler.init_noise_sigma
669
+ return latents
670
+
671
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
672
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
673
+
674
+ The suffixes after the scaling factors represent the stages where they are being applied.
675
+
676
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
677
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
678
+
679
+ Args:
680
+ s1 (`float`):
681
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
682
+ mitigate "oversmoothing effect" in the enhanced denoising process.
683
+ s2 (`float`):
684
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
685
+ mitigate "oversmoothing effect" in the enhanced denoising process.
686
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
687
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
688
+ """
689
+ if not hasattr(self, "unet"):
690
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
691
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
692
+
693
+ def disable_freeu(self):
694
+ """Disables the FreeU mechanism if enabled."""
695
+ self.unet.disable_freeu()
696
+
697
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
698
+ def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
699
+ """
700
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
701
+ key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
702
+
703
+ <Tip warning={true}>
704
+
705
+ This API is 🧪 experimental.
706
+
707
+ </Tip>
708
+
709
+ Args:
710
+ unet (`bool`, defaults to `True`): To apply fusion on the UNet.
711
+ vae (`bool`, defaults to `True`): To apply fusion on the VAE.
712
+ """
713
+ self.fusing_unet = False
714
+ self.fusing_vae = False
715
+
716
+ if unet:
717
+ self.fusing_unet = True
718
+ self.unet.fuse_qkv_projections()
719
+ self.unet.set_attn_processor(FusedAttnProcessor2_0())
720
+
721
+ if vae:
722
+ if not isinstance(self.vae, AutoencoderKL):
723
+ raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
724
+
725
+ self.fusing_vae = True
726
+ self.vae.fuse_qkv_projections()
727
+ self.vae.set_attn_processor(FusedAttnProcessor2_0())
728
+
729
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
730
+ def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
731
+ """Disable QKV projection fusion if enabled.
732
+
733
+ <Tip warning={true}>
734
+
735
+ This API is 🧪 experimental.
736
+
737
+ </Tip>
738
+
739
+ Args:
740
+ unet (`bool`, defaults to `True`): To apply fusion on the UNet.
741
+ vae (`bool`, defaults to `True`): To apply fusion on the VAE.
742
+
743
+ """
744
+ if unet:
745
+ if not self.fusing_unet:
746
+ logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
747
+ else:
748
+ self.unet.unfuse_qkv_projections()
749
+ self.fusing_unet = False
750
+
751
+ if vae:
752
+ if not self.fusing_vae:
753
+ logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
754
+ else:
755
+ self.vae.unfuse_qkv_projections()
756
+ self.fusing_vae = False
757
+
758
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
759
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
760
+ """
761
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
762
+
763
+ Args:
764
+ w (`torch.Tensor`):
765
+ guidance scale values at which to generate the embedding vectors
766
+ embedding_dim (`int`, *optional*, defaults to 512):
767
+ dimension of the embeddings to generate
768
+ dtype:
769
+ data type of the generated embeddings
770
+
771
+ Returns:
772
+ `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
773
+ """
774
+ assert len(w.shape) == 1
775
+ w = w * 1000.0
776
+
777
+ half_dim = embedding_dim // 2
778
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
779
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
780
+ emb = w.to(dtype)[:, None] * emb[None, :]
781
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
782
+ if embedding_dim % 2 == 1: # zero pad
783
+ emb = torch.nn.functional.pad(emb, (0, 1))
784
+ assert emb.shape == (w.shape[0], embedding_dim)
785
+ return emb
786
+
787
+ @property
788
+ def guidance_scale(self):
789
+ return self._guidance_scale
790
+
791
+ @property
792
+ def guidance_rescale(self):
793
+ return self._guidance_rescale
794
+
795
+ @property
796
+ def clip_skip(self):
797
+ return self._clip_skip
798
+
799
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
800
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
801
+ # corresponds to doing no classifier free guidance.
802
+ @property
803
+ def do_classifier_free_guidance(self):
804
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
805
+
806
+ @property
807
+ def cross_attention_kwargs(self):
808
+ return self._cross_attention_kwargs
809
+
810
+ @property
811
+ def num_timesteps(self):
812
+ return self._num_timesteps
813
+
814
+ @torch.no_grad()
815
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
816
+ def __call__(
817
+ self,
818
+ prompt: Union[str, List[str]] = None,
819
+ height: Optional[int] = None,
820
+ width: Optional[int] = None,
821
+ num_inference_steps: int = 50,
822
+ timesteps: List[int] = None,
823
+ guidance_scale: float = 7.5,
824
+ negative_prompt: Optional[Union[str, List[str]]] = None,
825
+ num_images_per_prompt: Optional[int] = 1,
826
+ eta: float = 0.0,
827
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
828
+ latents: Optional[torch.FloatTensor] = None,
829
+ prompt_embeds: Optional[torch.FloatTensor] = None,
830
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
831
+ ip_adapter_image: Optional[PipelineImageInput] = None,
832
+ output_type: Optional[str] = "pil",
833
+ return_dict: bool = True,
834
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
835
+ guidance_rescale: float = 0.0,
836
+ clip_skip: Optional[int] = None,
837
+ cache_interval: int = 1,
838
+ cache_layer_id: int = None,
839
+ cache_block_id: int = None,
840
+ uniform: bool = True,
841
+ pow: float = None,
842
+ center: int = None,
843
+ output_all_sequence: bool = False,
844
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
845
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
846
+ **kwargs,
847
+ ):
848
+ r"""
849
+ The call function to the pipeline for generation.
850
+
851
+ Args:
852
+ prompt (`str` or `List[str]`, *optional*):
853
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
854
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
855
+ The height in pixels of the generated image.
856
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
857
+ The width in pixels of the generated image.
858
+ num_inference_steps (`int`, *optional*, defaults to 50):
859
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
860
+ expense of slower inference.
861
+ timesteps (`List[int]`, *optional*):
862
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
863
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
864
+ passed will be used. Must be in descending order.
865
+ guidance_scale (`float`, *optional*, defaults to 7.5):
866
+ A higher guidance scale value encourages the model to generate images closely linked to the text
867
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
868
+ negative_prompt (`str` or `List[str]`, *optional*):
869
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
870
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
871
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
872
+ The number of images to generate per prompt.
873
+ eta (`float`, *optional*, defaults to 0.0):
874
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
875
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
876
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
877
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
878
+ generation deterministic.
879
+ latents (`torch.FloatTensor`, *optional*):
880
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
881
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
882
+ tensor is generated by sampling using the supplied random `generator`.
883
+ prompt_embeds (`torch.FloatTensor`, *optional*):
884
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
885
+ provided, text embeddings are generated from the `prompt` input argument.
886
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
887
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
888
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
889
+ ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
890
+ output_type (`str`, *optional*, defaults to `"pil"`):
891
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
892
+ return_dict (`bool`, *optional*, defaults to `True`):
893
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
894
+ plain tuple.
895
+ cross_attention_kwargs (`dict`, *optional*):
896
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
897
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
898
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
899
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
900
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
901
+ using zero terminal SNR.
902
+ clip_skip (`int`, *optional*):
903
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
904
+ the output of the pre-final layer will be used for computing the prompt embeddings.
905
+ callback_on_step_end (`Callable`, *optional*):
906
+ A function that is called at the end of each denoising step during inference. The function is called
907
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
908
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
909
+ `callback_on_step_end_tensor_inputs`.
910
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
911
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
912
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
913
+ `._callback_tensor_inputs` attribute of your pipeline class.
914
+
915
+ Examples:
916
+
917
+ Returns:
918
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
919
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
920
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
921
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
922
+ "not-safe-for-work" (nsfw) content.
923
+ """
924
+
925
+ callback = kwargs.pop("callback", None)
926
+ callback_steps = kwargs.pop("callback_steps", None)
927
+
928
+ if callback is not None:
929
+ deprecate(
930
+ "callback",
931
+ "1.0.0",
932
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
933
+ )
934
+ if callback_steps is not None:
935
+ deprecate(
936
+ "callback_steps",
937
+ "1.0.0",
938
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
939
+ )
940
+
941
+ # 0. Default height and width to unet
942
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
943
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
944
+ # to deal with lora scaling and other possible forward hooks
945
+
946
+ # 1. Check inputs. Raise error if not correct
947
+ self.check_inputs(
948
+ prompt,
949
+ height,
950
+ width,
951
+ callback_steps,
952
+ negative_prompt,
953
+ prompt_embeds,
954
+ negative_prompt_embeds,
955
+ callback_on_step_end_tensor_inputs,
956
+ )
957
+
958
+ self._guidance_scale = guidance_scale
959
+ self._guidance_rescale = guidance_rescale
960
+ self._clip_skip = clip_skip
961
+ self._cross_attention_kwargs = cross_attention_kwargs
962
+
963
+ # 2. Define call parameters
964
+ if prompt is not None and isinstance(prompt, str):
965
+ batch_size = 1
966
+ elif prompt is not None and isinstance(prompt, list):
967
+ batch_size = len(prompt)
968
+ else:
969
+ batch_size = prompt_embeds.shape[0]
970
+
971
+ device = self._execution_device
972
+
973
+ # 3. Encode input prompt
974
+ lora_scale = (
975
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
976
+ )
977
+
978
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
979
+ prompt,
980
+ device,
981
+ num_images_per_prompt,
982
+ self.do_classifier_free_guidance,
983
+ negative_prompt,
984
+ prompt_embeds=prompt_embeds,
985
+ negative_prompt_embeds=negative_prompt_embeds,
986
+ lora_scale=lora_scale,
987
+ clip_skip=self.clip_skip,
988
+ )
989
+
990
+ # For classifier free guidance, we need to do two forward passes.
991
+ # Here we concatenate the unconditional and text embeddings into a single batch
992
+ # to avoid doing two forward passes
993
+ if self.do_classifier_free_guidance:
994
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
995
+
996
+ if ip_adapter_image is not None:
997
+ output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
998
+ image_embeds, negative_image_embeds = self.encode_image(
999
+ ip_adapter_image, device, num_images_per_prompt, output_hidden_state
1000
+ )
1001
+ if self.do_classifier_free_guidance:
1002
+ image_embeds = torch.cat([negative_image_embeds, image_embeds])
1003
+
1004
+ # 4. Prepare timesteps
1005
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1006
+
1007
+ # 5. Prepare latent variables
1008
+ num_channels_latents = self.unet.config.in_channels
1009
+ latents = self.prepare_latents(
1010
+ batch_size * num_images_per_prompt,
1011
+ num_channels_latents,
1012
+ height,
1013
+ width,
1014
+ prompt_embeds.dtype,
1015
+ device,
1016
+ generator,
1017
+ latents,
1018
+ )
1019
+
1020
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1021
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1022
+
1023
+ # 6.1 Add image embeds for IP-Adapter
1024
+ added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
1025
+
1026
+ # 6.2 Optionally get Guidance Scale Embedding
1027
+ timestep_cond = None
1028
+ if self.unet.config.time_cond_proj_dim is not None:
1029
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1030
+ timestep_cond = self.get_guidance_scale_embedding(
1031
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1032
+ ).to(device=device, dtype=latents.dtype)
1033
+
1034
+ # 7. Denoising loop
1035
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1036
+ self._num_timesteps = len(timesteps)
1037
+
1038
+ prv_features = None
1039
+ latents_list = [latents]
1040
+
1041
+ if cache_interval == 1:
1042
+ interval_seq = list(range(num_inference_steps))
1043
+ else:
1044
+ if uniform:
1045
+ interval_seq = list(range(0, num_inference_steps, cache_interval))
1046
+ else:
1047
+ num_slow_step = num_inference_steps // cache_interval
1048
+ if num_inference_steps % cache_interval != 0:
1049
+ num_slow_step += 1
1050
+
1051
+ interval_seq, pow = sample_from_quad_center(num_inference_steps, num_slow_step, center=center, pow=pow)  # e.g. [0, 3, 6, 9, 12, 16, 22, 28, 35, 43]
1052
+
1053
+ interval_seq = sorted(interval_seq)
1054
+
1055
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1056
+ for i, t in enumerate(timesteps):
1057
+ # expand the latents if we are doing classifier free guidance
1058
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1059
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1060
+
1061
+ if i in interval_seq:
1062
+ prv_features = None
1063
+
1064
+ # predict the noise residual
1065
+ noise_pred, prv_features = self.unet(
1066
+ latent_model_input,
1067
+ t,
1068
+ encoder_hidden_states=prompt_embeds,
1069
+ timestep_cond=timestep_cond,
1070
+ cross_attention_kwargs=self.cross_attention_kwargs,
1071
+ added_cond_kwargs=added_cond_kwargs,
1072
+ replicate_prv_feature=prv_features,
1073
+ quick_replicate=cache_interval > 1,
1074
+ cache_layer_id=cache_layer_id,
1075
+ cache_block_id=cache_block_id,
1076
+
1077
+ return_dict=False,
1078
+ )
1079
+
1080
+ # perform guidance
1081
+ if self.do_classifier_free_guidance:
1082
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1083
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1084
+
1085
+ if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1086
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1087
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1088
+
1089
+ # compute the previous noisy sample x_t -> x_t-1
1090
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1091
+ latents_list.append(latents)
1092
+
1093
+ if callback_on_step_end is not None:
1094
+ callback_kwargs = {}
1095
+ for k in callback_on_step_end_tensor_inputs:
1096
+ callback_kwargs[k] = locals()[k]
1097
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1098
+
1099
+ latents = callback_outputs.pop("latents", latents)
1100
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1101
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1102
+
1103
+ # call the callback, if provided
1104
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1105
+ progress_bar.update()
1106
+ if callback is not None and i % callback_steps == 0:
1107
+ step_idx = i // getattr(self.scheduler, "order", 1)
1108
+ callback(step_idx, t, latents)
1109
+
1110
+ if not output_type == "latent":
1111
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1112
+ 0
1113
+ ]
1114
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1115
+ else:
1116
+ image = latents
1117
+ has_nsfw_concept = None
1118
+
1119
+ if has_nsfw_concept is None:
1120
+ do_denormalize = [True] * image.shape[0]
1121
+ else:
1122
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1123
+
1124
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1125
+
1126
+ # Offload all models
1127
+ self.maybe_free_model_hooks()
1128
+
1129
+ if not return_dict:
1130
+ return (image, has_nsfw_concept)
1131
+
1132
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1133
+
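Usage sketch (not part of the committed files): the `__call__` above exposes DeepCache's knobs — `cache_interval`, `cache_layer_id`, `cache_block_id`, and the `uniform`/`center`/`pow` schedule options. A minimal way to exercise them, assuming the four uploaded modules sit together in a local package directory (hypothetically named `deepcache/`) so the relative imports resolve, could look like this:

```py
import torch

# Hypothetical package path; the relative imports in deepcache_stable_diffusion.py
# require the accompanying unet_2d_condition.py and pipeline_utils.py next to it.
from deepcache.deepcache_stable_diffusion import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"

# cache_interval=1 disables feature caching and runs the full UNet at every step.
baseline = pipe(prompt, num_inference_steps=50, cache_interval=1).images[0]

# With cache_interval=5, the deep UNet features selected by cache_layer_id /
# cache_block_id (illustrative values below) are recomputed only on every 5th
# step and replicated on the steps in between.
fast = pipe(
    prompt,
    num_inference_steps=50,
    cache_interval=5,
    cache_layer_id=0,
    cache_block_id=0,
    uniform=True,  # set uniform=False plus center/pow for the quadratic schedule
).images[0]
```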
pipeline_utils.py ADDED
@@ -0,0 +1,1837 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import fnmatch
18
+ import importlib
19
+ import inspect
20
+ import os
21
+ import re
22
+ import sys
23
+ import warnings
24
+ from dataclasses import dataclass
25
+ from pathlib import Path
26
+ from typing import Any, Callable, Dict, List, Optional, Union
27
+
28
+ import numpy as np
29
+ import PIL
30
+ import torch
31
+ from huggingface_hub import ModelCard, create_repo, hf_hub_download, model_info, snapshot_download
32
+ from packaging import version
33
+ from requests.exceptions import HTTPError
34
+ from tqdm.auto import tqdm
35
+
36
+ import diffusers
37
+
38
+ from diffusers import __version__
39
+ from diffusers.configuration_utils import ConfigMixin
40
+ from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT
41
+ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
42
+ from diffusers.utils import (
43
+ CONFIG_NAME,
44
+ DEPRECATED_REVISION_ARGS,
45
+ DIFFUSERS_CACHE,
46
+ HF_HUB_OFFLINE,
47
+ SAFETENSORS_WEIGHTS_NAME,
48
+ WEIGHTS_NAME,
49
+ BaseOutput,
50
+ deprecate,
51
+ get_class_from_dynamic_module,
52
+ is_accelerate_available,
53
+ is_accelerate_version,
54
+ is_torch_version,
55
+ is_transformers_available,
56
+ logging,
57
+ numpy_to_pil,
58
+ )
59
+ from diffusers.utils.torch_utils import is_compiled_module
60
+
61
+
62
+ if is_transformers_available():
63
+ import transformers
64
+ from transformers import PreTrainedModel
65
+ from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
66
+ from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME
67
+ from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME
68
+
69
+ from diffusers.utils import FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, PushToHubMixin
70
+
71
+
72
+ if is_accelerate_available():
73
+ import accelerate
74
+
75
+
76
+ INDEX_FILE = "diffusion_pytorch_model.bin"
77
+ CUSTOM_PIPELINE_FILE_NAME = "pipeline.py"
78
+ DUMMY_MODULES_FOLDER = "diffusers.utils"
79
+ TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils"
80
+ CONNECTED_PIPES_KEYS = ["prior"]
81
+
82
+
83
+ logger = logging.get_logger(__name__)
84
+
85
+
86
+ LOADABLE_CLASSES = {
87
+ "diffusers": {
88
+ "ModelMixin": ["save_pretrained", "from_pretrained"],
89
+ "SchedulerMixin": ["save_pretrained", "from_pretrained"],
90
+ "DiffusionPipeline": ["save_pretrained", "from_pretrained"],
91
+ "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"],
92
+ },
93
+ "transformers": {
94
+ "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
95
+ "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
96
+ "PreTrainedModel": ["save_pretrained", "from_pretrained"],
97
+ "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"],
98
+ "ProcessorMixin": ["save_pretrained", "from_pretrained"],
99
+ "ImageProcessingMixin": ["save_pretrained", "from_pretrained"],
100
+ },
101
+ "onnxruntime.training": {
102
+ "ORTModule": ["save_pretrained", "from_pretrained"],
103
+ },
104
+ }
105
+
106
+ ALL_IMPORTABLE_CLASSES = {}
107
+ for library in LOADABLE_CLASSES:
108
+ ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])
109
+
110
+
111
+ @dataclass
112
+ class ImagePipelineOutput(BaseOutput):
113
+ """
114
+ Output class for image pipelines.
115
+
116
+ Args:
117
+ images (`List[PIL.Image.Image]` or `np.ndarray`)
118
+ List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
119
+ num_channels)`.
120
+ """
121
+
122
+ images: Union[List[PIL.Image.Image], np.ndarray]
123
+
124
+
125
+ @dataclass
126
+ class AudioPipelineOutput(BaseOutput):
127
+ """
128
+ Output class for audio pipelines.
129
+
130
+ Args:
131
+ audios (`np.ndarray`)
132
+ List of denoised audio samples of a NumPy array of shape `(batch_size, num_channels, sample_rate)`.
133
+ """
134
+
135
+ audios: np.ndarray
136
+
137
+
138
+ def is_safetensors_compatible(filenames, variant=None, passed_components=None) -> bool:
139
+ """
140
+ Checking for safetensors compatibility:
141
+ - By default, all models are saved with the default pytorch serialization, so we use the list of default pytorch
142
+ files to know which safetensors files are needed.
143
+ - The model is safetensors compatible only if there is a matching safetensors file for every default pytorch file.
144
+
145
+ Converting default pytorch serialized filenames to safetensors serialized filenames:
146
+ - For models from the diffusers library, just replace the ".bin" extension with ".safetensors"
147
+ - For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin"
148
+ extension is replaced with ".safetensors"
149
+ """
150
+ pt_filenames = []
151
+
152
+ sf_filenames = set()
153
+
154
+ passed_components = passed_components or []
155
+
156
+ for filename in filenames:
157
+ _, extension = os.path.splitext(filename)
158
+
159
+ if len(filename.split("/")) == 2 and filename.split("/")[0] in passed_components:
160
+ continue
161
+
162
+ if extension == ".bin":
163
+ pt_filenames.append(filename)
164
+ elif extension == ".safetensors":
165
+ sf_filenames.add(filename)
166
+
167
+ for filename in pt_filenames:
168
+ # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extension = '.bam'
169
+ path, filename = os.path.split(filename)
170
+ filename, extension = os.path.splitext(filename)
171
+
172
+ if filename.startswith("pytorch_model"):
173
+ filename = filename.replace("pytorch_model", "model")
174
+ else:
175
+ filename = filename
176
+
177
+ expected_sf_filename = os.path.join(path, filename)
178
+ expected_sf_filename = f"{expected_sf_filename}.safetensors"
179
+
180
+ if expected_sf_filename not in sf_filenames:
181
+ logger.warning(f"{expected_sf_filename} not found")
182
+ return False
183
+
184
+ return True
185
+
186
+
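A small sketch of the filename rule described in the docstring above: transformers-style `pytorch_model*.bin` files map to `model*.safetensors`, while diffusers-style files only swap the extension. It assumes this file is importable as `pipeline_utils` (the module name is an assumption).

```py
# Sketch of the safetensors-compatibility check; module name is an assumption.
from pipeline_utils import is_safetensors_compatible

filenames = [
    "text_encoder/pytorch_model.bin",      # expects text_encoder/model.safetensors
    "text_encoder/model.safetensors",
    "unet/diffusion_pytorch_model.bin",    # expects unet/diffusion_pytorch_model.safetensors
    "unet/diffusion_pytorch_model.safetensors",
]
assert is_safetensors_compatible(filenames)

# Missing the unet safetensors counterpart -> not compatible (a warning is logged).
assert not is_safetensors_compatible(filenames[:3])
```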
187
+ def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]:
188
+ weight_names = [
189
+ WEIGHTS_NAME,
190
+ SAFETENSORS_WEIGHTS_NAME,
191
+ FLAX_WEIGHTS_NAME,
192
+ ONNX_WEIGHTS_NAME,
193
+ ONNX_EXTERNAL_WEIGHTS_NAME,
194
+ ]
195
+
196
+ if is_transformers_available():
197
+ weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
198
+
199
+ # model_pytorch, diffusion_model_pytorch, ...
200
+ weight_prefixes = [w.split(".")[0] for w in weight_names]
201
+ # .bin, .safetensors, ...
202
+ weight_suffixs = [w.split(".")[-1] for w in weight_names]
203
+ # -00001-of-00002
204
+ transformers_index_format = r"\d{5}-of-\d{5}"
205
+
206
+ if variant is not None:
207
+ # `diffusion_pytorch_model.fp16.bin` as well as `model.fp16-00001-of-00002.safetensors`
208
+ variant_file_re = re.compile(
209
+ rf"({'|'.join(weight_prefixes)})\.({variant}|{variant}-{transformers_index_format})\.({'|'.join(weight_suffixs)})$"
210
+ )
211
+ # `text_encoder/pytorch_model.bin.index.fp16.json`
212
+ variant_index_re = re.compile(
213
+ rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.{variant}\.json$"
214
+ )
215
+
216
+ # `diffusion_pytorch_model.bin` as well as `model-00001-of-00002.safetensors`
217
+ non_variant_file_re = re.compile(
218
+ rf"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\.({'|'.join(weight_suffixs)})$"
219
+ )
220
+ # `text_encoder/pytorch_model.bin.index.json`
221
+ non_variant_index_re = re.compile(rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.json")
222
+
223
+ if variant is not None:
224
+ variant_weights = {f for f in filenames if variant_file_re.match(f.split("/")[-1]) is not None}
225
+ variant_indexes = {f for f in filenames if variant_index_re.match(f.split("/")[-1]) is not None}
226
+ variant_filenames = variant_weights | variant_indexes
227
+ else:
228
+ variant_filenames = set()
229
+
230
+ non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split("/")[-1]) is not None}
231
+ non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split("/")[-1]) is not None}
232
+ non_variant_filenames = non_variant_weights | non_variant_indexes
233
+
234
+ # all variant filenames will be used by default
235
+ usable_filenames = set(variant_filenames)
236
+
237
+ def convert_to_variant(filename):
238
+ if "index" in filename:
239
+ variant_filename = filename.replace("index", f"index.{variant}")
240
+ elif re.compile(f"^(.*?){transformers_index_format}").match(filename) is not None:
241
+ variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}"
242
+ else:
243
+ variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}"
244
+ return variant_filename
245
+
246
+ for f in non_variant_filenames:
247
+ variant_filename = convert_to_variant(f)
248
+ if variant_filename not in usable_filenames:
249
+ usable_filenames.add(f)
250
+
251
+ return usable_filenames, variant_filenames
252
+
253
+
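A sketch of how `variant_compatible_siblings` resolves an `fp16` variant, falling back to the non-variant weights for components that have no variant file (again assuming this file is importable as `pipeline_utils`).

```py
# Sketch of variant resolution; module name is an assumption.
from pipeline_utils import variant_compatible_siblings

filenames = {
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.fp16.bin",
    "vae/diffusion_pytorch_model.bin",
}
usable, variant_files = variant_compatible_siblings(filenames, variant="fp16")

# The fp16 file is preferred where it exists; the vae falls back to the
# non-variant weights because no fp16 file is present for it.
assert usable == {"unet/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.bin"}
assert variant_files == {"unet/diffusion_pytorch_model.fp16.bin"}
```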
254
+ def warn_deprecated_model_variant(pretrained_model_name_or_path, use_auth_token, variant, revision, model_filenames):
255
+ info = model_info(
256
+ pretrained_model_name_or_path,
257
+ use_auth_token=use_auth_token,
258
+ revision=None,
259
+ )
260
+ filenames = {sibling.rfilename for sibling in info.siblings}
261
+ comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision)
262
+ comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames]
263
+
264
+ if set(comp_model_filenames) == set(model_filenames):
265
+ warnings.warn(
266
+ f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
267
+ FutureWarning,
268
+ )
269
+ else:
270
+ warnings.warn(
271
+ f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.",
272
+ FutureWarning,
273
+ )
274
+
275
+
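The warnings above fire when a variant is requested through `revision` instead of `variant`. A sketch of the two call styles (the checkpoint id is an assumption):

```py
# Illustrative only; checkpoint id is an assumption.
from diffusers import DiffusionPipeline

# Deprecated: selecting fp16 weights via a branch name triggers
# warn_deprecated_model_variant above.
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", revision="fp16")

# Recommended: select the weight files through `variant` instead.
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16")
```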
276
+ def maybe_raise_or_warn(
277
+ library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module
278
+ ):
279
+ """Simple helper method to raise or warn in case incorrect module has been passed"""
280
+ if not is_pipeline_module:
281
+ library = importlib.import_module(library_name)
282
+ class_obj = getattr(library, class_name)
283
+ class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
284
+
285
+ expected_class_obj = None
286
+ for class_name, class_candidate in class_candidates.items():
287
+ if class_candidate is not None and issubclass(class_obj, class_candidate):
288
+ expected_class_obj = class_candidate
289
+
290
+ # Dynamo wraps the original model in a private class.
291
+ # I didn't find a public API to get the original class.
292
+ sub_model = passed_class_obj[name]
293
+ model_cls = sub_model.__class__
294
+ if is_compiled_module(sub_model):
295
+ model_cls = sub_model._orig_mod.__class__
296
+
297
+ if not issubclass(model_cls, expected_class_obj):
298
+ raise ValueError(
299
+ f"{passed_class_obj[name]} is of type: {model_cls}, but should be" f" {expected_class_obj}"
300
+ )
301
+ else:
302
+ logger.warning(
303
+ f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it"
304
+ " has the correct type"
305
+ )
306
+
307
+
308
+ def get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module):
309
+ """Simple helper method to retrieve class object of module as well as potential parent class objects"""
310
+ if is_pipeline_module:
311
+ pipeline_module = getattr(pipelines, library_name)
312
+
313
+ class_obj = getattr(pipeline_module, class_name)
314
+ class_candidates = {c: class_obj for c in importable_classes.keys()}
315
+ else:
316
+ # else we just import it from the library.
317
+ if class_name == 'UNet2DConditionModel':
318
+ library_name = "DeepCache.sd.unet_2d_condition"
319
+
320
+
321
+ library = importlib.import_module(library_name)
322
+ class_obj = getattr(library, class_name)
323
+ class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
324
+
325
+ return class_obj, class_candidates
326
+
327
+
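The `UNet2DConditionModel` branch above is the DeepCache-specific change in this helper: the UNet class is imported from `DeepCache.sd.unet_2d_condition` instead of the library named in the model index. A sketch of what that resolution amounts to (it assumes the DeepCache package is installed and exposes that module):

```py
# Sketch of the redirect above; assumes the DeepCache package is installed.
import importlib

library = importlib.import_module("DeepCache.sd.unet_2d_condition")
unet_cls = getattr(library, "UNet2DConditionModel")

# load_sub_model() below then calls unet_cls.from_pretrained(...) on the
# `unet` subfolder of the checkpoint, so the DeepCache UNet is used transparently.
```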
328
+ def _get_pipeline_class(
329
+ class_obj, config, load_connected_pipeline=False, custom_pipeline=None, cache_dir=None, revision=None
330
+ ):
331
+ if custom_pipeline is not None:
332
+ if custom_pipeline.endswith(".py"):
333
+ path = Path(custom_pipeline)
334
+ # decompose into folder & file
335
+ file_name = path.name
336
+ custom_pipeline = path.parent.absolute()
337
+ else:
338
+ file_name = CUSTOM_PIPELINE_FILE_NAME
339
+
340
+ return get_class_from_dynamic_module(
341
+ custom_pipeline, module_file=file_name, cache_dir=cache_dir, revision=revision
342
+ )
343
+
344
+ if class_obj != DiffusionPipeline:
345
+ return class_obj
346
+
347
+ diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0])
348
+ class_name = config["_class_name"]
349
+
350
+ if class_name.startswith("Flax"):
351
+ class_name = class_name[4:]
352
+
353
+ pipeline_cls = getattr(diffusers_module, class_name)
354
+
355
+ if load_connected_pipeline:
356
+ from .auto_pipeline import _get_connected_pipeline
357
+
358
+ connected_pipeline_cls = _get_connected_pipeline(pipeline_cls)
359
+ if connected_pipeline_cls is not None:
360
+ logger.info(
361
+ f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`"
362
+ )
363
+ else:
364
+ logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. Loading {pipeline_cls.__name__}.")
365
+
366
+ pipeline_cls = connected_pipeline_cls or pipeline_cls
367
+
368
+ return pipeline_cls
369
+
370
+
371
+ def load_sub_model(
372
+ library_name: str,
373
+ class_name: str,
374
+ importable_classes: List[Any],
375
+ pipelines: Any,
376
+ is_pipeline_module: bool,
377
+ pipeline_class: Any,
378
+ torch_dtype: torch.dtype,
379
+ provider: Any,
380
+ sess_options: Any,
381
+ device_map: Optional[Union[Dict[str, torch.device], str]],
382
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]],
383
+ offload_folder: Optional[Union[str, os.PathLike]],
384
+ offload_state_dict: bool,
385
+ model_variants: Dict[str, str],
386
+ name: str,
387
+ from_flax: bool,
388
+ variant: str,
389
+ low_cpu_mem_usage: bool,
390
+ cached_folder: Union[str, os.PathLike],
391
+ ):
392
+ """Helper method to load the module `name` from `library_name` and `class_name`"""
393
+ # retrieve class candidates
394
+ class_obj, class_candidates = get_class_obj_and_candidates(
395
+ library_name, class_name, importable_classes, pipelines, is_pipeline_module
396
+ )
397
+
398
+ load_method_name = None
399
+ # retrieve load method name
400
+ for class_name, class_candidate in class_candidates.items():
401
+ if class_candidate is not None and issubclass(class_obj, class_candidate):
402
+ load_method_name = importable_classes[class_name][1]
403
+
404
+ # if load method name is None, then we have a dummy module -> raise Error
405
+ if load_method_name is None:
406
+ none_module = class_obj.__module__
407
+ is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith(
408
+ TRANSFORMERS_DUMMY_MODULES_FOLDER
409
+ )
410
+ if is_dummy_path and "dummy" in none_module:
411
+ # call class_obj for nice error message of missing requirements
412
+ class_obj()
413
+
414
+ raise ValueError(
415
+ f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have"
416
+ f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}."
417
+ )
418
+
419
+ load_method = getattr(class_obj, load_method_name)
420
+
421
+ # add kwargs to loading method
422
+ loading_kwargs = {}
423
+ if issubclass(class_obj, torch.nn.Module):
424
+ loading_kwargs["torch_dtype"] = torch_dtype
425
+ if issubclass(class_obj, diffusers.OnnxRuntimeModel):
426
+ loading_kwargs["provider"] = provider
427
+ loading_kwargs["sess_options"] = sess_options
428
+
429
+ is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin)
430
+
431
+ if is_transformers_available():
432
+ transformers_version = version.parse(version.parse(transformers.__version__).base_version)
433
+ else:
434
+ transformers_version = "N/A"
435
+
436
+ is_transformers_model = (
437
+ is_transformers_available()
438
+ and issubclass(class_obj, PreTrainedModel)
439
+ and transformers_version >= version.parse("4.20.0")
440
+ )
441
+
442
+ # When loading a transformers model with device_map=None, the weights are initialized before being overwritten, unlike with diffusers models.
443
+ # To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default.
444
+ # This makes sure that the weights won't be initialized which significantly speeds up loading.
445
+ if is_diffusers_model or is_transformers_model:
446
+ loading_kwargs["device_map"] = device_map
447
+ loading_kwargs["max_memory"] = max_memory
448
+ loading_kwargs["offload_folder"] = offload_folder
449
+ loading_kwargs["offload_state_dict"] = offload_state_dict
450
+ loading_kwargs["variant"] = model_variants.pop(name, None)
451
+ if from_flax:
452
+ loading_kwargs["from_flax"] = True
453
+
454
+ # the following can be deleted once the minimum required `transformers` version
455
+ # is higher than 4.27
456
+ if (
457
+ is_transformers_model
458
+ and loading_kwargs["variant"] is not None
459
+ and transformers_version < version.parse("4.27.0")
460
+ ):
461
+ raise ImportError(
462
+ f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0"
463
+ )
464
+ elif is_transformers_model and loading_kwargs["variant"] is None:
465
+ loading_kwargs.pop("variant")
466
+
467
+ # if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage`
468
+ if not (from_flax and is_transformers_model):
469
+ loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
470
+ else:
471
+ loading_kwargs["low_cpu_mem_usage"] = False
472
+
473
+ # check if the module is in a subdirectory
474
+ if os.path.isdir(os.path.join(cached_folder, name)):
475
+ loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
476
+ else:
477
+ # else load from the root directory
478
+ loaded_sub_model = load_method(cached_folder, **loading_kwargs)
479
+
480
+ return loaded_sub_model
481
+
482
+
483
+ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
484
+ r"""
485
+ Base class for all pipelines.
486
+
487
+ [`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and
488
+ provides methods for loading, downloading and saving models. It also includes methods to:
489
+
490
+ - move all PyTorch modules to the device of your choice
491
+ - enable/disable the progress bar for the denoising iteration
492
+
493
+ Class attributes:
494
+
495
+ - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
496
+ diffusion pipeline's components.
497
+ - **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the
498
+ pipeline to function (should be overridden by subclasses).
499
+ """
500
+ config_name = "model_index.json"
501
+ model_cpu_offload_seq = None
502
+ _optional_components = []
503
+ _exclude_from_cpu_offload = []
504
+ _load_connected_pipes = False
505
+ _is_onnx = False
506
+
507
+ def register_modules(self, **kwargs):
508
+ # import it here to avoid circular import
509
+ from diffusers import pipelines
510
+
511
+ for name, module in kwargs.items():
512
+ # retrieve library
513
+ if module is None:
514
+ register_dict = {name: (None, None)}
515
+ else:
516
+ # register the config from the original module, not the dynamo compiled one
517
+ if is_compiled_module(module):
518
+ not_compiled_module = module._orig_mod
519
+ else:
520
+ not_compiled_module = module
521
+
522
+ library = not_compiled_module.__module__.split(".")[0]
523
+
524
+ # check if the module is a pipeline module
525
+ module_path_items = not_compiled_module.__module__.split(".")
526
+ pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None
527
+
528
+ path = not_compiled_module.__module__.split(".")
529
+ is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)
530
+
531
+ # if library is not in LOADABLE_CLASSES, then it is a custom module.
532
+ # Or if it's a pipeline module, then the module is inside the pipeline
533
+ # folder so we set the library to module name.
534
+ if is_pipeline_module:
535
+ library = pipeline_dir
536
+ elif library not in LOADABLE_CLASSES:
537
+ library = not_compiled_module.__module__
538
+
539
+ # retrieve class_name
540
+ class_name = not_compiled_module.__class__.__name__
541
+
542
+ register_dict = {name: (library, class_name)}
543
+
544
+ # save model index config
545
+ self.register_to_config(**register_dict)
546
+
547
+ # set models
548
+ setattr(self, name, module)
549
+
550
+ def __setattr__(self, name: str, value: Any):
551
+ if name in self.__dict__ and hasattr(self.config, name):
552
+ # We need to overwrite the config if name exists in config
553
+ if isinstance(getattr(self.config, name), (tuple, list)):
554
+ if value is not None and self.config[name][0] is not None:
555
+ class_library_tuple = (value.__module__.split(".")[0], value.__class__.__name__)
556
+ else:
557
+ class_library_tuple = (None, None)
558
+
559
+ self.register_to_config(**{name: class_library_tuple})
560
+ else:
561
+ self.register_to_config(**{name: value})
562
+
563
+ super().__setattr__(name, value)
564
+
565
+ def save_pretrained(
566
+ self,
567
+ save_directory: Union[str, os.PathLike],
568
+ safe_serialization: bool = True,
569
+ variant: Optional[str] = None,
570
+ push_to_hub: bool = False,
571
+ **kwargs,
572
+ ):
573
+ """
574
+ Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its
575
+ class implements both a save and loading method. The pipeline is easily reloaded using the
576
+ [`~DiffusionPipeline.from_pretrained`] class method.
577
+
578
+ Arguments:
579
+ save_directory (`str` or `os.PathLike`):
580
+ Directory to save a pipeline to. Will be created if it doesn't exist.
581
+ safe_serialization (`bool`, *optional*, defaults to `True`):
582
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
583
+ variant (`str`, *optional*):
584
+ If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
585
+ push_to_hub (`bool`, *optional*, defaults to `False`):
586
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
587
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
588
+ namespace).
589
+ kwargs (`Dict[str, Any]`, *optional*):
590
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
591
+ """
592
+ model_index_dict = dict(self.config)
593
+ model_index_dict.pop("_class_name", None)
594
+ model_index_dict.pop("_diffusers_version", None)
595
+ model_index_dict.pop("_module", None)
596
+ model_index_dict.pop("_name_or_path", None)
597
+
598
+ if push_to_hub:
599
+ commit_message = kwargs.pop("commit_message", None)
600
+ private = kwargs.pop("private", False)
601
+ create_pr = kwargs.pop("create_pr", False)
602
+ token = kwargs.pop("token", None)
603
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
604
+ repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
605
+
606
+ expected_modules, optional_kwargs = self._get_signature_keys(self)
607
+
608
+ def is_saveable_module(name, value):
609
+ if name not in expected_modules:
610
+ return False
611
+ if name in self._optional_components and value[0] is None:
612
+ return False
613
+ return True
614
+
615
+ model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)}
616
+ for pipeline_component_name in model_index_dict.keys():
617
+ sub_model = getattr(self, pipeline_component_name)
618
+ model_cls = sub_model.__class__
619
+
620
+ # Dynamo wraps the original model in a private class.
621
+ # I didn't find a public API to get the original class.
622
+ if is_compiled_module(sub_model):
623
+ sub_model = sub_model._orig_mod
624
+ model_cls = sub_model.__class__
625
+
626
+ save_method_name = None
627
+ # search for the model's base class in LOADABLE_CLASSES
628
+ for library_name, library_classes in LOADABLE_CLASSES.items():
629
+ if library_name in sys.modules:
630
+ library = importlib.import_module(library_name)
631
+ else:
632
+ logger.info(
633
+ f"{library_name} is not installed. Cannot save {pipeline_component_name} as {library_classes} from {library_name}"
634
+ )
635
+
636
+ for base_class, save_load_methods in library_classes.items():
637
+ class_candidate = getattr(library, base_class, None)
638
+ if class_candidate is not None and issubclass(model_cls, class_candidate):
639
+ # if we found a suitable base class in LOADABLE_CLASSES then grab its save method
640
+ save_method_name = save_load_methods[0]
641
+ break
642
+ if save_method_name is not None:
643
+ break
644
+
645
+ if save_method_name is None:
646
+ logger.warn(f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved.")
647
+ # make sure that unsaveable components are not tried to be loaded afterward
648
+ self.register_to_config(**{pipeline_component_name: (None, None)})
649
+ continue
650
+
651
+ save_method = getattr(sub_model, save_method_name)
652
+
653
+ # Call the save method with the argument safe_serialization only if it's supported
654
+ save_method_signature = inspect.signature(save_method)
655
+ save_method_accept_safe = "safe_serialization" in save_method_signature.parameters
656
+ save_method_accept_variant = "variant" in save_method_signature.parameters
657
+
658
+ save_kwargs = {}
659
+ if save_method_accept_safe:
660
+ save_kwargs["safe_serialization"] = safe_serialization
661
+ if save_method_accept_variant:
662
+ save_kwargs["variant"] = variant
663
+
664
+ save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs)
665
+
666
+ # finally save the config
667
+ self.save_config(save_directory)
668
+
669
+ if push_to_hub:
670
+ self._upload_folder(
671
+ save_directory,
672
+ repo_id,
673
+ token=token,
674
+ commit_message=commit_message,
675
+ create_pr=create_pr,
676
+ )
677
+
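A short round-trip sketch for `save_pretrained` (the checkpoint id and output path are assumptions):

```py
# Illustrative only; checkpoint id and output path are assumptions.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Each saveable component (unet, vae, text_encoder, ...) is written to its own
# subfolder; safetensors serialization is the default.
pipe.save_pretrained("./my_pipeline", safe_serialization=True)

# The saved folder can be reloaded directly.
pipe2 = DiffusionPipeline.from_pretrained("./my_pipeline")
```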
678
+ def to(
679
+ self,
680
+ torch_device: Optional[Union[str, torch.device]] = None,
681
+ torch_dtype: Optional[torch.dtype] = None,
682
+ silence_dtype_warnings: bool = False,
683
+ ):
684
+ if torch_device is None and torch_dtype is None:
685
+ return self
686
+
687
+ # throw warning if pipeline is in "offloaded"-mode but user tries to manually set to GPU.
688
+ def module_is_sequentially_offloaded(module):
689
+ if not is_accelerate_available() or is_accelerate_version("<", "0.14.0"):
690
+ return False
691
+
692
+ return hasattr(module, "_hf_hook") and not isinstance(
693
+ module._hf_hook, (accelerate.hooks.CpuOffload, accelerate.hooks.AlignDevicesHook)
694
+ )
695
+
696
+ def module_is_offloaded(module):
697
+ if not is_accelerate_available() or is_accelerate_version("<", "0.17.0.dev0"):
698
+ return False
699
+
700
+ return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload)
701
+
702
+ # .to("cuda") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer
703
+ pipeline_is_sequentially_offloaded = any(
704
+ module_is_sequentially_offloaded(module) for _, module in self.components.items()
705
+ )
706
+ if pipeline_is_sequentially_offloaded and torch_device and torch.device(torch_device).type == "cuda":
707
+ raise ValueError(
708
+ "It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading."
709
+ )
710
+
711
+ # Display a warning in this case (the operation succeeds but the benefits are lost)
712
+ pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items())
713
+ if pipeline_is_offloaded and torch_device and torch.device(torch_device).type == "cuda":
714
+ logger.warning(
715
+ f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading."
716
+ )
717
+
718
+ module_names, _ = self._get_signature_keys(self)
719
+ modules = [getattr(self, n, None) for n in module_names]
720
+ modules = [m for m in modules if isinstance(m, torch.nn.Module)]
721
+
722
+ is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded
723
+ for module in modules:
724
+ is_loaded_in_8bit = hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit
725
+
726
+ if is_loaded_in_8bit and torch_dtype is not None:
727
+ logger.warning(
728
+ f"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {torch_dtype} is not yet supported. Module is still in 8bit precision."
729
+ )
730
+
731
+ if is_loaded_in_8bit and torch_device is not None:
732
+ logger.warning(
733
+ f"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {torch_dtype} via `.to()` is not yet supported. Module is still on {module.device}."
734
+ )
735
+ else:
736
+ module.to(torch_device, torch_dtype)
737
+
738
+ if (
739
+ module.dtype == torch.float16
740
+ and str(torch_device) in ["cpu"]
741
+ and not silence_dtype_warnings
742
+ and not is_offloaded
743
+ ):
744
+ logger.warning(
745
+ "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` device. It"
746
+ " is not recommended to move them to `cpu` as running them will fail. Please make"
747
+ " sure to use an accelerator to run the pipeline in inference, due to the lack of"
748
+ " support for`float16` operations on this device in PyTorch. Please, remove the"
749
+ " `torch_dtype=torch.float16` argument, or use another device for inference."
750
+ )
751
+ return self
752
+
753
+ @property
754
+ def device(self) -> torch.device:
755
+ r"""
756
+ Returns:
757
+ `torch.device`: The torch device on which the pipeline is located.
758
+ """
759
+ module_names, _ = self._get_signature_keys(self)
760
+ modules = [getattr(self, n, None) for n in module_names]
761
+ modules = [m for m in modules if isinstance(m, torch.nn.Module)]
762
+
763
+ for module in modules:
764
+ return module.device
765
+
766
+ return torch.device("cpu")
767
+
768
+ @classmethod
769
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
770
+ r"""
771
+ Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights.
772
+
773
+ The pipeline is set in evaluation mode (`model.eval()`) by default.
774
+
775
+ If you get the error message below, you need to finetune the weights for your downstream task:
776
+
777
+ ```
778
+ Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
779
+ - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
780
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
781
+ ```
782
+
783
+ Parameters:
784
+ pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
785
+ Can be either:
786
+
787
+ - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
788
+ hosted on the Hub.
789
+ - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
790
+ saved using
791
+ [`~DiffusionPipeline.save_pretrained`].
792
+ torch_dtype (`str` or `torch.dtype`, *optional*):
793
+ Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
794
+ dtype is automatically derived from the model's weights.
795
+ custom_pipeline (`str`, *optional*):
796
+
797
+ <Tip warning={true}>
798
+
799
+ 🧪 This is an experimental feature and may change in the future.
800
+
801
+ </Tip>
802
+
803
+ Can be either:
804
+
805
+ - A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom
806
+ pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines
807
+ the custom pipeline.
808
+ - A string, the *file name* of a community pipeline hosted on GitHub under
809
+ [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file
810
+ names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`
811
+ instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the
812
+ current main branch of GitHub.
813
+ - A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. The directory
814
+ must contain a file called `pipeline.py` that defines the custom pipeline.
815
+
816
+ For more information on how to load and create custom pipelines, please have a look at [Loading and
817
+ Adding Custom
818
+ Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview)
819
+ force_download (`bool`, *optional*, defaults to `False`):
820
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
821
+ cached versions if they exist.
822
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
823
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
824
+ is not used.
825
+ resume_download (`bool`, *optional*, defaults to `False`):
826
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
827
+ incompletely downloaded files are deleted.
828
+ proxies (`Dict[str, str]`, *optional*):
829
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
830
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
831
+ output_loading_info(`bool`, *optional*, defaults to `False`):
832
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
833
+ local_files_only (`bool`, *optional*, defaults to `False`):
834
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
835
+ won't be downloaded from the Hub.
836
+ use_auth_token (`str` or *bool*, *optional*):
837
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
838
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
839
+ revision (`str`, *optional*, defaults to `"main"`):
840
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
841
+ allowed by Git.
842
+ custom_revision (`str`, *optional*, defaults to `"main"`):
843
+ The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
844
+ `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
845
+ custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
846
+ mirror (`str`, *optional*):
847
+ Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
848
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
849
+ information.
850
+ device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
851
+ A map that specifies where each submodule should go. It doesn’t need to be defined for each
852
+ parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
853
+ same device.
854
+
855
+ Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
856
+ more information about each option see [designing a device
857
+ map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
858
+ max_memory (`Dict`, *optional*):
859
+ A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
860
+ each GPU and the available CPU RAM if unset.
861
+ offload_folder (`str` or `os.PathLike`, *optional*):
862
+ The path to offload weights if device_map contains the value `"disk"`.
863
+ offload_state_dict (`bool`, *optional*):
864
+ If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
865
+ the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
866
+ when there is some disk offload.
867
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
868
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
869
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
870
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
871
+ argument to `True` will raise an error.
872
+ use_safetensors (`bool`, *optional*, defaults to `None`):
873
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
874
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
875
+ weights. If set to `False`, safetensors weights are not loaded.
876
+ use_onnx (`bool`, *optional*, defaults to `None`):
877
+ If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights
878
+ will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is
879
+ `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending
880
+ with `.onnx` and `.pb`.
881
+ kwargs (remaining dictionary of keyword arguments, *optional*):
882
+ Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
883
+ class). The overwritten components are passed directly to the pipelines `__init__` method. See example
884
+ below for more information.
885
+ variant (`str`, *optional*):
886
+ Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
887
+ loading `from_flax`.
888
+
889
+ <Tip>
890
+
891
+ To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
892
+ `huggingface-cli login`.
893
+
894
+ </Tip>
895
+
896
+ Examples:
897
+
898
+ ```py
899
+ >>> from diffusers import DiffusionPipeline
900
+
901
+ >>> # Download pipeline from huggingface.co and cache.
902
+ >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
903
+
904
+ >>> # Download pipeline that requires an authorization token
905
+ >>> # For more information on access tokens, please refer to this section
906
+ >>> # of the documentation: https://huggingface.co/docs/hub/security-tokens
907
+ >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
908
+
909
+ >>> # Use a different scheduler
910
+ >>> from diffusers import LMSDiscreteScheduler
911
+
912
+ >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
913
+ >>> pipeline.scheduler = scheduler
914
+ ```
915
+ """
916
+ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
917
+ resume_download = kwargs.pop("resume_download", False)
918
+ force_download = kwargs.pop("force_download", False)
919
+ proxies = kwargs.pop("proxies", None)
920
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
921
+ use_auth_token = kwargs.pop("use_auth_token", None)
922
+ revision = kwargs.pop("revision", None)
923
+ from_flax = kwargs.pop("from_flax", False)
924
+ torch_dtype = kwargs.pop("torch_dtype", None)
925
+ custom_pipeline = kwargs.pop("custom_pipeline", None)
926
+ custom_revision = kwargs.pop("custom_revision", None)
927
+ provider = kwargs.pop("provider", None)
928
+ sess_options = kwargs.pop("sess_options", None)
929
+ device_map = kwargs.pop("device_map", None)
930
+ max_memory = kwargs.pop("max_memory", None)
931
+ offload_folder = kwargs.pop("offload_folder", None)
932
+ offload_state_dict = kwargs.pop("offload_state_dict", False)
933
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
934
+ variant = kwargs.pop("variant", None)
935
+ use_safetensors = kwargs.pop("use_safetensors", None)
936
+ use_onnx = kwargs.pop("use_onnx", None)
937
+ load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
938
+
939
+ # 1. Download the checkpoints and configs
940
+ # use snapshot download here to get it working from from_pretrained
941
+ if not os.path.isdir(pretrained_model_name_or_path):
942
+ cached_folder = cls.download(
943
+ pretrained_model_name_or_path,
944
+ cache_dir=cache_dir,
945
+ resume_download=resume_download,
946
+ force_download=force_download,
947
+ proxies=proxies,
948
+ local_files_only=local_files_only,
949
+ use_auth_token=use_auth_token,
950
+ revision=revision,
951
+ from_flax=from_flax,
952
+ use_safetensors=use_safetensors,
953
+ use_onnx=use_onnx,
954
+ custom_pipeline=custom_pipeline,
955
+ custom_revision=custom_revision,
956
+ variant=variant,
957
+ load_connected_pipeline=load_connected_pipeline,
958
+ **kwargs,
959
+ )
960
+ else:
961
+ cached_folder = pretrained_model_name_or_path
962
+
963
+ config_dict = cls.load_config(cached_folder)
964
+
965
+ # pop out "_ignore_files" as it is only needed for download
966
+ config_dict.pop("_ignore_files", None)
967
+
968
+ # 2. Define which model components should load variants
969
+ # We retrieve the information by matching whether variant
970
+ # model checkpoints exist in the subfolders
971
+ model_variants = {}
972
+ if variant is not None:
973
+ for folder in os.listdir(cached_folder):
974
+ folder_path = os.path.join(cached_folder, folder)
975
+ is_folder = os.path.isdir(folder_path) and folder in config_dict
976
+ variant_exists = is_folder and any(
977
+ p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)
978
+ )
979
+ if variant_exists:
980
+ model_variants[folder] = variant
981
+
982
+ # 3. Load the pipeline class, if using custom module then load it from the hub
983
+ # if we load from explicit class, let's use it
984
+ pipeline_class = _get_pipeline_class(
985
+ cls,
986
+ config_dict,
987
+ load_connected_pipeline=load_connected_pipeline,
988
+ custom_pipeline=custom_pipeline,
989
+ cache_dir=cache_dir,
990
+ revision=custom_revision,
991
+ )
992
+
993
+ # DEPRECATED: To be removed in 1.0.0
994
+ if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse(
995
+ version.parse(config_dict["_diffusers_version"]).base_version
996
+ ) <= version.parse("0.5.1"):
997
+ from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy
998
+
999
+ pipeline_class = StableDiffusionInpaintPipelineLegacy
1000
+
1001
+ deprecation_message = (
1002
+ "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the"
1003
+ f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For"
1004
+ " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting"
1005
+ " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your"
1006
+ f" checkpoint {pretrained_model_name_or_path} to the format of"
1007
+ " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain"
1008
+ " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0."
1009
+ )
1010
+ deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False)
1011
+
1012
+ # 4. Define expected modules given pipeline signature
1013
+ # and define non-None initialized modules (=`init_kwargs`)
1014
+
1015
+ # some modules can be passed directly to the init
1016
+ # in this case they are already instantiated in `kwargs`
1017
+ # extract them here
1018
+ expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class)
1019
+ passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
1020
+ passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
1021
+
1022
+ init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)
1023
+
1024
+ # define init kwargs and make sure that optional component modules are filtered out
1025
+ init_kwargs = {
1026
+ k: init_dict.pop(k)
1027
+ for k in optional_kwargs
1028
+ if k in init_dict and k not in pipeline_class._optional_components
1029
+ }
1030
+ init_kwargs = {**init_kwargs, **passed_pipe_kwargs}
1031
+
1032
+ # remove `null` components
1033
+ def load_module(name, value):
1034
+ if value[0] is None:
1035
+ return False
1036
+ if name in passed_class_obj and passed_class_obj[name] is None:
1037
+ return False
1038
+ return True
1039
+
1040
+ init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}
1041
+
1042
+ # Special case: safety_checker must be loaded separately when using `from_flax`
1043
+ if from_flax and "safety_checker" in init_dict and "safety_checker" not in passed_class_obj:
1044
+ raise NotImplementedError(
1045
+ "The safety checker cannot be automatically loaded when loading weights `from_flax`."
1046
+ " Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker"
1047
+ " separately if you need it."
1048
+ )
1049
+
1050
+ # 5. Throw nice warnings / errors for fast accelerate loading
1051
+ if len(unused_kwargs) > 0:
1052
+ logger.warning(
1053
+ f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored."
1054
+ )
1055
+
1056
+ if low_cpu_mem_usage and not is_accelerate_available():
1057
+ low_cpu_mem_usage = False
1058
+ logger.warning(
1059
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
1060
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
1061
+ " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
1062
+ " install accelerate\n```\n."
1063
+ )
1064
+
1065
+ if device_map is not None and not is_torch_version(">=", "1.9.0"):
1066
+ raise NotImplementedError(
1067
+ "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set"
1068
+ " `device_map=None`."
1069
+ )
1070
+
1071
+ if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
1072
+ raise NotImplementedError(
1073
+ "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
1074
+ " `low_cpu_mem_usage=False`."
1075
+ )
1076
+
1077
+ if low_cpu_mem_usage is False and device_map is not None:
1078
+ raise ValueError(
1079
+ f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and"
1080
+ " dispatching. Please make sure to set `low_cpu_mem_usage=True`."
1081
+ )
1082
+
1083
+ # import it here to avoid circular import
1084
+ from diffusers import pipelines
1085
+
1086
+ # 6. Load each module in the pipeline
1087
+ for name, (library_name, class_name) in tqdm(init_dict.items(), desc="Loading pipeline components..."):
1088
+ # 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names
1089
+ if class_name.startswith("Flax"):
1090
+ class_name = class_name[4:]
1091
+
1092
+ # 6.2 Define all importable classes
1093
+ is_pipeline_module = hasattr(pipelines, library_name)
1094
+ importable_classes = ALL_IMPORTABLE_CLASSES
1095
+ loaded_sub_model = None
1096
+
1097
+ # 6.3 Use passed sub model or load class_name from library_name
1098
+ if name in passed_class_obj:
1099
+ # if the model is in a pipeline module, then we load it from the pipeline
1100
+ # check that passed_class_obj has correct parent class
1101
+ maybe_raise_or_warn(
1102
+ library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module
1103
+ )
1104
+
1105
+ loaded_sub_model = passed_class_obj[name]
1106
+ else:
1107
+ # load sub model
1108
+ loaded_sub_model = load_sub_model(
1109
+ library_name=library_name,
1110
+ class_name=class_name,
1111
+ importable_classes=importable_classes,
1112
+ pipelines=pipelines,
1113
+ is_pipeline_module=is_pipeline_module,
1114
+ pipeline_class=pipeline_class,
1115
+ torch_dtype=torch_dtype,
1116
+ provider=provider,
1117
+ sess_options=sess_options,
1118
+ device_map=device_map,
1119
+ max_memory=max_memory,
1120
+ offload_folder=offload_folder,
1121
+ offload_state_dict=offload_state_dict,
1122
+ model_variants=model_variants,
1123
+ name=name,
1124
+ from_flax=from_flax,
1125
+ variant=variant,
1126
+ low_cpu_mem_usage=low_cpu_mem_usage,
1127
+ cached_folder=cached_folder,
1128
+ )
1129
+ logger.info(
1130
+ f"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}."
1131
+ )
1132
+
1133
+ init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)
1134
+
1135
+ if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")):
1136
+ modelcard = ModelCard.load(os.path.join(cached_folder, "README.md"))
1137
+ connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS}
1138
+ load_kwargs = {
1139
+ "cache_dir": cache_dir,
1140
+ "resume_download": resume_download,
1141
+ "force_download": force_download,
1142
+ "proxies": proxies,
1143
+ "local_files_only": local_files_only,
1144
+ "use_auth_token": use_auth_token,
1145
+ "revision": revision,
1146
+ "torch_dtype": torch_dtype,
1147
+ "custom_pipeline": custom_pipeline,
1148
+ "custom_revision": custom_revision,
1149
+ "provider": provider,
1150
+ "sess_options": sess_options,
1151
+ "device_map": device_map,
1152
+ "max_memory": max_memory,
1153
+ "offload_folder": offload_folder,
1154
+ "offload_state_dict": offload_state_dict,
1155
+ "low_cpu_mem_usage": low_cpu_mem_usage,
1156
+ "variant": variant,
1157
+ "use_safetensors": use_safetensors,
1158
+ }
1159
+
1160
+ def get_connected_passed_kwargs(prefix):
1161
+ connected_passed_class_obj = {
1162
+ k.replace(f"{prefix}_", ""): w for k, w in passed_class_obj.items() if k.split("_")[0] == prefix
1163
+ }
1164
+ connected_passed_pipe_kwargs = {
1165
+ k.replace(f"{prefix}_", ""): w for k, w in passed_pipe_kwargs.items() if k.split("_")[0] == prefix
1166
+ }
1167
+
1168
+ connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs}
1169
+ return connected_passed_kwargs
1170
+
1171
+ connected_pipes = {
1172
+ prefix: DiffusionPipeline.from_pretrained(
1173
+ repo_id, **load_kwargs.copy(), **get_connected_passed_kwargs(prefix)
1174
+ )
1175
+ for prefix, repo_id in connected_pipes.items()
1176
+ if repo_id is not None
1177
+ }
1178
+
1179
+ for prefix, connected_pipe in connected_pipes.items():
1180
+ # add connected pipes to `init_kwargs` with <prefix>_<component_name>, e.g. "prior_text_encoder"
1181
+ init_kwargs.update(
1182
+ {"_".join([prefix, name]): component for name, component in connected_pipe.components.items()}
1183
+ )
1184
+
1185
+ # 7. Potentially add passed objects if expected
1186
+ missing_modules = set(expected_modules) - set(init_kwargs.keys())
1187
+ passed_modules = list(passed_class_obj.keys())
1188
+ optional_modules = pipeline_class._optional_components
1189
+ if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules):
1190
+ for module in missing_modules:
1191
+ init_kwargs[module] = passed_class_obj.get(module, None)
1192
+ elif len(missing_modules) > 0:
1193
+ passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs
1194
+ raise ValueError(
1195
+ f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed."
1196
+ )
1197
+
1198
+ # 8. Instantiate the pipeline
1199
+ model = pipeline_class(**init_kwargs)
1200
+
1201
+ # 9. Save where the model was instantiated from
1202
+ model.register_to_config(_name_or_path=pretrained_model_name_or_path)
1203
+ return model
1204
+
1205
+ @property
1206
+ def name_or_path(self) -> str:
1207
+ return getattr(self.config, "_name_or_path", None)
1208
+
1209
+ @property
1210
+ def _execution_device(self):
1211
+ r"""
1212
+ Returns the device on which the pipeline's models will be executed. After calling
1213
+ [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from
1214
+ Accelerate's module hooks.
1215
+ """
1216
+ for name, model in self.components.items():
1217
+ if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload:
1218
+ continue
1219
+
1220
+ if not hasattr(model, "_hf_hook"):
1221
+ return self.device
1222
+ for module in model.modules():
1223
+ if (
1224
+ hasattr(module, "_hf_hook")
1225
+ and hasattr(module._hf_hook, "execution_device")
1226
+ and module._hf_hook.execution_device is not None
1227
+ ):
1228
+ return torch.device(module._hf_hook.execution_device)
1229
+ return self.device
1230
+
1231
+ def enable_model_cpu_offload(self, gpu_id: int = 0, device: Union[torch.device, str] = "cuda"):
1232
+ r"""
1233
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
1234
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
1235
+ method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
1236
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
1237
+ """
1238
+ if self.model_cpu_offload_seq is None:
1239
+ raise ValueError(
1240
+ "Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set."
1241
+ )
1242
+
1243
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
1244
+ from accelerate import cpu_offload_with_hook
1245
+ else:
1246
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
1247
+
1248
+ device = torch.device(f"cuda:{gpu_id}")
1249
+
1250
+ if self.device.type != "cpu":
1251
+ self.to("cpu", silence_dtype_warnings=True)
1252
+ device_mod = getattr(torch, self.device.type, None)
1253
+ if hasattr(device_mod, "empty_cache") and device_mod.is_available():
1254
+ device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
1255
+
1256
+ all_model_components = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)}
1257
+
1258
+ self._all_hooks = []
1259
+ hook = None
1260
+ for model_str in self.model_cpu_offload_seq.split("->"):
1261
+ model = all_model_components.pop(model_str, None)
1262
+ if not isinstance(model, torch.nn.Module):
1263
+ continue
1264
+
1265
+ _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook)
1266
+ self._all_hooks.append(hook)
1267
+
1268
+ # CPU offload models that are not in the seq chain unless they are explicitly excluded
1269
+ # these models will stay on CPU until maybe_free_model_hooks is called
1270
+ # some models cannot be in the seq chain because they are iteratively called, such as controlnet
1271
+ for name, model in all_model_components.items():
1272
+ if not isinstance(model, torch.nn.Module):
1273
+ continue
1274
+
1275
+ if name in self._exclude_from_cpu_offload:
1276
+ model.to(device)
1277
+ else:
1278
+ _, hook = cpu_offload_with_hook(model, device)
1279
+ self._all_hooks.append(hook)
1280
+
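# Editorial sketch (not part of the uploaded file): enabling model-level CPU
# offload instead of moving the whole pipeline to the GPU. Assumes a CUDA
# machine; the checkpoint id is illustrative.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # replaces pipe.to("cuda")
image = pipe("an astronaut riding a horse on mars").images[0]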
1281
+ def maybe_free_model_hooks(self):
1282
+ r"""
1283
+ Offloads all models and removes the hooks added by `enable_model_cpu_offload`, then re-registers the hooks so the pipeline is left in the same state as before the call. If model CPU offloading was never enabled, this function is a no-op.
1284
+ """
1285
+ if not hasattr(self, "_all_hooks") or len(self._all_hooks) == 0:
1286
+ # `enable_model_cpu_offload` has not been called, so silently do nothing
1287
+ return
1288
+
1289
+ for hook in self._all_hooks:
1290
+ # offload model and remove hook from model
1291
+ hook.offload()
1292
+ hook.remove()
1293
+
1294
+ # make sure the model is in the same state as before calling it
1295
+ self.enable_model_cpu_offload()
1296
+
1297
+ def enable_sequential_cpu_offload(self, gpu_id: int = 0, device: Union[torch.device, str] = "cuda"):
1298
+ r"""
1299
+ Offloads all models to CPU using 🤗 Accelerate, significantly reducing memory usage. When called, the state
1300
+ dicts of all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are saved to CPU
1301
+ and then moved to `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward`
1302
+ method called. Offloading happens on a submodule basis. Memory savings are higher than with
1303
+ `enable_model_cpu_offload`, but performance is lower.
1304
+ """
1305
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
1306
+ from accelerate import cpu_offload
1307
+ else:
1308
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
1309
+
1310
+ if device == "cuda":
1311
+ device = torch.device(f"{device}:{gpu_id}")
1312
+
1313
+ if self.device.type != "cpu":
1314
+ self.to("cpu", silence_dtype_warnings=True)
1315
+ device_mod = getattr(torch, self.device.type, None)
1316
+ if hasattr(device_mod, "empty_cache") and device_mod.is_available():
1317
+ device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
1318
+
1319
+ for name, model in self.components.items():
1320
+ if not isinstance(model, torch.nn.Module):
1321
+ continue
1322
+
1323
+ if name in self._exclude_from_cpu_offload:
1324
+ model.to(device)
1325
+ else:
1326
+ # make sure to offload buffers if not all high level weights
1327
+ # are of type nn.Module
1328
+ offload_buffers = len(model._parameters) > 0
1329
+ cpu_offload(model, device, offload_buffers=offload_buffers)
1330
+
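# Editorial sketch (not part of the uploaded file): sequential offload moves
# individual submodules on and off the GPU, giving the largest memory savings
# at the cost of speed. Assumes a CUDA machine; checkpoint id is illustrative.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.enable_sequential_cpu_offload()  # lowest peak VRAM, slowest inference
image = pipe("an astronaut riding a horse on mars").images[0]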
1331
+ @classmethod
1332
+ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
1333
+ r"""
1334
+ Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights.
1335
+
1336
+ Parameters:
1337
+ pretrained_model_name (`str` or `os.PathLike`, *optional*):
1338
+ A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
1339
+ hosted on the Hub.
1340
+ custom_pipeline (`str`, *optional*):
1341
+ Can be either:
1342
+
1343
+ - A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained
1344
+ pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines
1345
+ the custom pipeline.
1346
+
1347
+ - A string, the *file name* of a community pipeline hosted on GitHub under
1348
+ [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file
1349
+ names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`
1350
+ instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the
1351
+ current `main` branch of GitHub.
1352
+
1353
+ - A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. The directory
1354
+ must contain a file called `pipeline.py` that defines the custom pipeline.
1355
+
1356
+ <Tip warning={true}>
1357
+
1358
+ 🧪 This is an experimental feature and may change in the future.
1359
+
1360
+ </Tip>
1361
+
1362
+ For more information on how to load and create custom pipelines, take a look at [How to contribute a
1363
+ community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline).
1364
+
1365
+ force_download (`bool`, *optional*, defaults to `False`):
1366
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
1367
+ cached versions if they exist.
1368
+ resume_download (`bool`, *optional*, defaults to `False`):
1369
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
1370
+ incompletely downloaded files are deleted.
1371
+ proxies (`Dict[str, str]`, *optional*):
1372
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
1373
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
1374
+ output_loading_info(`bool`, *optional*, defaults to `False`):
1375
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
1376
+ local_files_only (`bool`, *optional*, defaults to `False`):
1377
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
1378
+ won't be downloaded from the Hub.
1379
+ use_auth_token (`str` or *bool*, *optional*):
1380
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
1381
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
1382
+ revision (`str`, *optional*, defaults to `"main"`):
1383
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
1384
+ allowed by Git.
1385
+ custom_revision (`str`, *optional*, defaults to `"main"`):
1386
+ The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
1387
+ `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
1388
+ custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
1389
+ mirror (`str`, *optional*):
1390
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
1391
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
1392
+ information.
1393
+ variant (`str`, *optional*):
1394
+ Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
1395
+ loading `from_flax`.
1396
+ use_safetensors (`bool`, *optional*, defaults to `None`):
1397
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
1398
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
1399
+ weights. If set to `False`, safetensors weights are not loaded.
1400
+ use_onnx (`bool`, *optional*, defaults to `False`):
1401
+ If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights
1402
+ will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is
1403
+ `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending
1404
+ with `.onnx` and `.pb`.
1405
+
1406
+ Returns:
1407
+ `os.PathLike`:
1408
+ A path to the downloaded pipeline.
1409
+
1410
+ <Tip>
1411
+
1412
+ To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
1413
+ `huggingface-cli login`.
1414
+
1415
+ </Tip>
1416
+
1417
+ """
1418
+ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
1419
+ resume_download = kwargs.pop("resume_download", False)
1420
+ force_download = kwargs.pop("force_download", False)
1421
+ proxies = kwargs.pop("proxies", None)
1422
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
1423
+ use_auth_token = kwargs.pop("use_auth_token", None)
1424
+ revision = kwargs.pop("revision", None)
1425
+ from_flax = kwargs.pop("from_flax", False)
1426
+ custom_pipeline = kwargs.pop("custom_pipeline", None)
1427
+ custom_revision = kwargs.pop("custom_revision", None)
1428
+ variant = kwargs.pop("variant", None)
1429
+ use_safetensors = kwargs.pop("use_safetensors", None)
1430
+ use_onnx = kwargs.pop("use_onnx", None)
1431
+ load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
1432
+
1433
+ allow_pickle = False
1434
+ if use_safetensors is None:
1435
+ use_safetensors = True
1436
+ allow_pickle = True
1437
+
1438
+ allow_patterns = None
1439
+ ignore_patterns = None
1440
+
1441
+ model_info_call_error: Optional[Exception] = None
1442
+ if not local_files_only:
1443
+ try:
1444
+ info = model_info(
1445
+ pretrained_model_name,
1446
+ use_auth_token=use_auth_token,
1447
+ revision=revision,
1448
+ )
1449
+ except HTTPError as e:
1450
+ logger.warn(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.")
1451
+ local_files_only = True
1452
+ model_info_call_error = e # save error to reraise it if model is not cached locally
1453
+
1454
+ if not local_files_only:
1455
+ config_file = hf_hub_download(
1456
+ pretrained_model_name,
1457
+ cls.config_name,
1458
+ cache_dir=cache_dir,
1459
+ revision=revision,
1460
+ proxies=proxies,
1461
+ force_download=force_download,
1462
+ resume_download=resume_download,
1463
+ use_auth_token=use_auth_token,
1464
+ )
1465
+
1466
+ config_dict = cls._dict_from_json_file(config_file)
1467
+
1468
+ ignore_filenames = config_dict.pop("_ignore_files", [])
1469
+
1470
+ # retrieve all folder_names that contain relevant files
1471
+ folder_names = [k for k, v in config_dict.items() if isinstance(v, list)]
1472
+
1473
+ filenames = {sibling.rfilename for sibling in info.siblings}
1474
+ model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
1475
+
1476
+ if len(variant_filenames) == 0 and variant is not None:
1477
+ deprecation_message = (
1478
+ f"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available."
1479
+ f"The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`"
1480
+ "if such variant modeling files are not available. Doing so will lead to an error in v0.22.0 as defaulting to non-variant"
1481
+ "modeling files is deprecated."
1482
+ )
1483
+ deprecate("no variant default", "0.22.0", deprecation_message, standard_warn=False)
1484
+
1485
+ # remove ignored filenames
1486
+ model_filenames = set(model_filenames) - set(ignore_filenames)
1487
+ variant_filenames = set(variant_filenames) - set(ignore_filenames)
1488
+
1489
+ # if the whole pipeline is cached we don't have to ping the Hub
1490
+ if revision in DEPRECATED_REVISION_ARGS and version.parse(
1491
+ version.parse(__version__).base_version
1492
+ ) >= version.parse("0.22.0"):
1493
+ warn_deprecated_model_variant(
1494
+ pretrained_model_name, use_auth_token, variant, revision, model_filenames
1495
+ )
1496
+
1497
+ model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names}
1498
+
1499
+ # all filenames compatible with variant will be added
1500
+ allow_patterns = list(model_filenames)
1501
+
1502
+ # allow all patterns from non-model folders
1503
+ # this enables downloading schedulers, tokenizers, ...
1504
+ allow_patterns += [f"{k}/*" for k in folder_names if k not in model_folder_names]
1505
+ # also allow downloading config.json files with the model
1506
+ allow_patterns += [os.path.join(k, "config.json") for k in model_folder_names]
1507
+
1508
+ allow_patterns += [
1509
+ SCHEDULER_CONFIG_NAME,
1510
+ CONFIG_NAME,
1511
+ cls.config_name,
1512
+ CUSTOM_PIPELINE_FILE_NAME,
1513
+ ]
1514
+
1515
+ # retrieve passed components that should not be downloaded
1516
+ pipeline_class = _get_pipeline_class(
1517
+ cls,
1518
+ config_dict,
1519
+ load_connected_pipeline=load_connected_pipeline,
1520
+ custom_pipeline=custom_pipeline,
1521
+ cache_dir=cache_dir,
1522
+ revision=custom_revision,
1523
+ )
1524
+ expected_components, _ = cls._get_signature_keys(pipeline_class)
1525
+ passed_components = [k for k in expected_components if k in kwargs]
1526
+
1527
+ if (
1528
+ use_safetensors
1529
+ and not allow_pickle
1530
+ and not is_safetensors_compatible(
1531
+ model_filenames, variant=variant, passed_components=passed_components
1532
+ )
1533
+ ):
1534
+ raise EnvironmentError(
1535
+ f"Could not found the necessary `safetensors` weights in {model_filenames} (variant={variant})"
1536
+ )
1537
+ if from_flax:
1538
+ ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"]
1539
+ elif use_safetensors and is_safetensors_compatible(
1540
+ model_filenames, variant=variant, passed_components=passed_components
1541
+ ):
1542
+ ignore_patterns = ["*.bin", "*.msgpack"]
1543
+
1544
+ use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx
1545
+ if not use_onnx:
1546
+ ignore_patterns += ["*.onnx", "*.pb"]
1547
+
1548
+ safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")}
1549
+ safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")}
1550
+ if (
1551
+ len(safetensors_variant_filenames) > 0
1552
+ and safetensors_model_filenames != safetensors_variant_filenames
1553
+ ):
1554
+ logger.warn(
1555
+ f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\nIf this behavior is not expected, please check your folder structure."
1556
+ )
1557
+ else:
1558
+ ignore_patterns = ["*.safetensors", "*.msgpack"]
1559
+
1560
+ use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx
1561
+ if not use_onnx:
1562
+ ignore_patterns += ["*.onnx", "*.pb"]
1563
+
1564
+ bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")}
1565
+ bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")}
1566
+ if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames:
1567
+ logger.warn(
1568
+ f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure."
1569
+ )
1570
+
1571
+ # Don't download any objects that are passed
1572
+ allow_patterns = [
1573
+ p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components)
1574
+ ]
1575
+
1576
+ if pipeline_class._load_connected_pipes:
1577
+ allow_patterns.append("README.md")
1578
+
1579
+ # Don't download index files of forbidden patterns either
1580
+ ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns]
1581
+
1582
+ re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns]
1583
+ re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns]
1584
+
1585
+ expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)]
1586
+ expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)]
1587
+
1588
+ snapshot_folder = Path(config_file).parent
1589
+ pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files)
1590
+
1591
+ if pipeline_is_cached and not force_download:
1592
+ # if the pipeline is cached, we can directly return it
1593
+ # else call snapshot_download
1594
+ return snapshot_folder
1595
+
1596
+ user_agent = {"pipeline_class": cls.__name__}
1597
+ if custom_pipeline is not None and not custom_pipeline.endswith(".py"):
1598
+ user_agent["custom_pipeline"] = custom_pipeline
1599
+
1600
+ # download all allow_patterns - ignore_patterns
1601
+ try:
1602
+ cached_folder = snapshot_download(
1603
+ pretrained_model_name,
1604
+ cache_dir=cache_dir,
1605
+ resume_download=resume_download,
1606
+ proxies=proxies,
1607
+ local_files_only=local_files_only,
1608
+ use_auth_token=use_auth_token,
1609
+ revision=revision,
1610
+ allow_patterns=allow_patterns,
1611
+ ignore_patterns=ignore_patterns,
1612
+ user_agent=user_agent,
1613
+ )
1614
+
1615
+ # retrieve pipeline class from local file
1616
+ cls_name = cls.load_config(os.path.join(cached_folder, "model_index.json")).get("_class_name", None)
1617
+ pipeline_class = getattr(diffusers, cls_name, None)
1618
+
1619
+ if pipeline_class is not None and pipeline_class._load_connected_pipes:
1620
+ modelcard = ModelCard.load(os.path.join(cached_folder, "README.md"))
1621
+ connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], [])
1622
+ for connected_pipe_repo_id in connected_pipes:
1623
+ download_kwargs = {
1624
+ "cache_dir": cache_dir,
1625
+ "resume_download": resume_download,
1626
+ "force_download": force_download,
1627
+ "proxies": proxies,
1628
+ "local_files_only": local_files_only,
1629
+ "use_auth_token": use_auth_token,
1630
+ "variant": variant,
1631
+ "use_safetensors": use_safetensors,
1632
+ }
1633
+ DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs)
1634
+
1635
+ return cached_folder
1636
+
1637
+ except FileNotFoundError:
1638
+ # Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache.
1639
+ # This can happen in two cases:
1640
+ # 1. If the user passed `local_files_only=True` => we raise the error directly
1641
+ # 2. If we forced `local_files_only=True` when `model_info` failed => we raise the initial error
1642
+ if model_info_call_error is None:
1643
+ # 1. user passed `local_files_only=True`
1644
+ raise
1645
+ else:
1646
+ # 2. we forced `local_files_only=True` when `model_info` failed
1647
+ raise EnvironmentError(
1648
+ f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occured"
1649
+ " while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace"
1650
+ " above."
1651
+ ) from model_info_call_error
1652
+
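# Editorial sketch (not part of the uploaded file): `download` only resolves and
# caches the pipeline files and returns the snapshot folder; nothing is loaded
# into memory. The repo id and variant are illustrative.
from diffusers import DiffusionPipeline

snapshot_folder = DiffusionPipeline.download(
    "runwayml/stable-diffusion-v1-5", variant="fp16"
)
print(snapshot_folder)  # local path inside the Hugging Face cache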
1653
+ @staticmethod
1654
+ def _get_signature_keys(obj):
1655
+ parameters = inspect.signature(obj.__init__).parameters
1656
+ required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
1657
+ optional_parameters = {k for k, v in parameters.items() if v.default != inspect._empty}
1658
+ expected_modules = set(required_parameters.keys()) - {"self"}
1659
+ return expected_modules, optional_parameters
1660
+
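# Editorial sketch (not part of the uploaded file) of the signature-splitting
# rule implemented above: __init__ parameters without a default are treated as
# required components, the rest as optional. Toy class, not a real pipeline.
import inspect

class ToyPipeline:
    def __init__(self, unet, vae, safety_checker=None):
        pass

params = inspect.signature(ToyPipeline.__init__).parameters
required = {k for k, v in params.items() if v.default is inspect.Parameter.empty} - {"self"}
optional = {k for k, v in params.items() if v.default is not inspect.Parameter.empty}
print(required)  # {'unet', 'vae'}
print(optional)  # {'safety_checker'}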
1661
+ @property
1662
+ def components(self) -> Dict[str, Any]:
1663
+ r"""
1664
+ The `self.components` property can be useful to run different pipelines with the same weights and
1665
+ configurations without reallocating additional memory.
1666
+
1667
+ Returns (`dict`):
1668
+ A dictionary containing all the modules needed to initialize the pipeline.
1669
+
1670
+ Examples:
1671
+
1672
+ ```py
1673
+ >>> from diffusers import (
1674
+ ... StableDiffusionPipeline,
1675
+ ... StableDiffusionImg2ImgPipeline,
1676
+ ... StableDiffusionInpaintPipeline,
1677
+ ... )
1678
+
1679
+ >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
1680
+ >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
1681
+ >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)
1682
+ ```
1683
+ """
1684
+ expected_modules, optional_parameters = self._get_signature_keys(self)
1685
+ components = {
1686
+ k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters
1687
+ }
1688
+
1689
+ if set(components.keys()) != expected_modules:
1690
+ raise ValueError(
1691
+ f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected"
1692
+ f" {expected_modules} to be defined, but {components.keys()} are defined."
1693
+ )
1694
+
1695
+ return components
1696
+
1697
+ @staticmethod
1698
+ def numpy_to_pil(images):
1699
+ """
1700
+ Convert a NumPy image or a batch of images to a PIL image.
1701
+ """
1702
+ return numpy_to_pil(images)
1703
+
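# Editorial sketch (not part of the uploaded file): the static helper above
# expects float arrays in [0, 1] with shape (batch, height, width, channels).
import numpy as np
from diffusers import DiffusionPipeline

arrays = np.random.rand(2, 64, 64, 3).astype("float32")
pil_images = DiffusionPipeline.numpy_to_pil(arrays)
print(len(pil_images), pil_images[0].size)  # 2 (64, 64)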
1704
+ def progress_bar(self, iterable=None, total=None):
1705
+ if not hasattr(self, "_progress_bar_config"):
1706
+ self._progress_bar_config = {}
1707
+ elif not isinstance(self._progress_bar_config, dict):
1708
+ raise ValueError(
1709
+ f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
1710
+ )
1711
+
1712
+ if iterable is not None:
1713
+ return tqdm(iterable, **self._progress_bar_config)
1714
+ elif total is not None:
1715
+ return tqdm(total=total, **self._progress_bar_config)
1716
+ else:
1717
+ raise ValueError("Either `total` or `iterable` has to be defined.")
1718
+
1719
+ def set_progress_bar_config(self, **kwargs):
1720
+ self._progress_bar_config = kwargs
1721
+
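# Editorial sketch (not part of the uploaded file): keyword arguments stored by
# `set_progress_bar_config` are forwarded to tqdm, e.g. to silence the
# denoising progress bar. The checkpoint id is illustrative.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.set_progress_bar_config(disable=True)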
1722
+ def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
1723
+ r"""
1724
+ Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this
1725
+ option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed
1726
+ up during training is not guaranteed.
1727
+
1728
+ <Tip warning={true}>
1729
+
1730
+ ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes
1731
+ precedence.
1732
+
1733
+ </Tip>
1734
+
1735
+ Parameters:
1736
+ attention_op (`Callable`, *optional*):
1737
+ Override the default `None` operator for use as `op` argument to the
1738
+ [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)
1739
+ function of xFormers.
1740
+
1741
+ Examples:
1742
+
1743
+ ```py
1744
+ >>> import torch
1745
+ >>> from diffusers import DiffusionPipeline
1746
+ >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
1747
+
1748
+ >>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
1749
+ >>> pipe = pipe.to("cuda")
1750
+ >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
1751
+ >>> # Workaround for not accepting attention shape using VAE for Flash Attention
1752
+ >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
1753
+ ```
1754
+ """
1755
+ self.set_use_memory_efficient_attention_xformers(True, attention_op)
1756
+
1757
+ def disable_xformers_memory_efficient_attention(self):
1758
+ r"""
1759
+ Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
1760
+ """
1761
+ self.set_use_memory_efficient_attention_xformers(False)
1762
+
1763
+ def set_use_memory_efficient_attention_xformers(
1764
+ self, valid: bool, attention_op: Optional[Callable] = None
1765
+ ) -> None:
1766
+ # Recursively walk through all the children.
1767
+ # Any children which exposes the set_use_memory_efficient_attention_xformers method
1768
+ # gets the message
1769
+ def fn_recursive_set_mem_eff(module: torch.nn.Module):
1770
+ if hasattr(module, "set_use_memory_efficient_attention_xformers"):
1771
+ module.set_use_memory_efficient_attention_xformers(valid, attention_op)
1772
+
1773
+ for child in module.children():
1774
+ fn_recursive_set_mem_eff(child)
1775
+
1776
+ module_names, _ = self._get_signature_keys(self)
1777
+ modules = [getattr(self, n, None) for n in module_names]
1778
+ modules = [m for m in modules if isinstance(m, torch.nn.Module)]
1779
+
1780
+ for module in modules:
1781
+ fn_recursive_set_mem_eff(module)
1782
+
1783
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
1784
+ r"""
1785
+ Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor
1786
+ in slices to compute attention in several steps. For more than one attention head, the computation is performed
1787
+ sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.
1788
+
1789
+ <Tip warning={true}>
1790
+
1791
+ ⚠️ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch
1792
+ 2.0 or xFormers. These attention computations are already very memory efficient so you won't need to enable
1793
+ this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slowdowns!
1794
+
1795
+ </Tip>
1796
+
1797
+ Args:
1798
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
1799
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
1800
+ `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
1801
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
1802
+ must be a multiple of `slice_size`.
1803
+
1804
+ Examples:
1805
+
1806
+ ```py
1807
+ >>> import torch
1808
+ >>> from diffusers import StableDiffusionPipeline
1809
+
1810
+ >>> pipe = StableDiffusionPipeline.from_pretrained(
1811
+ ... "runwayml/stable-diffusion-v1-5",
1812
+ ... torch_dtype=torch.float16,
1813
+ ... use_safetensors=True,
1814
+ ... )
1815
+
1816
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
1817
+ >>> pipe.enable_attention_slicing()
1818
+ >>> image = pipe(prompt).images[0]
1819
+ ```
1820
+ """
1821
+ self.set_attention_slice(slice_size)
1822
+
1823
+ def disable_attention_slicing(self):
1824
+ r"""
1825
+ Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is
1826
+ computed in one step.
1827
+ """
1828
+ # set slice_size = `None` to disable `attention slicing`
1829
+ self.enable_attention_slicing(None)
1830
+
1831
+ def set_attention_slice(self, slice_size: Optional[int]):
1832
+ module_names, _ = self._get_signature_keys(self)
1833
+ modules = [getattr(self, n, None) for n in module_names]
1834
+ modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, "set_attention_slice")]
1835
+
1836
+ for module in modules:
1837
+ module.set_attention_slice(slice_size)
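# Editorial sketch (not part of the uploaded file): how `slice_size` relates to
# the number of attention slices, per the `enable_attention_slicing` docstring
# above ("auto" halves the head dimension; an int yields
# attention_head_dim // slice_size slices). Numbers are illustrative.
attention_head_dim = 8

def num_slices(slice_size):
    if slice_size == "auto":
        slice_size = attention_head_dim // 2
    return attention_head_dim // slice_size

print(num_slices("auto"))  # 2
print(num_slices(2))       # 4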
unet_2d_blocks.py ADDED
The diff for this file is too large to render. See raw diff
 
unet_2d_condition.py ADDED
@@ -0,0 +1,1152 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.utils.checkpoint
20
+
21
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
22
+ from diffusers.loaders import UNet2DConditionLoadersMixin
23
+ from diffusers.utils import BaseOutput, logging
24
+ from diffusers.models.activations import get_activation
25
+ from diffusers.models.attention_processor import (
26
+ ADDED_KV_ATTENTION_PROCESSORS,
27
+ CROSS_ATTENTION_PROCESSORS,
28
+ AttentionProcessor,
29
+ AttnAddedKVProcessor,
30
+ AttnProcessor,
31
+ )
32
+ from diffusers.models.embeddings import (
33
+ GaussianFourierProjection,
34
+ ImageHintTimeEmbedding,
35
+ ImageProjection,
36
+ ImageTimeEmbedding,
37
+ PositionNet,
38
+ TextImageProjection,
39
+ TextImageTimeEmbedding,
40
+ TextTimeEmbedding,
41
+ TimestepEmbedding,
42
+ Timesteps,
43
+ )
44
+ from diffusers.models.modeling_utils import ModelMixin
45
+
46
+ from deepcache.unet_2d_blocks import (
47
+ UNetMidBlock2DCrossAttn,
48
+ UNetMidBlock2DSimpleCrossAttn,
49
+ get_down_block,
50
+ get_up_block,
51
+ )
52
+
53
+ import time
54
+
55
+
56
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
57
+
58
+
59
+ @dataclass
60
+ class UNet2DConditionOutput(BaseOutput):
61
+ """
62
+ The output of [`UNet2DConditionModel`].
63
+
64
+ Args:
65
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
66
+ The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
67
+ """
68
+
69
+ sample: torch.FloatTensor = None
70
+
71
+
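# Editorial sketch (not part of the uploaded file): callers read the prediction
# from the `sample` field of the dataclass defined above (or from a plain tuple
# when the UNet is called with `return_dict=False`). Shape is illustrative.
import torch

out = UNet2DConditionOutput(sample=torch.zeros(1, 4, 64, 64))
print(out.sample.shape)  # torch.Size([1, 4, 64, 64])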
72
+ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
73
+ r"""
74
+ A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
75
+ shaped output.
76
+
77
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
78
+ for all models (such as downloading or saving).
79
+
80
+ Parameters:
81
+ sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
82
+ Height and width of input/output sample.
83
+ in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
84
+ out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
85
+ center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
86
+ flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
87
+ Whether to flip the sin to cos in the time embedding.
88
+ freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
89
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
90
+ The tuple of downsample blocks to use.
91
+ mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
92
+ Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or
93
+ `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
94
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
95
+ The tuple of upsample blocks to use.
96
+ only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
97
+ Whether to include self-attention in the basic transformer blocks, see
98
+ [`~models.attention.BasicTransformerBlock`].
99
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
100
+ The tuple of output channels for each block.
101
+ layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
102
+ downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
103
+ mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
104
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
105
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
106
+ norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
107
+ If `None`, normalization and activation layers are skipped in post-processing.
108
+ norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
109
+ cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
110
+ The dimension of the cross attention features.
111
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
112
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
113
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
114
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
115
+ encoder_hid_dim (`int`, *optional*, defaults to None):
116
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
117
+ dimension to `cross_attention_dim`.
118
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
119
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
120
+ embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
121
+ attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
122
+ num_attention_heads (`int`, *optional*):
123
+ The number of attention heads. If not defined, defaults to `attention_head_dim`
124
+ resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
125
+ for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
126
+ class_embed_type (`str`, *optional*, defaults to `None`):
127
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
128
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
129
+ addition_embed_type (`str`, *optional*, defaults to `None`):
130
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
131
+ "text". "text" will use the `TextTimeEmbedding` layer.
132
+ addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
133
+ Dimension for the timestep embeddings.
134
+ num_class_embeds (`int`, *optional*, defaults to `None`):
135
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
136
+ class conditioning with `class_embed_type` equal to `None`.
137
+ time_embedding_type (`str`, *optional*, defaults to `positional`):
138
+ The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
139
+ time_embedding_dim (`int`, *optional*, defaults to `None`):
140
+ An optional override for the dimension of the projected time embedding.
141
+ time_embedding_act_fn (`str`, *optional*, defaults to `None`):
142
+ Optional activation function to use only once on the time embeddings before they are passed to the rest of
143
+ the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
144
+ timestep_post_act (`str`, *optional*, defaults to `None`):
145
+ The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
146
+ time_cond_proj_dim (`int`, *optional*, defaults to `None`):
147
+ The dimension of `cond_proj` layer in the timestep embedding.
148
+ conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
149
+ conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
150
+ projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
151
+ `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
152
+ class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
153
+ embeddings with the class embeddings.
154
+ mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
155
+ Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
156
+ `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
157
+ `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`
158
+ otherwise.
159
+ """
160
+
161
+ _supports_gradient_checkpointing = True
162
+
163
+ @register_to_config
164
+ def __init__(
165
+ self,
166
+ sample_size: Optional[int] = None,
167
+ in_channels: int = 4,
168
+ out_channels: int = 4,
169
+ center_input_sample: bool = False,
170
+ flip_sin_to_cos: bool = True,
171
+ freq_shift: int = 0,
172
+ down_block_types: Tuple[str] = (
173
+ "CrossAttnDownBlock2D",
174
+ "CrossAttnDownBlock2D",
175
+ "CrossAttnDownBlock2D",
176
+ "DownBlock2D",
177
+ ),
178
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
179
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
180
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
181
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
182
+ layers_per_block: Union[int, Tuple[int]] = 2,
183
+ downsample_padding: int = 1,
184
+ mid_block_scale_factor: float = 1,
185
+ dropout: float = 0.0,
186
+ act_fn: str = "silu",
187
+ norm_num_groups: Optional[int] = 32,
188
+ norm_eps: float = 1e-5,
189
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
190
+ transformer_layers_per_block: Union[int, Tuple[int]] = 1,
191
+ encoder_hid_dim: Optional[int] = None,
192
+ encoder_hid_dim_type: Optional[str] = None,
193
+ attention_head_dim: Union[int, Tuple[int]] = 8,
194
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
195
+ dual_cross_attention: bool = False,
196
+ use_linear_projection: bool = False,
197
+ class_embed_type: Optional[str] = None,
198
+ addition_embed_type: Optional[str] = None,
199
+ addition_time_embed_dim: Optional[int] = None,
200
+ num_class_embeds: Optional[int] = None,
201
+ upcast_attention: bool = False,
202
+ resnet_time_scale_shift: str = "default",
203
+ resnet_skip_time_act: bool = False,
204
+ resnet_out_scale_factor: int = 1.0,
205
+ time_embedding_type: str = "positional",
206
+ time_embedding_dim: Optional[int] = None,
207
+ time_embedding_act_fn: Optional[str] = None,
208
+ timestep_post_act: Optional[str] = None,
209
+ time_cond_proj_dim: Optional[int] = None,
210
+ conv_in_kernel: int = 3,
211
+ conv_out_kernel: int = 3,
212
+ projection_class_embeddings_input_dim: Optional[int] = None,
213
+ attention_type: str = "default",
214
+ class_embeddings_concat: bool = False,
215
+ mid_block_only_cross_attention: Optional[bool] = None,
216
+ cross_attention_norm: Optional[str] = None,
217
+ addition_embed_type_num_heads=64,
218
+ ):
219
+ super().__init__()
220
+
221
+ self.sample_size = sample_size
222
+
223
+ if num_attention_heads is not None:
224
+ raise ValueError(
225
+ "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
226
+ )
227
+
228
+ # If `num_attention_heads` is not defined (which is the case for most models)
229
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
230
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
231
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
232
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
233
+ # which is why we correct for the naming here.
234
+ num_attention_heads = num_attention_heads or attention_head_dim
235
+
236
+ # Check inputs
237
+ if len(down_block_types) != len(up_block_types):
238
+ raise ValueError(
239
+ f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
240
+ )
241
+
242
+ if len(block_out_channels) != len(down_block_types):
243
+ raise ValueError(
244
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
245
+ )
246
+
247
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
248
+ raise ValueError(
249
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
250
+ )
251
+
252
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
253
+ raise ValueError(
254
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
255
+ )
256
+
257
+ if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
258
+ raise ValueError(
259
+ f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
260
+ )
261
+
262
+ if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
263
+ raise ValueError(
264
+ f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
265
+ )
266
+
267
+ if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
268
+ raise ValueError(
269
+ f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
270
+ )
271
+
272
+ # input
273
+ conv_in_padding = (conv_in_kernel - 1) // 2
274
+ self.conv_in = nn.Conv2d(
275
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
276
+ )
277
+
278
+ # time
279
+ if time_embedding_type == "fourier":
280
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
281
+ if time_embed_dim % 2 != 0:
282
+ raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
283
+ self.time_proj = GaussianFourierProjection(
284
+ time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
285
+ )
286
+ timestep_input_dim = time_embed_dim
287
+ elif time_embedding_type == "positional":
288
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
289
+
290
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
291
+ timestep_input_dim = block_out_channels[0]
292
+ else:
293
+ raise ValueError(
294
+ f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
295
+ )
296
+
297
+ self.time_embedding = TimestepEmbedding(
298
+ timestep_input_dim,
299
+ time_embed_dim,
300
+ act_fn=act_fn,
301
+ post_act_fn=timestep_post_act,
302
+ cond_proj_dim=time_cond_proj_dim,
303
+ )
304
+
305
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
306
+ encoder_hid_dim_type = "text_proj"
307
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
308
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
309
+
310
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
311
+ raise ValueError(
312
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
313
+ )
314
+
315
+ if encoder_hid_dim_type == "text_proj":
316
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
317
+ elif encoder_hid_dim_type == "text_image_proj":
318
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
319
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
320
+ # case when `encoder_hid_dim_type == "text_image_proj"` (Kandinsky 2.1).
321
+ self.encoder_hid_proj = TextImageProjection(
322
+ text_embed_dim=encoder_hid_dim,
323
+ image_embed_dim=cross_attention_dim,
324
+ cross_attention_dim=cross_attention_dim,
325
+ )
326
+ elif encoder_hid_dim_type == "image_proj":
327
+ # Kandinsky 2.2
328
+ self.encoder_hid_proj = ImageProjection(
329
+ image_embed_dim=encoder_hid_dim,
330
+ cross_attention_dim=cross_attention_dim,
331
+ )
332
+ elif encoder_hid_dim_type is not None:
333
+ raise ValueError(
334
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
335
+ )
336
+ else:
337
+ self.encoder_hid_proj = None
338
+
339
+ # class embedding
340
+ if class_embed_type is None and num_class_embeds is not None:
341
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
342
+ elif class_embed_type == "timestep":
343
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
344
+ elif class_embed_type == "identity":
345
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
346
+ elif class_embed_type == "projection":
347
+ if projection_class_embeddings_input_dim is None:
348
+ raise ValueError(
349
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
350
+ )
351
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
352
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
353
+ # 2. it projects from an arbitrary input dimension.
354
+ #
355
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
356
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
357
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
358
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
359
+ elif class_embed_type == "simple_projection":
360
+ if projection_class_embeddings_input_dim is None:
361
+ raise ValueError(
362
+ "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
363
+ )
364
+ self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
365
+ else:
366
+ self.class_embedding = None
367
+
368
+ if addition_embed_type == "text":
369
+ if encoder_hid_dim is not None:
370
+ text_time_embedding_from_dim = encoder_hid_dim
371
+ else:
372
+ text_time_embedding_from_dim = cross_attention_dim
373
+
374
+ self.add_embedding = TextTimeEmbedding(
375
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
376
+ )
377
+ elif addition_embed_type == "text_image":
378
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
379
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
380
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1).
381
+ self.add_embedding = TextImageTimeEmbedding(
382
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
383
+ )
384
+ elif addition_embed_type == "text_time":
385
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
386
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
387
+ elif addition_embed_type == "image":
388
+ # Kandinsky 2.2
389
+ self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
390
+ elif addition_embed_type == "image_hint":
391
+ # Kandinsky 2.2 ControlNet
392
+ self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
393
+ elif addition_embed_type is not None:
394
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
395
+
396
+ if time_embedding_act_fn is None:
397
+ self.time_embed_act = None
398
+ else:
399
+ self.time_embed_act = get_activation(time_embedding_act_fn)
400
+
401
+ self.down_blocks = nn.ModuleList([])
402
+ self.up_blocks = nn.ModuleList([])
403
+
404
+ if isinstance(only_cross_attention, bool):
405
+ if mid_block_only_cross_attention is None:
406
+ mid_block_only_cross_attention = only_cross_attention
407
+
408
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
409
+
410
+ if mid_block_only_cross_attention is None:
411
+ mid_block_only_cross_attention = False
412
+
413
+ if isinstance(num_attention_heads, int):
414
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
415
+
416
+ if isinstance(attention_head_dim, int):
417
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
418
+
419
+ if isinstance(cross_attention_dim, int):
420
+ cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
421
+
422
+ if isinstance(layers_per_block, int):
423
+ layers_per_block = [layers_per_block] * len(down_block_types)
424
+
425
+ if isinstance(transformer_layers_per_block, int):
426
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
427
+
428
+ if class_embeddings_concat:
429
+ # The time embeddings are concatenated with the class embeddings. The dimension of the
430
+ # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
431
+ # regular time embeddings
432
+ blocks_time_embed_dim = time_embed_dim * 2
433
+ else:
434
+ blocks_time_embed_dim = time_embed_dim
435
+
436
+ # down
437
+ output_channel = block_out_channels[0]
438
+ for i, down_block_type in enumerate(down_block_types):
439
+ input_channel = output_channel
440
+ output_channel = block_out_channels[i]
441
+ is_final_block = i == len(block_out_channels) - 1
442
+
443
+ down_block = get_down_block(
444
+ down_block_type,
445
+ num_layers=layers_per_block[i],
446
+ transformer_layers_per_block=transformer_layers_per_block[i],
447
+ in_channels=input_channel,
448
+ out_channels=output_channel,
449
+ temb_channels=blocks_time_embed_dim,
450
+ add_downsample=not is_final_block,
451
+ resnet_eps=norm_eps,
452
+ resnet_act_fn=act_fn,
453
+ resnet_groups=norm_num_groups,
454
+ cross_attention_dim=cross_attention_dim[i],
455
+ num_attention_heads=num_attention_heads[i],
456
+ downsample_padding=downsample_padding,
457
+ dual_cross_attention=dual_cross_attention,
458
+ use_linear_projection=use_linear_projection,
459
+ only_cross_attention=only_cross_attention[i],
460
+ upcast_attention=upcast_attention,
461
+ resnet_time_scale_shift=resnet_time_scale_shift,
462
+ attention_type=attention_type,
463
+ resnet_skip_time_act=resnet_skip_time_act,
464
+ resnet_out_scale_factor=resnet_out_scale_factor,
465
+ cross_attention_norm=cross_attention_norm,
466
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
467
+ dropout=dropout,
468
+ )
469
+ self.down_blocks.append(down_block)
470
+
471
+ # mid
472
+ if mid_block_type == "UNetMidBlock2DCrossAttn":
473
+ self.mid_block = UNetMidBlock2DCrossAttn(
474
+ transformer_layers_per_block=transformer_layers_per_block[-1],
475
+ in_channels=block_out_channels[-1],
476
+ temb_channels=blocks_time_embed_dim,
477
+ dropout=dropout,
478
+ resnet_eps=norm_eps,
479
+ resnet_act_fn=act_fn,
480
+ output_scale_factor=mid_block_scale_factor,
481
+ resnet_time_scale_shift=resnet_time_scale_shift,
482
+ cross_attention_dim=cross_attention_dim[-1],
483
+ num_attention_heads=num_attention_heads[-1],
484
+ resnet_groups=norm_num_groups,
485
+ dual_cross_attention=dual_cross_attention,
486
+ use_linear_projection=use_linear_projection,
487
+ upcast_attention=upcast_attention,
488
+ attention_type=attention_type,
489
+ )
490
+ elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
491
+ self.mid_block = UNetMidBlock2DSimpleCrossAttn(
492
+ in_channels=block_out_channels[-1],
493
+ temb_channels=blocks_time_embed_dim,
494
+ dropout=dropout,
495
+ resnet_eps=norm_eps,
496
+ resnet_act_fn=act_fn,
497
+ output_scale_factor=mid_block_scale_factor,
498
+ cross_attention_dim=cross_attention_dim[-1],
499
+ attention_head_dim=attention_head_dim[-1],
500
+ resnet_groups=norm_num_groups,
501
+ resnet_time_scale_shift=resnet_time_scale_shift,
502
+ skip_time_act=resnet_skip_time_act,
503
+ only_cross_attention=mid_block_only_cross_attention,
504
+ cross_attention_norm=cross_attention_norm,
505
+ )
506
+ elif mid_block_type is None:
507
+ self.mid_block = None
508
+ else:
509
+ raise ValueError(f"unknown mid_block_type : {mid_block_type}")
510
+
511
+ # count how many layers upsample the images
512
+ self.num_upsamplers = 0
513
+
514
+ # up
515
+ reversed_block_out_channels = list(reversed(block_out_channels))
516
+ reversed_num_attention_heads = list(reversed(num_attention_heads))
517
+ reversed_layers_per_block = list(reversed(layers_per_block))
518
+ reversed_cross_attention_dim = list(reversed(cross_attention_dim))
519
+ reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))
520
+ only_cross_attention = list(reversed(only_cross_attention))
521
+
522
+ output_channel = reversed_block_out_channels[0]
523
+ for i, up_block_type in enumerate(up_block_types):
524
+ is_final_block = i == len(block_out_channels) - 1
525
+
526
+ prev_output_channel = output_channel
527
+ output_channel = reversed_block_out_channels[i]
528
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
529
+
530
+ # add upsample block for all BUT final layer
531
+ if not is_final_block:
532
+ add_upsample = True
533
+ self.num_upsamplers += 1
534
+ else:
535
+ add_upsample = False
536
+
537
+ up_block = get_up_block(
538
+ up_block_type,
539
+ num_layers=reversed_layers_per_block[i] + 1,
540
+ transformer_layers_per_block=reversed_transformer_layers_per_block[i],
541
+ in_channels=input_channel,
542
+ out_channels=output_channel,
543
+ prev_output_channel=prev_output_channel,
544
+ temb_channels=blocks_time_embed_dim,
545
+ add_upsample=add_upsample,
546
+ resnet_eps=norm_eps,
547
+ resnet_act_fn=act_fn,
548
+ resnet_groups=norm_num_groups,
549
+ cross_attention_dim=reversed_cross_attention_dim[i],
550
+ num_attention_heads=reversed_num_attention_heads[i],
551
+ dual_cross_attention=dual_cross_attention,
552
+ use_linear_projection=use_linear_projection,
553
+ only_cross_attention=only_cross_attention[i],
554
+ upcast_attention=upcast_attention,
555
+ resnet_time_scale_shift=resnet_time_scale_shift,
556
+ attention_type=attention_type,
557
+ resnet_skip_time_act=resnet_skip_time_act,
558
+ resnet_out_scale_factor=resnet_out_scale_factor,
559
+ cross_attention_norm=cross_attention_norm,
560
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
561
+ dropout=dropout,
562
+ )
563
+ self.up_blocks.append(up_block)
564
+ prev_output_channel = output_channel
565
+
566
+ # out
567
+ if norm_num_groups is not None:
568
+ self.conv_norm_out = nn.GroupNorm(
569
+ num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
570
+ )
571
+
572
+ self.conv_act = get_activation(act_fn)
573
+
574
+ else:
575
+ self.conv_norm_out = None
576
+ self.conv_act = None
577
+
578
+ conv_out_padding = (conv_out_kernel - 1) // 2
579
+ self.conv_out = nn.Conv2d(
580
+ block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
581
+ )
582
+
583
+ if attention_type in ["gated", "gated-text-image"]:
584
+ positive_len = 768
585
+ if isinstance(cross_attention_dim, int):
586
+ positive_len = cross_attention_dim
587
+ elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):
588
+ positive_len = cross_attention_dim[0]
589
+
590
+ feature_type = "text-only" if attention_type == "gated" else "text-image"
591
+ self.position_net = PositionNet(
592
+ positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
593
+ )
594
+
595
+ @property
596
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
597
+ r"""
598
+ Returns:
599
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
600
+ indexed by its weight name.
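+
+ Example (illustrative sketch; `unet` denotes an instance of this model):
+
+ processors = unet.attn_processors
+ # keys are weight names such as "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor"
+ print(len(processors))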
601
+ """
602
+ # set recursively
603
+ processors = {}
604
+
605
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
606
+ if hasattr(module, "get_processor"):
607
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
608
+
609
+ for sub_name, child in module.named_children():
610
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
611
+
612
+ return processors
613
+
614
+ for name, module in self.named_children():
615
+ fn_recursive_add_processors(name, module, processors)
616
+
617
+ return processors
618
+
619
+ def set_attn_processor(
620
+ self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
621
+ ):
622
+ r"""
623
+ Sets the attention processor to use to compute attention.
624
+
625
+ Parameters:
626
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
627
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
628
+ for **all** `Attention` layers.
629
+
630
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
631
+ processor. This is strongly recommended when setting trainable attention processors.
632
+
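+ Example (illustrative sketch; `unet` denotes an instance of this model and `AttnProcessor2_0` is
+ assumed to be importable from `diffusers.models.attention_processor`):
+
+ from diffusers.models.attention_processor import AttnProcessor2_0
+
+ # apply the same processor to every attention layer of the model
+ unet.set_attn_processor(AttnProcessor2_0())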
633
+ """
634
+ count = len(self.attn_processors.keys())
635
+
636
+ if isinstance(processor, dict) and len(processor) != count:
637
+ raise ValueError(
638
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
639
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
640
+ )
641
+
642
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
643
+ if hasattr(module, "set_processor"):
644
+ if not isinstance(processor, dict):
645
+ module.set_processor(processor, _remove_lora=_remove_lora)
646
+ else:
647
+ module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
648
+
649
+ for sub_name, child in module.named_children():
650
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
651
+
652
+ for name, module in self.named_children():
653
+ fn_recursive_attn_processor(name, module, processor)
654
+
655
+ def set_default_attn_processor(self):
656
+ """
657
+ Disables custom attention processors and sets the default attention implementation.
658
+ """
659
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
660
+ processor = AttnAddedKVProcessor()
661
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
662
+ processor = AttnProcessor()
663
+ else:
664
+ raise ValueError(
665
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
666
+ )
667
+
668
+ self.set_attn_processor(processor, _remove_lora=True)
669
+
670
+ def set_attention_slice(self, slice_size):
671
+ r"""
672
+ Enable sliced attention computation.
673
+
674
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
675
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
676
+
677
+ Args:
678
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
679
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
680
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
681
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
682
+ must be a multiple of `slice_size`.
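+
+ Example (illustrative sketch; `unet` denotes an instance of this model):
+
+ unet.set_attention_slice("auto")  # each sliceable layer computes attention in two slices
+ unet.set_attention_slice("max")   # slice size of 1 everywhere, maximum memory savings
+ unet.set_attention_slice(2)       # each layer's attention_head_dim must then be a multiple of 2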
683
+ """
684
+ sliceable_head_dims = []
685
+
686
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
687
+ if hasattr(module, "set_attention_slice"):
688
+ sliceable_head_dims.append(module.sliceable_head_dim)
689
+
690
+ for child in module.children():
691
+ fn_recursive_retrieve_sliceable_dims(child)
692
+
693
+ # retrieve number of attention layers
694
+ for module in self.children():
695
+ fn_recursive_retrieve_sliceable_dims(module)
696
+
697
+ num_sliceable_layers = len(sliceable_head_dims)
698
+
699
+ if slice_size == "auto":
700
+ # half the attention head size is usually a good trade-off between
701
+ # speed and memory
702
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
703
+ elif slice_size == "max":
704
+ # make smallest slice possible
705
+ slice_size = num_sliceable_layers * [1]
706
+
707
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
708
+
709
+ if len(slice_size) != len(sliceable_head_dims):
710
+ raise ValueError(
711
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
712
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
713
+ )
714
+
715
+ for i in range(len(slice_size)):
716
+ size = slice_size[i]
717
+ dim = sliceable_head_dims[i]
718
+ if size is not None and size > dim:
719
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
720
+
721
+ # Recursively walk through all the children.
722
+ # Any children which exposes the set_attention_slice method
723
+ # gets the message
724
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
725
+ if hasattr(module, "set_attention_slice"):
726
+ module.set_attention_slice(slice_size.pop())
727
+
728
+ for child in module.children():
729
+ fn_recursive_set_attention_slice(child, slice_size)
730
+
731
+ reversed_slice_size = list(reversed(slice_size))
732
+ for module in self.children():
733
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
734
+
735
+ def _set_gradient_checkpointing(self, module, value=False):
736
+ if hasattr(module, "gradient_checkpointing"):
737
+ module.gradient_checkpointing = value
738
+
739
+ def forward(
740
+ self,
741
+ sample: torch.FloatTensor,
742
+ timestep: Union[torch.Tensor, float, int],
743
+ encoder_hidden_states: torch.Tensor,
744
+ class_labels: Optional[torch.Tensor] = None,
745
+ timestep_cond: Optional[torch.Tensor] = None,
746
+ attention_mask: Optional[torch.Tensor] = None,
747
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
748
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
749
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
750
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
751
+ encoder_attention_mask: Optional[torch.Tensor] = None,
752
+ quick_replicate: bool = False,
753
+ replicate_prv_feature: Optional[List[torch.Tensor]] = None,
754
+ cache_layer_id: Optional[int] = None,
755
+ cache_block_id: Optional[int] = None,
756
+ return_dict: bool = True,
757
+ ) -> Union[UNet2DConditionOutput, Tuple]:
758
+ r"""
759
+ The [`UNet2DConditionModel`] forward method.
760
+
761
+ Args:
762
+ sample (`torch.FloatTensor`):
763
+ The noisy input tensor with the following shape `(batch, channel, height, width)`.
764
+ timestep (`torch.FloatTensor` or `float` or `int`): The timestep at which to denoise the input.
765
+ encoder_hidden_states (`torch.FloatTensor`):
766
+ The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
767
+ encoder_attention_mask (`torch.Tensor`):
768
+ A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
769
+ `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
770
+ which adds large negative values to the attention scores corresponding to "discard" tokens.
771
+ return_dict (`bool`, *optional*, defaults to `True`):
772
+ Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
773
+ tuple.
774
+ cross_attention_kwargs (`dict`, *optional*):
775
+ A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
776
+ added_cond_kwargs (`dict`, *optional*):
777
+ A kwargs dictionary containing additional embeddings that, if specified, are added to the embeddings that
778
+ are passed along to the UNet blocks.
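+ quick_replicate (`bool`, *optional*, defaults to `False`):
+ (DeepCache) When `True` and `replicate_prv_feature` is provided, only the down blocks up to
+ `cache_layer_id` are executed, the mid block is skipped, and the cached feature is reused as the
+ input to the remaining up blocks.
+ replicate_prv_feature (`List[torch.Tensor]`, *optional*):
+ (DeepCache) Feature map cached from a previous full forward pass, reused when `quick_replicate`
+ is `True`.
+ cache_layer_id (`int`, *optional*):
+ (DeepCache) Index of the down/up block at which the feature is cached and reused.
+ cache_block_id (`int`, *optional*):
+ (DeepCache) Index of the block inside that layer at which the feature is cached and reused.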
779
+
780
+ Returns:
781
+ [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
782
+ If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
783
+ a `tuple` is returned where the first element is the sample tensor.
784
+ """
785
+ # By default samples have to be at least a multiple of the overall upsampling factor.
786
+ # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
787
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
788
+ # on the fly if necessary.
789
+ default_overall_up_factor = 2**self.num_upsamplers
790
+
791
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
792
+ forward_upsample_size = False
793
+ upsample_size = None
794
+
795
+ if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
796
+ logger.info("Forward upsample size to force interpolation output size.")
797
+ forward_upsample_size = True
798
+
799
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
800
+ # expects mask of shape:
801
+ # [batch, key_tokens]
802
+ # adds singleton query_tokens dimension:
803
+ # [batch, 1, key_tokens]
804
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
805
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
806
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
807
+ if attention_mask is not None:
808
+ # assume that mask is expressed as:
809
+ # (1 = keep, 0 = discard)
810
+ # convert mask into a bias that can be added to attention scores:
811
+ # (keep = +0, discard = -10000.0)
812
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
813
+ attention_mask = attention_mask.unsqueeze(1)
814
+
815
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
816
+ if encoder_attention_mask is not None:
817
+ encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
818
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
819
+
820
+ # 0. center input if necessary
821
+ if self.config.center_input_sample:
822
+ sample = 2 * sample - 1.0
823
+
824
+ # 1. time
825
+ timesteps = timestep
826
+ if not torch.is_tensor(timesteps):
827
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
828
+ # This would be a good case for the `match` statement (Python 3.10+)
829
+ is_mps = sample.device.type == "mps"
830
+ if isinstance(timestep, float):
831
+ dtype = torch.float32 if is_mps else torch.float64
832
+ else:
833
+ dtype = torch.int32 if is_mps else torch.int64
834
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
835
+ elif len(timesteps.shape) == 0:
836
+ timesteps = timesteps[None].to(sample.device)
837
+
838
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
839
+ timesteps = timesteps.expand(sample.shape[0])
840
+
841
+ t_emb = self.time_proj(timesteps)
842
+
843
+ # `Timesteps` does not contain any weights and will always return f32 tensors
844
+ # but time_embedding might actually be running in fp16. so we need to cast here.
845
+ # there might be better ways to encapsulate this.
846
+ t_emb = t_emb.to(dtype=sample.dtype)
847
+
848
+ emb = self.time_embedding(t_emb, timestep_cond)
849
+ aug_emb = None
850
+
851
+ if self.class_embedding is not None:
852
+ if class_labels is None:
853
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
854
+
855
+ if self.config.class_embed_type == "timestep":
856
+ class_labels = self.time_proj(class_labels)
857
+
858
+ # `Timesteps` does not contain any weights and will always return f32 tensors
859
+ # there might be better ways to encapsulate this.
860
+ class_labels = class_labels.to(dtype=sample.dtype)
861
+
862
+ class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
863
+
864
+ if self.config.class_embeddings_concat:
865
+ emb = torch.cat([emb, class_emb], dim=-1)
866
+ else:
867
+ emb = emb + class_emb
868
+
869
+ if self.config.addition_embed_type == "text":
870
+ aug_emb = self.add_embedding(encoder_hidden_states)
871
+ elif self.config.addition_embed_type == "text_image":
872
+ # Kandinsky 2.1 - style
873
+ if "image_embeds" not in added_cond_kwargs:
874
+ raise ValueError(
875
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
876
+ )
877
+
878
+ image_embs = added_cond_kwargs.get("image_embeds")
879
+ text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
880
+ aug_emb = self.add_embedding(text_embs, image_embs)
881
+ elif self.config.addition_embed_type == "text_time":
882
+ # SDXL - style
883
+ if "text_embeds" not in added_cond_kwargs:
884
+ raise ValueError(
885
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
886
+ )
887
+ text_embeds = added_cond_kwargs.get("text_embeds")
888
+ if "time_ids" not in added_cond_kwargs:
889
+ raise ValueError(
890
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
891
+ )
892
+ time_ids = added_cond_kwargs.get("time_ids")
893
+ time_embeds = self.add_time_proj(time_ids.flatten())
894
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
895
+
896
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
897
+ add_embeds = add_embeds.to(emb.dtype)
898
+ aug_emb = self.add_embedding(add_embeds)
899
+ elif self.config.addition_embed_type == "image":
900
+ # Kandinsky 2.2 - style
901
+ if "image_embeds" not in added_cond_kwargs:
902
+ raise ValueError(
903
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
904
+ )
905
+ image_embs = added_cond_kwargs.get("image_embeds")
906
+ aug_emb = self.add_embedding(image_embs)
907
+ elif self.config.addition_embed_type == "image_hint":
908
+ # Kandinsky 2.2 - style
909
+ if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
910
+ raise ValueError(
911
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
912
+ )
913
+ image_embs = added_cond_kwargs.get("image_embeds")
914
+ hint = added_cond_kwargs.get("hint")
915
+ aug_emb, hint = self.add_embedding(image_embs, hint)
916
+ sample = torch.cat([sample, hint], dim=1)
917
+
918
+ emb = emb + aug_emb if aug_emb is not None else emb
919
+
920
+ if self.time_embed_act is not None:
921
+ emb = self.time_embed_act(emb)
922
+
923
+ if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
924
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
925
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
926
+ # Kandinsky 2.1 - style
927
+ if "image_embeds" not in added_cond_kwargs:
928
+ raise ValueError(
929
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
930
+ )
931
+
932
+ image_embeds = added_cond_kwargs.get("image_embeds")
933
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
934
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
935
+ # Kandinsky 2.2 - style
936
+ if "image_embeds" not in added_cond_kwargs:
937
+ raise ValueError(
938
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
939
+ )
940
+ image_embeds = added_cond_kwargs.get("image_embeds")
941
+ encoder_hidden_states = self.encoder_hid_proj(image_embeds)
942
+ # 2. pre-process
943
+ sample = self.conv_in(sample)
944
+
945
+ # 2.5 GLIGEN position net
946
+ if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
947
+ cross_attention_kwargs = cross_attention_kwargs.copy()
948
+ gligen_args = cross_attention_kwargs.pop("gligen")
949
+ cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
950
+
951
+ # 3. down
952
+ lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
953
+
954
+ is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
955
+ is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None
956
+
957
+ down_block_res_samples = (sample,)
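+ # DeepCache fast path: reuse the feature cached by a previous full forward pass. Only the down
+ # blocks up to `cache_layer_id` are executed, the mid block is skipped, and `replicate_prv_feature`
+ # is injected as the input to the remaining up blocks.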
958
+ if quick_replicate and replicate_prv_feature is not None:
959
+ # Down
960
+ for i, downsample_block in enumerate(self.down_blocks):
961
+ if i > cache_layer_id:
962
+ break
963
+
964
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
965
+ # For t2i-adapter CrossAttnDownBlock2D
966
+ additional_residuals = {}
967
+ if is_adapter and len(down_block_additional_residuals) > 0:
968
+ additional_residuals["additional_residuals"] = down_block_additional_residuals.pop(0)
969
+
970
+ sample, res_samples = downsample_block(
971
+ hidden_states=sample,
972
+ temb=emb,
973
+ encoder_hidden_states=encoder_hidden_states,
974
+ attention_mask=attention_mask,
975
+ cross_attention_kwargs=cross_attention_kwargs,
976
+ encoder_attention_mask=encoder_attention_mask,
977
+ exist_block_number=cache_block_id if i == cache_layer_id else None,
978
+ **additional_residuals,
979
+ )
980
+ else:
981
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)
982
+
983
+ if is_adapter and len(down_block_additional_residuals) > 0:
984
+ sample += down_block_additional_residuals.pop(0)
985
+
986
+ down_block_res_samples += res_samples
987
+
988
+ # No Middle
989
+ # Up
990
+ #print("down_block_res_samples:", [res_sample.shape for res_sample in down_block_res_samples])
991
+ sample = replicate_prv_feature
992
+ #down_block_res_samples = down_block_res_samples[:-1]
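+ # Shift the cached (cache_layer_id, cache_block_id) position by one block so that it points at the
+ # corresponding entry point in the up pass, rolling over to the next layer once the last attention
+ # block of the current down block has been reached.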
993
+ if cache_block_id == len(self.down_blocks[cache_layer_id].attentions):
994
+ cache_block_id = 0
995
+ cache_layer_id += 1
996
+ else:
997
+ cache_block_id += 1
998
+
999
+ for i, upsample_block in enumerate(self.up_blocks):
1000
+ if i < len(self.up_blocks) - 1 - cache_layer_id:
1001
+ continue
1002
+
1003
+ if i == len(self.up_blocks) - 1 - cache_layer_id:
1004
+ trunc_upsample_block = cache_block_id + 1
1005
+ else:
1006
+ trunc_upsample_block = len(upsample_block.resnets)
1007
+
1008
+ is_final_block = i == len(self.up_blocks) - 1
1009
+
1010
+ res_samples = down_block_res_samples[-trunc_upsample_block:]
1011
+ down_block_res_samples = down_block_res_samples[: -trunc_upsample_block]
1012
+
1013
+ # if we have not reached the final block and need to forward the
1014
+ # upsample size, we do it here
1015
+ if not is_final_block and forward_upsample_size:
1016
+ upsample_size = down_block_res_samples[-1].shape[2:]
1017
+
1018
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
1019
+ #print(sample.shape, [res_sample.shape for res_sample in res_samples])
1020
+ sample, _ = upsample_block(
1021
+ hidden_states=sample,
1022
+ temb=emb,
1023
+ res_hidden_states_tuple=res_samples,
1024
+ encoder_hidden_states=encoder_hidden_states,
1025
+ cross_attention_kwargs=cross_attention_kwargs,
1026
+ upsample_size=upsample_size,
1027
+ attention_mask=attention_mask,
1028
+ encoder_attention_mask=encoder_attention_mask,
1029
+ enter_block_number=cache_block_id if i == len(self.up_blocks) - 1 - cache_layer_id else None,
1030
+ )
1031
+ else:
1032
+ sample = upsample_block(
1033
+ hidden_states=sample,
1034
+ temb=emb,
1035
+ res_hidden_states_tuple=res_samples,
1036
+ upsample_size=upsample_size,
1037
+ scale=lora_scale,
1038
+ )
1039
+
1040
+ prv_f = replicate_prv_feature
1041
+ else:
1042
+ for i, downsample_block in enumerate(self.down_blocks):
1043
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
1044
+ # For t2i-adapter CrossAttnDownBlock2D
1045
+ additional_residuals = {}
1046
+ if is_adapter and len(down_block_additional_residuals) > 0:
1047
+ additional_residuals["additional_residuals"] = down_block_additional_residuals.pop(0)
1048
+
1049
+ sample, res_samples = downsample_block(
1050
+ hidden_states=sample,
1051
+ temb=emb,
1052
+ encoder_hidden_states=encoder_hidden_states,
1053
+ attention_mask=attention_mask,
1054
+ cross_attention_kwargs=cross_attention_kwargs,
1055
+ encoder_attention_mask=encoder_attention_mask,
1056
+ **additional_residuals,
1057
+ )
1058
+ else:
1059
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)
1060
+
1061
+ if is_adapter and len(down_block_additional_residuals) > 0:
1062
+ sample += down_block_additional_residuals.pop(0)
1063
+
1064
+ down_block_res_samples += res_samples
1065
+
1066
+ if is_controlnet:
1067
+ new_down_block_res_samples = ()
1068
+
1069
+ for down_block_res_sample, down_block_additional_residual in zip(
1070
+ down_block_res_samples, down_block_additional_residuals
1071
+ ):
1072
+ down_block_res_sample = down_block_res_sample + down_block_additional_residual
1073
+ new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
1074
+
1075
+ down_block_res_samples = new_down_block_res_samples
1076
+
1077
+ # 4. mid
1078
+ if self.mid_block is not None:
1079
+ sample = self.mid_block(
1080
+ sample,
1081
+ emb,
1082
+ encoder_hidden_states=encoder_hidden_states,
1083
+ attention_mask=attention_mask,
1084
+ cross_attention_kwargs=cross_attention_kwargs,
1085
+ encoder_attention_mask=encoder_attention_mask,
1086
+ )
1087
+ # To support T2I-Adapter-XL
1088
+ if (
1089
+ is_adapter
1090
+ and len(down_block_additional_residuals) > 0
1091
+ and sample.shape == down_block_additional_residuals[0].shape
1092
+ ):
1093
+ sample += down_block_additional_residuals.pop(0)
1094
+
1095
+ if is_controlnet:
1096
+ sample = sample + mid_block_additional_residual
1097
+
1098
+ # 5. up
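+ # Full pass: all up blocks are executed. When caching is enabled (`cache_block_id` is not None),
+ # the feature produced at the cached position is recorded in `prv_f` so that later steps can reuse
+ # it through the `quick_replicate` fast path above.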
1099
+ if cache_block_id is not None:
1100
+ if cache_block_id == len(self.down_blocks[cache_layer_id].attentions):
1101
+ cache_block_id = 0
1102
+ cache_layer_id += 1
1103
+ else:
1104
+ cache_block_id += 1
1105
+ #print("down_block_res_samples:", [res_sample.shape for res_sample in down_block_res_samples])
1106
+ #print(cache_block_id, cache_layer_id)
1107
+ prv_f = None
1108
+ for i, upsample_block in enumerate(self.up_blocks):
1109
+ is_final_block = i == len(self.up_blocks) - 1
1110
+
1111
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
1112
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
1113
+ #print(sample.shape, [res_sample.shape for res_sample in res_samples])
1114
+ # if we have not reached the final block and need to forward the
1115
+ # upsample size, we do it here
1116
+ if not is_final_block and forward_upsample_size:
1117
+ upsample_size = down_block_res_samples[-1].shape[2:]
1118
+
1119
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
1120
+ sample, current_record_f = upsample_block(
1121
+ hidden_states=sample,
1122
+ temb=emb,
1123
+ res_hidden_states_tuple=res_samples,
1124
+ encoder_hidden_states=encoder_hidden_states,
1125
+ cross_attention_kwargs=cross_attention_kwargs,
1126
+ upsample_size=upsample_size,
1127
+ attention_mask=attention_mask,
1128
+ encoder_attention_mask=encoder_attention_mask,
1129
+ )
1130
+ else:
1131
+ sample = upsample_block(
1132
+ hidden_states=sample,
1133
+ temb=emb,
1134
+ res_hidden_states_tuple=res_samples,
1135
+ upsample_size=upsample_size,
1136
+ scale=lora_scale,
1137
+ )
1138
+ current_record_f = None
1139
+
1140
+ #print("Append prv_feature with shape:", sample.shape)
1141
+ if cache_layer_id is not None and current_record_f is not None and i == len(self.up_blocks) - cache_layer_id - 1:
1142
+ prv_f = current_record_f[-cache_block_id-1]
1143
+
1144
+ # 6. post-process
1145
+ if self.conv_norm_out:
1146
+ sample = self.conv_norm_out(sample)
1147
+ sample = self.conv_act(sample)
1148
+ sample = self.conv_out(sample)
1149
+ if not return_dict:
1150
+ return (sample, prv_f,)
1151
+
1152
+ return UNet2DConditionOutput(sample=sample)