kadirnar committed
Commit 58b9308 · 1 Parent(s): 83792a6

Upload pipeline.py

Files changed (1)
  1. pipeline.py +1133 -0
pipeline.py ADDED
@@ -0,0 +1,1133 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import time
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ import numpy as np
20
+ from packaging import version
21
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
22
+
23
+ from diffusers.configuration_utils import FrozenDict
24
+ from diffusers.image_processor import VaeImageProcessor, PipelineImageInput
25
+ from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
27
+
28
+ from diffusers.models import AutoencoderKL
29
+ from diffusers.models.attention_processor import FusedAttnProcessor2_0
30
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
31
+ from diffusers.schedulers import KarrasDiffusionSchedulers
32
+ from diffusers.utils import (
33
+ USE_PEFT_BACKEND,
34
+ deprecate,
35
+ logging,
36
+ replace_example_docstring,
37
+ scale_lora_layers,
38
+ unscale_lora_layers,
39
+ )
40
+ from diffusers.utils.torch_utils import randn_tensor
41
+
42
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
43
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
44
+
45
+ from .unet_2d_condition import UNet2DConditionModel, ImageProjection
46
+ from .pipeline_utils import DiffusionPipeline
47
+
48
+
49
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
50
+
51
+ EXAMPLE_DOC_STRING = """
52
+ Examples:
53
+ ```py
54
+ >>> import torch
55
+ >>> from diffusers import StableDiffusionPipeline
56
+
57
+ >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
58
+ >>> pipe = pipe.to("cuda")
59
+
60
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
61
+ >>> image = pipe(prompt).images[0]
62
+ ```
63
+ """
64
+
65
+ def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100):
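+ # Draw `sample_size` distinct integer indices in [1, n) from a Gaussian centered at n/2.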
66
+ samples = []
67
+
68
+ while len(samples) < sample_size:
69
+ # Sample from a Gaussian centered at n/2
70
+ sample = int(np.random.normal(loc=n/2, scale=std_dev))
71
+
72
+ # Check if the sample is in bounds
73
+ if 1 <= sample < n and sample not in samples:
74
+ samples.append(sample)
75
+
76
+ return samples
77
+
78
+ def sample_from_quad(total_numbers, n_samples, pow=1.2):
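+ # Pick `n_samples` indices in [0, total_numbers) following a quadratic (power-law) spacing,
+ # lowering `pow` until exactly `n_samples` unique indices are produced.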
79
+ while pow > 1:
80
+ # Generate linearly spaced values between 0 and a max value
81
+ x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1)
82
+
83
+ # Raise these values to the power of `pow` to get a non-linear distribution
84
+ indices = np.unique(np.int32(x_values**pow))[:-1]
85
+ if len(indices) == n_samples:
86
+ break
87
+ pow -= 0.02
88
+ if pow <= 1:
89
+ raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.")
90
+ return indices, pow
91
+
92
+ def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2):
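+ # Same as `sample_from_quad`, but the power-law spacing is centered around `center`;
+ # used below to choose the denoising steps at which full UNet features are recomputed.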
93
+ while pow > 1:
94
+ # Generate linearly spaced values between 0 and a max value
95
+ x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1)
96
+ indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]]
97
+ if len(indices) == n_samples:
98
+ break
99
+ pow -= 0.02
100
+ if pow <= 1:
101
+ raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.")
102
+ return indices, pow
103
+
104
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
105
+ """
106
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
107
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
108
+ """
109
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
110
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
111
+ # rescale the results from guidance (fixes overexposure)
112
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
113
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
114
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
115
+ return noise_cfg
116
+
117
+
118
+ def retrieve_timesteps(
119
+ scheduler,
120
+ num_inference_steps: Optional[int] = None,
121
+ device: Optional[Union[str, torch.device]] = None,
122
+ timesteps: Optional[List[int]] = None,
123
+ **kwargs,
124
+ ):
125
+ """
126
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
127
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
128
+
129
+ Args:
130
+ scheduler (`SchedulerMixin`):
131
+ The scheduler to get timesteps from.
132
+ num_inference_steps (`int`):
133
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
134
+ `timesteps` must be `None`.
135
+ device (`str` or `torch.device`, *optional*):
136
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
137
+ timesteps (`List[int]`, *optional*):
138
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
139
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
140
+ must be `None`.
141
+
142
+ Returns:
143
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
144
+ second element is the number of inference steps.
145
+ """
146
+ if timesteps is not None:
147
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
148
+ if not accepts_timesteps:
149
+ raise ValueError(
150
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
151
+ f" timestep schedules. Please check whether you are using the correct scheduler."
152
+ )
153
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
154
+ timesteps = scheduler.timesteps
155
+ num_inference_steps = len(timesteps)
156
+ else:
157
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
158
+ timesteps = scheduler.timesteps
159
+ return timesteps, num_inference_steps
160
+
161
+ class StableDiffusionPipeline(
162
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
163
+ ):
164
+ r"""
165
+ Pipeline for text-to-image generation using Stable Diffusion.
166
+
167
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
168
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
169
+
170
+ The pipeline also inherits the following loading methods:
171
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
172
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
173
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
174
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
175
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
176
+
177
+ Args:
178
+ vae ([`AutoencoderKL`]):
179
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
180
+ text_encoder ([`~transformers.CLIPTextModel`]):
181
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
182
+ tokenizer ([`~transformers.CLIPTokenizer`]):
183
+ A `CLIPTokenizer` to tokenize text.
184
+ unet ([`UNet2DConditionModel`]):
185
+ A `UNet2DConditionModel` to denoise the encoded image latents.
186
+ scheduler ([`SchedulerMixin`]):
187
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
188
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
189
+ safety_checker ([`StableDiffusionSafetyChecker`]):
190
+ Classification module that estimates whether generated images could be considered offensive or harmful.
191
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
192
+ about a model's potential harms.
193
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
194
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
195
+ """
196
+
197
+ model_cpu_offload_seq = "text_encoder->unet->vae"
198
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
199
+ _exclude_from_cpu_offload = ["safety_checker"]
200
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
201
+
202
+ def __init__(
203
+ self,
204
+ vae: AutoencoderKL,
205
+ text_encoder: CLIPTextModel,
206
+ tokenizer: CLIPTokenizer,
207
+ unet: UNet2DConditionModel,
208
+ scheduler: KarrasDiffusionSchedulers,
209
+ safety_checker: StableDiffusionSafetyChecker,
210
+ feature_extractor: CLIPImageProcessor,
211
+ image_encoder: CLIPVisionModelWithProjection = None,
212
+ requires_safety_checker: bool = True,
213
+ ):
214
+ super().__init__()
215
+
216
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
217
+ deprecation_message = (
218
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
219
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
220
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
221
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
222
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
223
+ " file"
224
+ )
225
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
226
+ new_config = dict(scheduler.config)
227
+ new_config["steps_offset"] = 1
228
+ scheduler._internal_dict = FrozenDict(new_config)
229
+
230
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
231
+ deprecation_message = (
232
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
233
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
234
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
235
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
236
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
237
+ )
238
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
239
+ new_config = dict(scheduler.config)
240
+ new_config["clip_sample"] = False
241
+ scheduler._internal_dict = FrozenDict(new_config)
242
+
243
+ if safety_checker is None and requires_safety_checker:
244
+ logger.warning(
245
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
246
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
247
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
248
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
249
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
250
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
251
+ )
252
+
253
+ if safety_checker is not None and feature_extractor is None:
254
+ raise ValueError(
255
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
256
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
257
+ )
258
+
259
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
260
+ version.parse(unet.config._diffusers_version).base_version
261
+ ) < version.parse("0.9.0.dev0")
262
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
263
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
264
+ deprecation_message = (
265
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
266
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
267
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
268
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
269
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
270
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
271
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
272
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
273
+ " the `unet/config.json` file"
274
+ )
275
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
276
+ new_config = dict(unet.config)
277
+ new_config["sample_size"] = 64
278
+ unet._internal_dict = FrozenDict(new_config)
279
+
280
+ self.register_modules(
281
+ vae=vae,
282
+ text_encoder=text_encoder,
283
+ tokenizer=tokenizer,
284
+ unet=unet,
285
+ scheduler=scheduler,
286
+ safety_checker=safety_checker,
287
+ feature_extractor=feature_extractor,
288
+ image_encoder=image_encoder,
289
+ )
290
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
291
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
292
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
293
+
294
+ def enable_vae_slicing(self):
295
+ r"""
296
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
297
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
298
+ """
299
+ self.vae.enable_slicing()
300
+
301
+ def disable_vae_slicing(self):
302
+ r"""
303
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
304
+ computing decoding in one step.
305
+ """
306
+ self.vae.disable_slicing()
307
+
308
+ def enable_vae_tiling(self):
309
+ r"""
310
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
311
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
312
+ processing larger images.
313
+ """
314
+ self.vae.enable_tiling()
315
+
316
+ def disable_vae_tiling(self):
317
+ r"""
318
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
319
+ computing decoding in one step.
320
+ """
321
+ self.vae.disable_tiling()
322
+
323
+ def _encode_prompt(
324
+ self,
325
+ prompt,
326
+ device,
327
+ num_images_per_prompt,
328
+ do_classifier_free_guidance,
329
+ negative_prompt=None,
330
+ prompt_embeds: Optional[torch.FloatTensor] = None,
331
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
332
+ lora_scale: Optional[float] = None,
333
+ **kwargs,
334
+ ):
335
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
336
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
337
+
338
+ prompt_embeds_tuple = self.encode_prompt(
339
+ prompt=prompt,
340
+ device=device,
341
+ num_images_per_prompt=num_images_per_prompt,
342
+ do_classifier_free_guidance=do_classifier_free_guidance,
343
+ negative_prompt=negative_prompt,
344
+ prompt_embeds=prompt_embeds,
345
+ negative_prompt_embeds=negative_prompt_embeds,
346
+ lora_scale=lora_scale,
347
+ **kwargs,
348
+ )
349
+
350
+ # concatenate for backwards comp
351
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
352
+
353
+ return prompt_embeds
354
+
355
+ def encode_prompt(
356
+ self,
357
+ prompt,
358
+ device,
359
+ num_images_per_prompt,
360
+ do_classifier_free_guidance,
361
+ negative_prompt=None,
362
+ prompt_embeds: Optional[torch.FloatTensor] = None,
363
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
364
+ lora_scale: Optional[float] = None,
365
+ clip_skip: Optional[int] = None,
366
+ ):
367
+ r"""
368
+ Encodes the prompt into text encoder hidden states.
369
+
370
+ Args:
371
+ prompt (`str` or `List[str]`, *optional*):
372
+ prompt to be encoded
373
+ device: (`torch.device`):
374
+ torch device
375
+ num_images_per_prompt (`int`):
376
+ number of images that should be generated per prompt
377
+ do_classifier_free_guidance (`bool`):
378
+ whether to use classifier free guidance or not
379
+ negative_prompt (`str` or `List[str]`, *optional*):
380
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
381
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
382
+ less than `1`).
383
+ prompt_embeds (`torch.FloatTensor`, *optional*):
384
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
385
+ provided, text embeddings will be generated from `prompt` input argument.
386
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
387
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
388
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
389
+ argument.
390
+ lora_scale (`float`, *optional*):
391
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
392
+ clip_skip (`int`, *optional*):
393
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
394
+ the output of the pre-final layer will be used for computing the prompt embeddings.
395
+ """
396
+ # set lora scale so that monkey patched LoRA
397
+ # function of text encoder can correctly access it
398
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
399
+ self._lora_scale = lora_scale
400
+
401
+ # dynamically adjust the LoRA scale
402
+ if not USE_PEFT_BACKEND:
403
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
404
+ else:
405
+ scale_lora_layers(self.text_encoder, lora_scale)
406
+
407
+ if prompt is not None and isinstance(prompt, str):
408
+ batch_size = 1
409
+ elif prompt is not None and isinstance(prompt, list):
410
+ batch_size = len(prompt)
411
+ else:
412
+ batch_size = prompt_embeds.shape[0]
413
+
414
+ if prompt_embeds is None:
415
+ # textual inversion: process multi-vector tokens if necessary
416
+ if isinstance(self, TextualInversionLoaderMixin):
417
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
418
+
419
+ text_inputs = self.tokenizer(
420
+ prompt,
421
+ padding="max_length",
422
+ max_length=self.tokenizer.model_max_length,
423
+ truncation=True,
424
+ return_tensors="pt",
425
+ )
426
+ text_input_ids = text_inputs.input_ids
427
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
428
+
429
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
430
+ text_input_ids, untruncated_ids
431
+ ):
432
+ removed_text = self.tokenizer.batch_decode(
433
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
434
+ )
435
+ logger.warning(
436
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
437
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
438
+ )
439
+
440
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
441
+ attention_mask = text_inputs.attention_mask.to(device)
442
+ else:
443
+ attention_mask = None
444
+
445
+ if clip_skip is None:
446
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
447
+ prompt_embeds = prompt_embeds[0]
448
+ else:
449
+ prompt_embeds = self.text_encoder(
450
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
451
+ )
452
+ # Access the `hidden_states` first, that contains a tuple of
453
+ # all the hidden states from the encoder layers. Then index into
454
+ # the tuple to access the hidden states from the desired layer.
455
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
456
+ # We also need to apply the final LayerNorm here to not mess with the
457
+ # representations. The `last_hidden_states` that we typically use for
458
+ # obtaining the final prompt representations passes through the LayerNorm
459
+ # layer.
460
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
461
+
462
+ if self.text_encoder is not None:
463
+ prompt_embeds_dtype = self.text_encoder.dtype
464
+ elif self.unet is not None:
465
+ prompt_embeds_dtype = self.unet.dtype
466
+ else:
467
+ prompt_embeds_dtype = prompt_embeds.dtype
468
+
469
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
470
+
471
+ bs_embed, seq_len, _ = prompt_embeds.shape
472
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
473
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
474
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
475
+
476
+ # get unconditional embeddings for classifier free guidance
477
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
478
+ uncond_tokens: List[str]
479
+ if negative_prompt is None:
480
+ uncond_tokens = [""] * batch_size
481
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
482
+ raise TypeError(
483
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
484
+ f" {type(prompt)}."
485
+ )
486
+ elif isinstance(negative_prompt, str):
487
+ uncond_tokens = [negative_prompt]
488
+ elif batch_size != len(negative_prompt):
489
+ raise ValueError(
490
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
491
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
492
+ " the batch size of `prompt`."
493
+ )
494
+ else:
495
+ uncond_tokens = negative_prompt
496
+
497
+ # textual inversion: process multi-vector tokens if necessary
498
+ if isinstance(self, TextualInversionLoaderMixin):
499
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
500
+
501
+ max_length = prompt_embeds.shape[1]
502
+ uncond_input = self.tokenizer(
503
+ uncond_tokens,
504
+ padding="max_length",
505
+ max_length=max_length,
506
+ truncation=True,
507
+ return_tensors="pt",
508
+ )
509
+
510
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
511
+ attention_mask = uncond_input.attention_mask.to(device)
512
+ else:
513
+ attention_mask = None
514
+
515
+ negative_prompt_embeds = self.text_encoder(
516
+ uncond_input.input_ids.to(device),
517
+ attention_mask=attention_mask,
518
+ )
519
+ negative_prompt_embeds = negative_prompt_embeds[0]
520
+
521
+ if do_classifier_free_guidance:
522
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
523
+ seq_len = negative_prompt_embeds.shape[1]
524
+
525
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
526
+
527
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
528
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
529
+
530
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
531
+ # Retrieve the original scale by scaling back the LoRA layers
532
+ unscale_lora_layers(self.text_encoder, lora_scale)
533
+
534
+ return prompt_embeds, negative_prompt_embeds
535
+
536
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
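+ # Encode an IP-Adapter image into conditional and unconditional embeddings
+ # (penultimate hidden states or pooled image embeds, depending on `output_hidden_states`).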
537
+ dtype = next(self.image_encoder.parameters()).dtype
538
+
539
+ if not isinstance(image, torch.Tensor):
540
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
541
+
542
+ image = image.to(device=device, dtype=dtype)
543
+ if output_hidden_states:
544
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
545
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
546
+ uncond_image_enc_hidden_states = self.image_encoder(
547
+ torch.zeros_like(image), output_hidden_states=True
548
+ ).hidden_states[-2]
549
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
550
+ num_images_per_prompt, dim=0
551
+ )
552
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
553
+ else:
554
+ image_embeds = self.image_encoder(image).image_embeds
555
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
556
+ uncond_image_embeds = torch.zeros_like(image_embeds)
557
+
558
+ return image_embeds, uncond_image_embeds
559
+
560
+ def run_safety_checker(self, image, device, dtype):
561
+ if self.safety_checker is None:
562
+ has_nsfw_concept = None
563
+ else:
564
+ if torch.is_tensor(image):
565
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
566
+ else:
567
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
568
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
569
+ image, has_nsfw_concept = self.safety_checker(
570
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
571
+ )
572
+ return image, has_nsfw_concept
573
+
574
+ def decode_latents(self, latents):
575
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
576
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
577
+
578
+ latents = 1 / self.vae.config.scaling_factor * latents
579
+ image = self.vae.decode(latents, return_dict=False)[0]
580
+ image = (image / 2 + 0.5).clamp(0, 1)
581
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
582
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
583
+ return image
584
+
585
+ def prepare_extra_step_kwargs(self, generator, eta):
586
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
587
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
588
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
589
+ # and should be between [0, 1]
590
+
591
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
592
+ extra_step_kwargs = {}
593
+ if accepts_eta:
594
+ extra_step_kwargs["eta"] = eta
595
+
596
+ # check if the scheduler accepts generator
597
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
598
+ if accepts_generator:
599
+ extra_step_kwargs["generator"] = generator
600
+ return extra_step_kwargs
601
+
602
+ def check_inputs(
603
+ self,
604
+ prompt,
605
+ height,
606
+ width,
607
+ callback_steps,
608
+ negative_prompt=None,
609
+ prompt_embeds=None,
610
+ negative_prompt_embeds=None,
611
+ callback_on_step_end_tensor_inputs=None,
612
+ ):
613
+ if height % 8 != 0 or width % 8 != 0:
614
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
615
+
616
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
617
+ raise ValueError(
618
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
619
+ f" {type(callback_steps)}."
620
+ )
621
+ if callback_on_step_end_tensor_inputs is not None and not all(
622
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
623
+ ):
624
+ raise ValueError(
625
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
626
+ )
627
+
628
+ if prompt is not None and prompt_embeds is not None:
629
+ raise ValueError(
630
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
631
+ " only forward one of the two."
632
+ )
633
+ elif prompt is None and prompt_embeds is None:
634
+ raise ValueError(
635
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
636
+ )
637
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
638
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
639
+
640
+ if negative_prompt is not None and negative_prompt_embeds is not None:
641
+ raise ValueError(
642
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
643
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
644
+ )
645
+
646
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
647
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
648
+ raise ValueError(
649
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
650
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
651
+ f" {negative_prompt_embeds.shape}."
652
+ )
653
+
654
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
655
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
656
+ if isinstance(generator, list) and len(generator) != batch_size:
657
+ raise ValueError(
658
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
659
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
660
+ )
661
+
662
+ if latents is None:
663
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
664
+ else:
665
+ latents = latents.to(device)
666
+
667
+ # scale the initial noise by the standard deviation required by the scheduler
668
+ latents = latents * self.scheduler.init_noise_sigma
669
+ return latents
670
+
671
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
672
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
673
+
674
+ The suffixes after the scaling factors represent the stages where they are being applied.
675
+
676
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
677
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
678
+
679
+ Args:
680
+ s1 (`float`):
681
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
682
+ mitigate "oversmoothing effect" in the enhanced denoising process.
683
+ s2 (`float`):
684
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
685
+ mitigate "oversmoothing effect" in the enhanced denoising process.
686
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
687
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
688
+ """
689
+ if not hasattr(self, "unet"):
690
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
691
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
692
+
693
+ def disable_freeu(self):
694
+ """Disables the FreeU mechanism if enabled."""
695
+ self.unet.disable_freeu()
696
+
697
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
698
+ def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
699
+ """
700
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
701
+ key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
702
+
703
+ <Tip warning={true}>
704
+
705
+ This API is 🧪 experimental.
706
+
707
+ </Tip>
708
+
709
+ Args:
710
+ unet (`bool`, defaults to `True`): To apply fusion on the UNet.
711
+ vae (`bool`, defaults to `True`): To apply fusion on the VAE.
712
+ """
713
+ self.fusing_unet = False
714
+ self.fusing_vae = False
715
+
716
+ if unet:
717
+ self.fusing_unet = True
718
+ self.unet.fuse_qkv_projections()
719
+ self.unet.set_attn_processor(FusedAttnProcessor2_0())
720
+
721
+ if vae:
722
+ if not isinstance(self.vae, AutoencoderKL):
723
+ raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
724
+
725
+ self.fusing_vae = True
726
+ self.vae.fuse_qkv_projections()
727
+ self.vae.set_attn_processor(FusedAttnProcessor2_0())
728
+
729
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
730
+ def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
731
+ """Disable QKV projection fusion if enabled.
732
+
733
+ <Tip warning={true}>
734
+
735
+ This API is 🧪 experimental.
736
+
737
+ </Tip>
738
+
739
+ Args:
740
+ unet (`bool`, defaults to `True`): To apply fusion on the UNet.
741
+ vae (`bool`, defaults to `True`): To apply fusion on the VAE.
742
+
743
+ """
744
+ if unet:
745
+ if not self.fusing_unet:
746
+ logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
747
+ else:
748
+ self.unet.unfuse_qkv_projections()
749
+ self.fusing_unet = False
750
+
751
+ if vae:
752
+ if not self.fusing_vae:
753
+ logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
754
+ else:
755
+ self.vae.unfuse_qkv_projections()
756
+ self.fusing_vae = False
757
+
758
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
759
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
760
+ """
761
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
762
+
763
+ Args:
764
+ w (`torch.Tensor`):
765
+ guidance scale values at which to generate embedding vectors
766
+ embedding_dim (`int`, *optional*, defaults to 512):
767
+ dimension of the embeddings to generate
768
+ dtype:
769
+ data type of the generated embeddings
770
+
771
+ Returns:
772
+ `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
773
+ """
774
+ assert len(w.shape) == 1
775
+ w = w * 1000.0
776
+
777
+ half_dim = embedding_dim // 2
778
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
779
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
780
+ emb = w.to(dtype)[:, None] * emb[None, :]
781
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
782
+ if embedding_dim % 2 == 1: # zero pad
783
+ emb = torch.nn.functional.pad(emb, (0, 1))
784
+ assert emb.shape == (w.shape[0], embedding_dim)
785
+ return emb
786
+
787
+ @property
788
+ def guidance_scale(self):
789
+ return self._guidance_scale
790
+
791
+ @property
792
+ def guidance_rescale(self):
793
+ return self._guidance_rescale
794
+
795
+ @property
796
+ def clip_skip(self):
797
+ return self._clip_skip
798
+
799
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
800
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
801
+ # corresponds to doing no classifier free guidance.
802
+ @property
803
+ def do_classifier_free_guidance(self):
804
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
805
+
806
+ @property
807
+ def cross_attention_kwargs(self):
808
+ return self._cross_attention_kwargs
809
+
810
+ @property
811
+ def num_timesteps(self):
812
+ return self._num_timesteps
813
+
814
+ @torch.no_grad()
815
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
816
+ def __call__(
817
+ self,
818
+ prompt: Union[str, List[str]] = None,
819
+ height: Optional[int] = None,
820
+ width: Optional[int] = None,
821
+ num_inference_steps: int = 50,
822
+ timesteps: List[int] = None,
823
+ guidance_scale: float = 7.5,
824
+ negative_prompt: Optional[Union[str, List[str]]] = None,
825
+ num_images_per_prompt: Optional[int] = 1,
826
+ eta: float = 0.0,
827
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
828
+ latents: Optional[torch.FloatTensor] = None,
829
+ prompt_embeds: Optional[torch.FloatTensor] = None,
830
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
831
+ ip_adapter_image: Optional[PipelineImageInput] = None,
832
+ output_type: Optional[str] = "pil",
833
+ return_dict: bool = True,
834
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
835
+ guidance_rescale: float = 0.0,
836
+ clip_skip: Optional[int] = None,
837
+ cache_interval: int = 1,
838
+ cache_layer_id: Optional[int] = None,
839
+ cache_block_id: Optional[int] = None,
840
+ uniform: bool = True,
841
+ pow: Optional[float] = None,
842
+ center: Optional[int] = None,
843
+ output_all_sequence: bool = False,
844
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
845
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
846
+ **kwargs,
847
+ ):
848
+ r"""
849
+ The call function to the pipeline for generation.
850
+
851
+ Args:
852
+ prompt (`str` or `List[str]`, *optional*):
853
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
854
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
855
+ The height in pixels of the generated image.
856
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
857
+ The width in pixels of the generated image.
858
+ num_inference_steps (`int`, *optional*, defaults to 50):
859
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
860
+ expense of slower inference.
861
+ timesteps (`List[int]`, *optional*):
862
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
863
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
864
+ passed will be used. Must be in descending order.
865
+ guidance_scale (`float`, *optional*, defaults to 7.5):
866
+ A higher guidance scale value encourages the model to generate images closely linked to the text
867
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
868
+ negative_prompt (`str` or `List[str]`, *optional*):
869
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
870
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
871
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
872
+ The number of images to generate per prompt.
873
+ eta (`float`, *optional*, defaults to 0.0):
874
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
875
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
876
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
877
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
878
+ generation deterministic.
879
+ latents (`torch.FloatTensor`, *optional*):
880
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
881
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
882
+ tensor is generated by sampling using the supplied random `generator`.
883
+ prompt_embeds (`torch.FloatTensor`, *optional*):
884
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
885
+ provided, text embeddings are generated from the `prompt` input argument.
886
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
887
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
888
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
889
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
890
+ output_type (`str`, *optional*, defaults to `"pil"`):
891
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
892
+ return_dict (`bool`, *optional*, defaults to `True`):
893
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
894
+ plain tuple.
895
+ cross_attention_kwargs (`dict`, *optional*):
896
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
897
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
898
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
899
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
900
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
901
+ using zero terminal SNR.
902
+ clip_skip (`int`, *optional*):
903
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
904
+ the output of the pre-final layer will be used for computing the prompt embeddings.
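+ cache_interval (`int`, *optional*, defaults to 1):
+ Number of denoising steps between full UNet feature computations. Steps in between reuse the
+ cached deep features from the last full step; a value of 1 disables caching.
+ cache_layer_id (`int`, *optional*):
+ Index of the UNet layer whose features are cached and replicated; forwarded to the custom
+ `UNet2DConditionModel`.
+ cache_block_id (`int`, *optional*):
+ Index of the block within `cache_layer_id` whose features are cached; forwarded to the custom
+ `UNet2DConditionModel`.
+ uniform (`bool`, *optional*, defaults to `True`):
+ Whether the full-computation steps are spaced uniformly (every `cache_interval` steps) or drawn
+ from a quadratic schedule via `sample_from_quad_center`.
+ pow (`float`, *optional*):
+ Exponent of the quadratic schedule used when `uniform=False`.
+ center (`int`, *optional*):
+ Center of the quadratic schedule used when `uniform=False`.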
905
+ callback_on_step_end (`Callable`, *optional*):
906
+ A function that is called at the end of each denoising step during inference. The function is called
907
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
908
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
909
+ `callback_on_step_end_tensor_inputs`.
910
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
911
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
912
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
913
+ `._callback_tensor_inputs` attribute of your pipeline class.
914
+
915
+ Examples:
916
+
917
+ Returns:
918
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
919
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
920
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
921
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
922
+ "not-safe-for-work" (nsfw) content.
923
+ """
924
+
925
+ callback = kwargs.pop("callback", None)
926
+ callback_steps = kwargs.pop("callback_steps", None)
927
+
928
+ if callback is not None:
929
+ deprecate(
930
+ "callback",
931
+ "1.0.0",
932
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
933
+ )
934
+ if callback_steps is not None:
935
+ deprecate(
936
+ "callback_steps",
937
+ "1.0.0",
938
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
939
+ )
940
+
941
+ # 0. Default height and width to unet
942
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
943
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
944
+ # to deal with lora scaling and other possible forward hooks
945
+
946
+ # 1. Check inputs. Raise error if not correct
947
+ self.check_inputs(
948
+ prompt,
949
+ height,
950
+ width,
951
+ callback_steps,
952
+ negative_prompt,
953
+ prompt_embeds,
954
+ negative_prompt_embeds,
955
+ callback_on_step_end_tensor_inputs,
956
+ )
957
+
958
+ self._guidance_scale = guidance_scale
959
+ self._guidance_rescale = guidance_rescale
960
+ self._clip_skip = clip_skip
961
+ self._cross_attention_kwargs = cross_attention_kwargs
962
+
963
+ # 2. Define call parameters
964
+ if prompt is not None and isinstance(prompt, str):
965
+ batch_size = 1
966
+ elif prompt is not None and isinstance(prompt, list):
967
+ batch_size = len(prompt)
968
+ else:
969
+ batch_size = prompt_embeds.shape[0]
970
+
971
+ device = self._execution_device
972
+
973
+ # 3. Encode input prompt
974
+ lora_scale = (
975
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
976
+ )
977
+
978
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
979
+ prompt,
980
+ device,
981
+ num_images_per_prompt,
982
+ self.do_classifier_free_guidance,
983
+ negative_prompt,
984
+ prompt_embeds=prompt_embeds,
985
+ negative_prompt_embeds=negative_prompt_embeds,
986
+ lora_scale=lora_scale,
987
+ clip_skip=self.clip_skip,
988
+ )
989
+
990
+ # For classifier free guidance, we need to do two forward passes.
991
+ # Here we concatenate the unconditional and text embeddings into a single batch
992
+ # to avoid doing two forward passes
993
+ if self.do_classifier_free_guidance:
994
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
995
+
996
+ if ip_adapter_image is not None:
997
+ output_hidden_state = not isinstance(self.unet.encoder_hid_proj, ImageProjection)
998
+ image_embeds, negative_image_embeds = self.encode_image(
999
+ ip_adapter_image, device, num_images_per_prompt, output_hidden_state
1000
+ )
1001
+ if self.do_classifier_free_guidance:
1002
+ image_embeds = torch.cat([negative_image_embeds, image_embeds])
1003
+
1004
+ # 4. Prepare timesteps
1005
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1006
+
1007
+ # 5. Prepare latent variables
1008
+ num_channels_latents = self.unet.config.in_channels
1009
+ latents = self.prepare_latents(
1010
+ batch_size * num_images_per_prompt,
1011
+ num_channels_latents,
1012
+ height,
1013
+ width,
1014
+ prompt_embeds.dtype,
1015
+ device,
1016
+ generator,
1017
+ latents,
1018
+ )
1019
+
1020
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1021
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1022
+
1023
+ # 6.1 Add image embeds for IP-Adapter
1024
+ added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
1025
+
1026
+ # 6.2 Optionally get Guidance Scale Embedding
1027
+ timestep_cond = None
1028
+ if self.unet.config.time_cond_proj_dim is not None:
1029
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1030
+ timestep_cond = self.get_guidance_scale_embedding(
1031
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1032
+ ).to(device=device, dtype=latents.dtype)
1033
+
1034
+ # 7. Denoising loop
1035
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1036
+ self._num_timesteps = len(timesteps)
1037
+
1038
+ prv_features = None
1039
+ latents_list = [latents]
1040
+
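+ # Build the caching schedule: `interval_seq` lists the steps at which the UNet features are
+ # fully recomputed; at all other steps the cached `prv_features` from the previous full step
+ # are reused (cache_interval == 1 disables caching).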
1041
+ if cache_interval == 1:
1042
+ interval_seq = list(range(num_inference_steps))
1043
+ else:
1044
+ if uniform:
1045
+ interval_seq = list(range(0, num_inference_steps, cache_interval))
1046
+ else:
1047
+ num_slow_step = num_inference_steps // cache_interval
1048
+ if num_inference_steps % cache_interval != 0:
1049
+ num_slow_step += 1
1050
+
1051
+ interval_seq, pow = sample_from_quad_center(num_inference_steps, num_slow_step, center=center, pow=pow)  # e.g. [0, 3, 6, 9, 12, 16, 22, 28, 35, 43, ...]
1052
+
1053
+ interval_seq = sorted(interval_seq)
1054
+
1055
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1056
+ for i, t in enumerate(timesteps):
1057
+ # expand the latents if we are doing classifier free guidance
1058
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1059
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1060
+
1061
+ if i in interval_seq:
1062
+ prv_features = None
1063
+
1064
+ # predict the noise residual
1065
+ noise_pred, prv_features = self.unet(
1066
+ latent_model_input,
1067
+ t,
1068
+ encoder_hidden_states=prompt_embeds,
1069
+ timestep_cond=timestep_cond,
1070
+ cross_attention_kwargs=self.cross_attention_kwargs,
1071
+ added_cond_kwargs=added_cond_kwargs,
1072
+ replicate_prv_feature=prv_features,
1073
+ quick_replicate=cache_interval > 1,
1074
+ cache_layer_id=cache_layer_id,
1075
+ cache_block_id=cache_block_id,
1076
+
1077
+ return_dict=False,
1078
+ )
1079
+
1080
+ # perform guidance
1081
+ if self.do_classifier_free_guidance:
1082
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1083
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1084
+
1085
+ if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1086
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1087
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1088
+
1089
+ # compute the previous noisy sample x_t -> x_t-1
1090
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1091
+ latents_list.append(latents)
1092
+
1093
+ if callback_on_step_end is not None:
1094
+ callback_kwargs = {}
1095
+ for k in callback_on_step_end_tensor_inputs:
1096
+ callback_kwargs[k] = locals()[k]
1097
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1098
+
1099
+ latents = callback_outputs.pop("latents", latents)
1100
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1101
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1102
+
1103
+ # call the callback, if provided
1104
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1105
+ progress_bar.update()
1106
+ if callback is not None and i % callback_steps == 0:
1107
+ step_idx = i // getattr(self.scheduler, "order", 1)
1108
+ callback(step_idx, t, latents)
1109
+
1110
+ if not output_type == "latent":
1111
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1112
+ 0
1113
+ ]
1114
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1115
+ else:
1116
+ image = latents
1117
+ has_nsfw_concept = None
1118
+
1119
+ if has_nsfw_concept is None:
1120
+ do_denormalize = [True] * image.shape[0]
1121
+ else:
1122
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1123
+
1124
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1125
+
1126
+ # Offload all models
1127
+ self.maybe_free_model_hooks()
1128
+
1129
+ if not return_dict:
1130
+ return (image, has_nsfw_concept)
1131
+
1132
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1133
+