|
import inspect |
|
from typing import Any, Callable, Dict, List, Optional, Union |
|
|
|
import numpy as np |
|
import PIL.Image
|
import torch |
|
from packaging import version |
|
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer |
|
|
|
from diffusers.configuration_utils import FrozenDict |
|
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor |
|
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin |
|
from diffusers.models import AutoencoderKL, UNet2DConditionModel |
|
from diffusers.models.lora import adjust_lora_scale_text_encoder |
|
from diffusers.schedulers import LCMScheduler |
|
from diffusers.utils import PIL_INTERPOLATION, deprecate, logging |
|
from diffusers.utils.torch_utils import randn_tensor |
|
from diffusers.pipelines.pipeline_utils import DiffusionPipeline |
|
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput |
|
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker |
|
|
|
|
|
logger = logging.get_logger(__name__) |
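
# Example usage (a minimal sketch, not a tested recipe: the checkpoint path, scheduler choice,
# and prompts below are placeholders you will need to adapt):
#
#     import torch
#     from diffusers import LCMScheduler
#
#     pipe = EditPipeline.from_pretrained("path/to/stable-diffusion-checkpoint", torch_dtype=torch.float16)
#     pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
#     pipe = pipe.to("cuda")
#
#     result = pipe(
#         prompt="a photo of a cat wearing a hat",   # the edit you want
#         source_prompt="a photo of a cat",          # describes the input image
#         image=init_image,                          # PIL.Image.Image (or tensor) to edit
#         strength=0.8,
#         num_inference_steps=15,
#         guidance_scale=7.5,
#     )
#     edited_image = result.images[0]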
|
|
|
|
|
|
|
def preprocess(image): |
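    """Legacy preprocessing: resize PIL images to multiples of 8, scale to [-1, 1], and return an NCHW float tensor.

    Tensors are returned unchanged; prefer ``VaeImageProcessor.preprocess`` in new code.
    """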
|
deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" |
|
deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) |
|
if isinstance(image, torch.Tensor): |
|
return image |
|
elif isinstance(image, PIL.Image.Image): |
|
image = [image] |
|
|
|
if isinstance(image[0], PIL.Image.Image): |
|
w, h = image[0].size |
|
w, h = (x - x % 8 for x in (w, h)) |
|
|
|
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] |
|
image = np.concatenate(image, axis=0) |
|
image = np.array(image).astype(np.float32) / 255.0 |
|
image = image.transpose(0, 3, 1, 2) |
|
image = 2.0 * image - 1.0 |
|
image = torch.from_numpy(image) |
|
elif isinstance(image[0], torch.Tensor): |
|
image = torch.cat(image, dim=0) |
|
return image |
|
|
|
|
|
def ddcm_sampler(scheduler, x_s, x_t, timestep, e_s, e_t, x_0, noise, eta, to_next=True): |
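    """Perform one DDCM-style denoising step shared by the source and edited branches.

    Args (shapes follow the pipeline's latent tensors):
        x_s / x_t: current source / target latents.
        e_s / e_t: noise predictions for the source / target latents.
        x_0: clean (un-noised) source latents used as the reference.
        noise: pre-drawn Gaussian noise; it is scaled inside the function by an ``eta``-dependent standard deviation.
        to_next: whether to advance the scheduler's internal step index.

    Returns:
        (prev_xs, prev_xt, pred_x0): the previous source latents, the previous target latents,
        and the current estimate of the clean target latents.
    """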
|
if scheduler.num_inference_steps is None: |
|
raise ValueError( |
|
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" |
|
) |
|
|
|
if scheduler.step_index is None: |
|
scheduler._init_step_index(timestep) |
|
|
|
prev_step_index = scheduler.step_index + 1 |
|
if prev_step_index < len(scheduler.timesteps): |
|
prev_timestep = scheduler.timesteps[prev_step_index] |
|
else: |
|
prev_timestep = timestep |
|
|
|
alpha_prod_t = scheduler.alphas_cumprod[timestep] |
|
alpha_prod_t_prev = ( |
|
scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod |
|
) |
|
beta_prod_t = 1 - alpha_prod_t |
|
beta_prod_t_prev = 1 - alpha_prod_t_prev |
|
variance = beta_prod_t_prev |
|
std_dev_t = eta * variance |
|
noise = std_dev_t ** (0.5) * noise |
|
|
|
e_c = (x_s - alpha_prod_t ** (0.5) * x_0) / (1 - alpha_prod_t) ** (0.5) |
|
|
|
pred_x0 = x_0 + ((x_t - x_s) - beta_prod_t ** (0.5) * (e_t - e_s)) / alpha_prod_t ** (0.5) |
|
eps = (e_t - e_s) + e_c |
|
dir_xt = (beta_prod_t_prev - std_dev_t) ** (0.5) * eps |
|
|
|
|
|
if len(scheduler.timesteps) > 1: |
|
prev_xt = alpha_prod_t_prev ** (0.5) * pred_x0 + dir_xt + noise |
|
prev_xs = alpha_prod_t_prev ** (0.5) * x_0 + dir_xt + noise |
|
else: |
|
prev_xt = pred_x0 |
|
prev_xs = x_0 |
|
|
|
if to_next: |
|
scheduler._step_index += 1 |
|
return prev_xs, prev_xt, pred_x0 |
|
|
|
|
|
class EditPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): |
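    r"""Prompt-guided image editing pipeline built from Stable Diffusion components.

    A source branch conditioned on ``source_prompt`` and an edited branch conditioned on ``prompt``
    are denoised side by side with ``ddcm_sampler``, sharing the noise drawn at each step; a third
    "mutual" branch can be steered through the ``callback`` argument of ``__call__``. The registered
    modules mirror the standard Stable Diffusion pipelines (VAE, CLIP text encoder and tokenizer,
    UNet, scheduler, optional safety checker).
    """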
|
model_cpu_offload_seq = "text_encoder->unet->vae" |
|
_optional_components = ["safety_checker", "feature_extractor"] |
|
|
|
def __init__( |
|
self, |
|
vae: AutoencoderKL, |
|
text_encoder: CLIPTextModel, |
|
tokenizer: CLIPTokenizer, |
|
unet: UNet2DConditionModel, |
|
scheduler: LCMScheduler, |
|
safety_checker: StableDiffusionSafetyChecker, |
|
feature_extractor: CLIPImageProcessor, |
|
requires_safety_checker: bool = True, |
|
): |
|
super().__init__() |
|
|
|
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: |
|
            deprecation_message = (

                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"

                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "

                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
|
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," |
|
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" |
|
" file" |
|
) |
|
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) |
|
new_config = dict(scheduler.config) |
|
new_config["steps_offset"] = 1 |
|
scheduler._internal_dict = FrozenDict(new_config) |
|
|
|
if safety_checker is None and requires_safety_checker: |
|
            logger.warning(

                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"

                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"

                " results in services or applications open to the public. Both the diffusers team and Hugging Face"

                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
|
" it only for use-cases that involve analyzing network behavior or auditing its results. For more" |
|
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." |
|
) |
|
|
|
if safety_checker is not None and feature_extractor is None: |
|
            raise ValueError(

                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"

                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
|
) |
|
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( |
|
version.parse(unet.config._diffusers_version).base_version |
|
) < version.parse("0.9.0.dev0") |
|
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 |
|
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: |
|
            deprecation_message = (

                "The configuration file of the unet has set the default `sample_size` to smaller than"

                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" |
|
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" |
|
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" |
|
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" |
|
" in the config might lead to incorrect results in future versions. If you have downloaded this" |
|
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" |
|
" the `unet/config.json` file" |
|
) |
|
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) |
|
new_config = dict(unet.config) |
|
new_config["sample_size"] = 64 |
|
unet._internal_dict = FrozenDict(new_config) |
|
|
|
self.register_modules( |
|
vae=vae, |
|
text_encoder=text_encoder, |
|
tokenizer=tokenizer, |
|
unet=unet, |
|
scheduler=scheduler, |
|
safety_checker=safety_checker, |
|
feature_extractor=feature_extractor, |
|
) |
|
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) |
|
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) |
|
self.register_to_config(requires_safety_checker=requires_safety_checker) |
|
|
|
|
|
def _encode_prompt( |
|
self, |
|
prompt, |
|
device, |
|
num_images_per_prompt, |
|
do_classifier_free_guidance, |
|
negative_prompt=None, |
|
prompt_embeds: Optional[torch.FloatTensor] = None, |
|
negative_prompt_embeds: Optional[torch.FloatTensor] = None, |
|
lora_scale: Optional[float] = None, |
|
): |
|
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." |
|
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) |
|
|
|
prompt_embeds_tuple = self.encode_prompt( |
|
prompt=prompt, |
|
device=device, |
|
num_images_per_prompt=num_images_per_prompt, |
|
do_classifier_free_guidance=do_classifier_free_guidance, |
|
negative_prompt=negative_prompt, |
|
prompt_embeds=prompt_embeds, |
|
negative_prompt_embeds=negative_prompt_embeds, |
|
lora_scale=lora_scale, |
|
) |
|
|
|
|
|
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) |
|
|
|
return prompt_embeds |
|
|
|
|
|
def encode_prompt( |
|
self, |
|
prompt, |
|
device, |
|
num_images_per_prompt, |
|
do_classifier_free_guidance, |
|
negative_prompt=None, |
|
prompt_embeds: Optional[torch.FloatTensor] = None, |
|
negative_prompt_embeds: Optional[torch.FloatTensor] = None, |
|
lora_scale: Optional[float] = None, |
|
): |
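        r"""Encode ``prompt`` (and, when classifier-free guidance is used, a negative prompt) into CLIP embeddings.

        Returns a ``(prompt_embeds, negative_prompt_embeds)`` tuple; the second element is ``None``
        when classifier-free guidance is disabled and no negative embeddings were passed in.
        """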
|
|
|
|
|
if lora_scale is not None and isinstance(self, LoraLoaderMixin): |
|
self._lora_scale = lora_scale |
|
|
|
|
|
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) |
|
|
|
if prompt is not None and isinstance(prompt, str): |
|
batch_size = 1 |
|
elif prompt is not None and isinstance(prompt, list): |
|
batch_size = len(prompt) |
|
else: |
|
batch_size = prompt_embeds.shape[0] |
|
|
|
if prompt_embeds is None: |
|
|
|
if isinstance(self, TextualInversionLoaderMixin): |
|
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) |
|
|
|
text_inputs = self.tokenizer( |
|
prompt, |
|
padding="max_length", |
|
max_length=self.tokenizer.model_max_length, |
|
truncation=True, |
|
return_tensors="pt", |
|
) |
|
text_input_ids = text_inputs.input_ids |
|
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids |
|
|
|
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( |
|
text_input_ids, untruncated_ids |
|
): |
|
removed_text = self.tokenizer.batch_decode( |
|
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] |
|
) |
|
logger.warning( |
|
"The following part of your input was truncated because CLIP can only handle sequences up to" |
|
f" {self.tokenizer.model_max_length} tokens: {removed_text}" |
|
) |
|
|
|
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: |
|
attention_mask = text_inputs.attention_mask.to(device) |
|
else: |
|
attention_mask = None |
|
|
|
prompt_embeds = self.text_encoder( |
|
text_input_ids.to(device), |
|
attention_mask=attention_mask, |
|
) |
|
prompt_embeds = prompt_embeds[0] |
|
|
|
if self.text_encoder is not None: |
|
prompt_embeds_dtype = self.text_encoder.dtype |
|
elif self.unet is not None: |
|
prompt_embeds_dtype = self.unet.dtype |
|
else: |
|
prompt_embeds_dtype = prompt_embeds.dtype |
|
|
|
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) |
|
|
|
bs_embed, seq_len, _ = prompt_embeds.shape |
|
|
|
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
|
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) |
|
|
|
|
|
if do_classifier_free_guidance and negative_prompt_embeds is None: |
|
uncond_tokens: List[str] |
|
if negative_prompt is None: |
|
uncond_tokens = [""] * batch_size |
|
elif prompt is not None and type(prompt) is not type(negative_prompt): |
|
                raise TypeError(

                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
|
f" {type(prompt)}." |
|
) |
|
elif isinstance(negative_prompt, str): |
|
uncond_tokens = [negative_prompt] |
|
elif batch_size != len(negative_prompt): |
|
raise ValueError( |
|
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" |
|
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" |
|
" the batch size of `prompt`." |
|
) |
|
else: |
|
uncond_tokens = negative_prompt |
|
|
|
|
|
if isinstance(self, TextualInversionLoaderMixin): |
|
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) |
|
|
|
max_length = prompt_embeds.shape[1] |
|
uncond_input = self.tokenizer( |
|
uncond_tokens, |
|
padding="max_length", |
|
max_length=max_length, |
|
truncation=True, |
|
return_tensors="pt", |
|
) |
|
|
|
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: |
|
attention_mask = uncond_input.attention_mask.to(device) |
|
else: |
|
attention_mask = None |
|
|
|
negative_prompt_embeds = self.text_encoder( |
|
uncond_input.input_ids.to(device), |
|
attention_mask=attention_mask, |
|
) |
|
negative_prompt_embeds = negative_prompt_embeds[0] |
|
|
|
if do_classifier_free_guidance: |
|
|
|
seq_len = negative_prompt_embeds.shape[1] |
|
|
|
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) |
|
|
|
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) |
|
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
|
|
|
return prompt_embeds, negative_prompt_embeds |
|
|
|
|
|
def check_inputs( |
|
self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None |
|
): |
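        """Validate ``strength``, ``callback_steps``, and the allowed combinations of prompt arguments."""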
|
if strength < 0 or strength > 1: |
|
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
|
|
|
if (callback_steps is None) or ( |
|
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) |
|
): |
|
raise ValueError( |
|
f"`callback_steps` has to be a positive integer but is {callback_steps} of type" |
|
f" {type(callback_steps)}." |
|
) |
|
|
|
if prompt is not None and prompt_embeds is not None: |
|
raise ValueError( |
|
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" |
|
" only forward one of the two." |
|
) |
|
elif prompt is None and prompt_embeds is None: |
|
raise ValueError( |
|
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." |
|
) |
|
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): |
|
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") |
|
|
|
if negative_prompt is not None and negative_prompt_embeds is not None: |
|
raise ValueError( |
|
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" |
|
f" {negative_prompt_embeds}. Please make sure to only forward one of the two." |
|
) |
|
|
|
if prompt_embeds is not None and negative_prompt_embeds is not None: |
|
if prompt_embeds.shape != negative_prompt_embeds.shape: |
|
raise ValueError( |
|
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" |
|
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" |
|
f" {negative_prompt_embeds.shape}." |
|
) |
|
|
|
|
|
def prepare_extra_step_kwargs(self, generator, eta): |
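        # Prepare extra kwargs for the scheduler step, since not all schedulers share the same signature.
        # `eta` corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502) and is only forwarded
        # to schedulers whose `step()` accepts it; the same signature check is done for `generator`.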
|
|
|
|
|
|
|
|
|
|
|
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) |
|
extra_step_kwargs = {} |
|
if accepts_eta: |
|
extra_step_kwargs["eta"] = eta |
|
|
|
|
|
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) |
|
if accepts_generator: |
|
extra_step_kwargs["generator"] = generator |
|
return extra_step_kwargs |
|
|
|
|
|
def run_safety_checker(self, image, device, dtype): |
|
if self.safety_checker is None: |
|
has_nsfw_concept = None |
|
else: |
|
if torch.is_tensor(image): |
|
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") |
|
else: |
|
feature_extractor_input = self.image_processor.numpy_to_pil(image) |
|
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) |
|
image, has_nsfw_concept = self.safety_checker( |
|
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) |
|
) |
|
return image, has_nsfw_concept |
|
|
|
|
|
def decode_latents(self, latents): |
|
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" |
|
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) |
|
|
|
latents = 1 / self.vae.config.scaling_factor * latents |
|
image = self.vae.decode(latents, return_dict=False)[0] |
|
image = (image / 2 + 0.5).clamp(0, 1) |
|
|
|
image = image.cpu().permute(0, 2, 3, 1).float().numpy() |
|
return image |
|
|
|
|
|
def get_timesteps(self, num_inference_steps, strength, device): |
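        # Standard img2img timestep truncation: keep the last `strength * num_inference_steps` steps,
        # so a smaller `strength` starts denoising later and preserves more of the input image.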
|
|
|
init_timestep = min(int(num_inference_steps * strength), num_inference_steps) |
|
|
|
t_start = max(num_inference_steps - init_timestep, 0) |
|
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] |
|
|
|
return timesteps, num_inference_steps - t_start |
|
|
|
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, denoise_model, generator=None): |
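        """Encode ``image`` into VAE latents and build the starting latents for denoising.

        Returns ``(latents, clean_latents)``: ``clean_latents`` are the un-noised image latents used as
        the reference for the source branch, while ``latents`` are either the image latents noised to
        ``timestep`` (when ``denoise_model`` is True) or pure Gaussian noise (when it is False).
        """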
|
image = image.to(device=device, dtype=dtype) |
|
|
|
batch_size = image.shape[0] |
|
|
|
if image.shape[1] == 4: |
|
init_latents = image |
|
|
|
else: |
|
if isinstance(generator, list) and len(generator) != batch_size: |
|
raise ValueError( |
|
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" |
|
f" size of {batch_size}. Make sure the batch size matches the length of the generators." |
|
) |
|
|
|
if isinstance(generator, list): |
|
init_latents = [ |
|
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) |
|
] |
|
init_latents = torch.cat(init_latents, dim=0) |
|
else: |
|
init_latents = self.vae.encode(image).latent_dist.sample(generator) |
|
|
|
init_latents = self.vae.config.scaling_factor * init_latents |
|
|
|
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: |
|
|
|
            deprecation_message = (

                f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"

                " images (`image`). The initial images are now being duplicated to match the number of text prompts. Note"

                " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
|
" your script to pass as many initial images as text prompts to suppress this warning." |
|
) |
|
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) |
|
additional_image_per_prompt = batch_size // init_latents.shape[0] |
|
init_latents = torch.cat([init_latents] * additional_image_per_prompt * num_images_per_prompt, dim=0) |
|
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: |
|
raise ValueError( |
|
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." |
|
) |
|
else: |
|
init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) |
|
|
|
|
|
shape = init_latents.shape |
|
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) |
|
|
|
|
|
clean_latents = init_latents |
|
if denoise_model: |
|
init_latents = self.scheduler.add_noise(init_latents, noise, timestep) |
|
latents = init_latents |
|
else: |
|
latents = noise |
|
|
|
return latents, clean_latents |
|
|
|
@torch.no_grad() |
|
def __call__( |
|
self, |
|
prompt: Union[str, List[str]], |
|
source_prompt: Union[str, List[str]], |
|
        negative_prompt: Optional[Union[str, List[str]]] = None,
|
        positive_prompt: Optional[Union[str, List[str]]] = None,
|
image: PipelineImageInput = None, |
|
strength: float = 0.8, |
|
num_inference_steps: Optional[int] = 50, |
|
original_inference_steps: Optional[int] = 50, |
|
guidance_scale: Optional[float] = 7.5, |
|
source_guidance_scale: Optional[float] = 1, |
|
num_images_per_prompt: Optional[int] = 1, |
|
eta: Optional[float] = 1.0, |
|
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, |
|
prompt_embeds: Optional[torch.FloatTensor] = None, |
|
output_type: Optional[str] = "pil", |
|
return_dict: bool = True, |
|
        callback: Optional[Callable[..., Any]] = None,
|
callback_steps: int = 1, |
|
cross_attention_kwargs: Optional[Dict[str, Any]] = None, |
|
denoise_model: Optional[bool] = True, |
|
): |
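        r"""Edit ``image`` so that it matches ``prompt`` while staying close to ``source_prompt``.

        ``strength`` controls how much of the input image is re-noised before denoising starts,
        ``source_guidance_scale`` scales guidance for the source/mutual branches, and
        ``denoise_model`` chooses between starting from the noised image latents or from pure
        noise. Returns a `StableDiffusionPipelineOutput`, or a plain ``(images, nsfw_flags)``
        tuple when ``return_dict=False``.
        """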
|
|
|
self.check_inputs(prompt, strength, callback_steps) |
|
|
|
|
|
batch_size = 1 if isinstance(prompt, str) else len(prompt) |
|
device = self._execution_device |
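        # Encode the edit prompt and the source prompt. Classifier-free guidance is active whenever
        # `guidance_scale` > 1, in which case unconditional and conditional embeddings are concatenated
        # along the batch dimension further below.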
|
|
|
|
|
|
|
do_classifier_free_guidance = guidance_scale > 1.0 |
|
|
|
|
|
text_encoder_lora_scale = ( |
|
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None |
|
) |
|
prompt_embeds_tuple = self.encode_prompt( |
|
prompt, |
|
device, |
|
num_images_per_prompt, |
|
do_classifier_free_guidance, |
|
negative_prompt=negative_prompt, |
|
prompt_embeds=prompt_embeds, |
|
lora_scale=text_encoder_lora_scale, |
|
) |
|
source_prompt_embeds_tuple = self.encode_prompt( |
|
source_prompt, device, num_images_per_prompt, do_classifier_free_guidance, positive_prompt, None |
|
) |
|
if prompt_embeds_tuple[1] is not None: |
|
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) |
|
else: |
|
prompt_embeds = prompt_embeds_tuple[0] |
|
if source_prompt_embeds_tuple[1] is not None: |
|
source_prompt_embeds = torch.cat([source_prompt_embeds_tuple[1], source_prompt_embeds_tuple[0]]) |
|
else: |
|
source_prompt_embeds = source_prompt_embeds_tuple[0] |
|
|
|
|
|
image = self.image_processor.preprocess(image) |
|
|
|
|
|
self.scheduler.set_timesteps( |
|
num_inference_steps=num_inference_steps, |
|
device=device, |
|
original_inference_steps=original_inference_steps) |
|
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) |
|
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) |
|
|
|
|
|
latents, clean_latents = self.prepare_latents( |
|
image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, denoise_model, generator |
|
) |
|
source_latents = latents |
|
mutual_latents = latents |
|
|
|
|
|
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) |
|
generator = extra_step_kwargs.pop("generator", None) |
|
|
|
|
|
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order |
|
with self.progress_bar(total=num_inference_steps) as progress_bar: |
|
for i, t in enumerate(timesteps): |
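                # Expand the latents of the source, edited, and mutual branches for classifier-free
                # guidance so the UNet processes unconditional and conditional inputs in one forward pass.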
|
|
|
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents |
|
source_latent_model_input = ( |
|
torch.cat([source_latents] * 2) if do_classifier_free_guidance else source_latents |
|
) |
|
mutual_latent_model_input = ( |
|
torch.cat([mutual_latents] * 2) if do_classifier_free_guidance else mutual_latents |
|
) |
|
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) |
|
source_latent_model_input = self.scheduler.scale_model_input(source_latent_model_input, t) |
|
mutual_latent_model_input = self.scheduler.scale_model_input(mutual_latent_model_input, t) |
|
|
|
|
|
if do_classifier_free_guidance: |
|
concat_latent_model_input = torch.stack( |
|
[ |
|
source_latent_model_input[0], |
|
latent_model_input[0], |
|
mutual_latent_model_input[0], |
|
source_latent_model_input[1], |
|
latent_model_input[1], |
|
mutual_latent_model_input[1], |
|
], |
|
dim=0, |
|
) |
|
concat_prompt_embeds = torch.stack( |
|
[ |
|
source_prompt_embeds[0], |
|
prompt_embeds[0], |
|
source_prompt_embeds[0], |
|
source_prompt_embeds[1], |
|
prompt_embeds[1], |
|
source_prompt_embeds[1], |
|
], |
|
dim=0, |
|
) |
|
else: |
|
concat_latent_model_input = torch.cat( |
|
[ |
|
source_latent_model_input, |
|
latent_model_input, |
|
mutual_latent_model_input, |
|
], |
|
dim=0, |
|
) |
|
concat_prompt_embeds = torch.cat( |
|
[ |
|
source_prompt_embeds, |
|
prompt_embeds, |
|
source_prompt_embeds, |
|
], |
|
dim=0, |
|
) |
|
|
|
concat_noise_pred = self.unet( |
|
concat_latent_model_input, |
|
t, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
encoder_hidden_states=concat_prompt_embeds, |
|
).sample |
|
|
|
|
|
if do_classifier_free_guidance: |
|
( |
|
source_noise_pred_uncond, |
|
noise_pred_uncond, |
|
mutual_noise_pred_uncond, |
|
source_noise_pred_text, |
|
noise_pred_text, |
|
                        mutual_noise_pred_text,
|
) = concat_noise_pred.chunk(6, dim=0) |
|
|
|
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) |
|
source_noise_pred = source_noise_pred_uncond + source_guidance_scale * ( |
|
source_noise_pred_text - source_noise_pred_uncond |
|
) |
|
mutual_noise_pred = mutual_noise_pred_uncond + source_guidance_scale * ( |
|
mutual_noise_pred_text - mutual_noise_pred_uncond |
|
) |
|
|
|
else: |
|
(source_noise_pred, noise_pred, mutual_noise_pred) = concat_noise_pred.chunk(3, dim=0) |
|
|
|
noise = torch.randn( |
|
latents.shape, dtype=latents.dtype, device=latents.device, generator=generator |
|
) |
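                # The same freshly drawn noise is shared by both sampler calls below: the first updates
                # the edited-branch latents without advancing the scheduler's step index (`to_next=False`),
                # the second updates the source and mutual branches and advances it.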
|
|
|
_, latents, pred_x0 = ddcm_sampler( |
|
self.scheduler, source_latents, |
|
latents, t, |
|
source_noise_pred, noise_pred, |
|
clean_latents, noise=noise, |
|
eta=eta, to_next=False, |
|
**extra_step_kwargs |
|
) |
|
|
|
source_latents, mutual_latents, pred_xm = ddcm_sampler( |
|
self.scheduler, source_latents, |
|
mutual_latents, t, |
|
source_noise_pred, mutual_noise_pred, |
|
clean_latents, noise=noise, |
|
eta=eta, **extra_step_kwargs |
|
) |
|
|
|
|
|
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): |
|
progress_bar.update() |
|
if callback is not None and i % callback_steps == 0: |
|
alpha_prod_t = self.scheduler.alphas_cumprod[t] |
|
mutual_latents, latents = callback(i, t, source_latents, latents, mutual_latents, alpha_prod_t) |
|
|
|
|
|
        if output_type != "latent":
|
image = self.vae.decode(pred_x0 / self.vae.config.scaling_factor, return_dict=False)[0] |
|
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) |
|
else: |
|
image = pred_x0 |
|
has_nsfw_concept = None |
|
|
|
if has_nsfw_concept is None: |
|
do_denormalize = [True] * image.shape[0] |
|
else: |
|
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] |
|
|
|
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) |
|
|
|
if not return_dict: |
|
return (image, has_nsfw_concept) |
|
|
|
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) |
|
|