Diffusers Bot committed (verified)
Commit 345d8a9 · 1 parent: 78b20db

Upload folder using huggingface_hub
main/README.md CHANGED
@@ -1641,18 +1641,18 @@ from io import BytesIO
 from PIL import Image
 import torch
 from diffusers import DDIMScheduler
-from diffusers.pipelines.stable_diffusion import StableDiffusionImg2ImgPipeline
+from diffusers import DiffusionPipeline
 
 # Use the DDIMScheduler scheduler here instead
 scheduler = DDIMScheduler.from_pretrained("stabilityai/stable-diffusion-2-1",
                                           subfolder="scheduler")
 
 
-pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-1",
-                                                       custom_pipeline="stable_diffusion_tensorrt_img2img",
-                                                       variant='fp16',
-                                                       torch_dtype=torch.float16,
-                                                       scheduler=scheduler,)
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1",
+                                         custom_pipeline="stable_diffusion_tensorrt_img2img",
+                                         variant='fp16',
+                                         torch_dtype=torch.float16,
+                                         scheduler=scheduler,)
 
 # re-use cached folder to save ONNX models and TensorRT Engines
 pipe.set_cached_folder("stabilityai/stable-diffusion-2-1", variant='fp16',)
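For orientation, here is a minimal sketch of how the rest of the README example typically drives the updated pipeline after the snippet above; the image URL, prompt, output filename, and the `strength` argument are illustrative assumptions rather than part of this diff:

import requests
from io import BytesIO

from PIL import Image

# Continues from the snippet above: `pipe`, the scheduler, and the cached folder are already set up.
pipe = pipe.to("cuda")

# Placeholder input image; any RGB image of a suitable size works here.
url = "https://example.com/input.png"
init_image = Image.open(BytesIO(requests.get(url).content)).convert("RGB")

prompt = "photorealistic new zealand hills"
# Building the ONNX models and TensorRT engines before the first inference can take a while;
# the cached folder set above lets later runs reuse them.
image = pipe(prompt, image=init_image, strength=0.75).images[0]
image.save("tensorrt_img2img_output.png")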
main/stable_diffusion_tensorrt_img2img.py CHANGED
@@ -18,8 +18,7 @@
 import gc
 import os
 from collections import OrderedDict
-from copy import copy
-from typing import List, Optional, Union
+from typing import List, Optional, Tuple, Union
 
 import numpy as np
 import onnx
@@ -27,9 +26,11 @@ import onnx_graphsurgeon as gs
 import PIL.Image
 import tensorrt as trt
 import torch
+from cuda import cudart
 from huggingface_hub import snapshot_download
 from huggingface_hub.utils import validate_hf_hub_args
 from onnx import shape_inference
+from packaging import version
 from polygraphy import cuda
 from polygraphy.backend.common import bytes_from_path
 from polygraphy.backend.onnx.loader import fold_constants
@@ -41,12 +42,13 @@ from polygraphy.backend.trt import (
     network_from_onnx_path,
     save_engine,
 )
-from polygraphy.backend.trt import util as trt_util
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
+from diffusers import DiffusionPipeline
+from diffusers.configuration_utils import FrozenDict, deprecate
+from diffusers.image_processor import VaeImageProcessor
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion import (
-    StableDiffusionImg2ImgPipeline,
     StableDiffusionPipelineOutput,
     StableDiffusionSafetyChecker,
 )
@@ -58,7 +60,7 @@ from diffusers.utils import logging
 """
 Installation instructions
 python3 -m pip install --upgrade transformers diffusers>=0.16.0
-python3 -m pip install --upgrade tensorrt>=8.6.1
+python3 -m pip install --upgrade tensorrt-cu12==10.2.0
 python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
 python3 -m pip install onnxruntime
 """
@@ -88,10 +90,6 @@ else:
 torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
 
 
-def device_view(t):
-    return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
-
-
 def preprocess_image(image):
     """
     image: torch.Tensor
@@ -125,10 +123,8 @@ class Engine:
         onnx_path,
         fp16,
         input_profile=None,
-        enable_preview=False,
         enable_all_tactics=False,
         timing_cache=None,
-        workspace_size=0,
     ):
         logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
         p = Profile()
@@ -137,20 +133,13 @@
                 assert len(dims) == 3
                 p.add(name, min=dims[0], opt=dims[1], max=dims[2])
 
-        config_kwargs = {}
-
-        config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
-        if enable_preview:
-            # Faster dynamic shapes made optional since it increases engine build time.
-            config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
-        if workspace_size > 0:
-            config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
+        extra_build_args = {}
         if not enable_all_tactics:
-            config_kwargs["tactic_sources"] = []
+            extra_build_args["tactic_sources"] = []
 
         engine = engine_from_network(
             network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
-            config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
+            config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **extra_build_args),
             save_timing_cache=timing_cache,
         )
         save_engine(engine, path=self.engine_path)
@@ -163,28 +152,24 @@ class Engine:
         self.context = self.engine.create_execution_context()
 
     def allocate_buffers(self, shape_dict=None, device="cuda"):
-        for idx in range(trt_util.get_bindings_per_profile(self.engine)):
-            binding = self.engine[idx]
-            if shape_dict and binding in shape_dict:
-                shape = shape_dict[binding]
+        for binding in range(self.engine.num_io_tensors):
+            name = self.engine.get_tensor_name(binding)
+            if shape_dict and name in shape_dict:
+                shape = shape_dict[name]
             else:
-                shape = self.engine.get_binding_shape(binding)
-            dtype = trt.nptype(self.engine.get_binding_dtype(binding))
-            if self.engine.binding_is_input(binding):
-                self.context.set_binding_shape(idx, shape)
+                shape = self.engine.get_tensor_shape(name)
+            dtype = trt.nptype(self.engine.get_tensor_dtype(name))
+            if self.engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
+                self.context.set_input_shape(name, shape)
             tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
-            self.tensors[binding] = tensor
-            self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
+            self.tensors[name] = tensor
 
     def infer(self, feed_dict, stream):
-        start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
-        # shallow copy of ordered dict
-        device_buffers = copy(self.buffers)
         for name, buf in feed_dict.items():
-            assert isinstance(buf, cuda.DeviceView)
-            device_buffers[name] = buf
-        bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
-        noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
+            self.tensors[name].copy_(buf)
+        for name, tensor in self.tensors.items():
+            self.context.set_tensor_address(name, tensor.data_ptr())
+        noerror = self.context.execute_async_v3(stream)
         if not noerror:
             raise ValueError("ERROR: inference failed.")
 
@@ -325,10 +310,8 @@ def build_engines(
     force_engine_rebuild=False,
     static_batch=False,
     static_shape=True,
-    enable_preview=False,
    enable_all_tactics=False,
     timing_cache=None,
-    max_workspace_size=0,
 ):
     built_engines = {}
     if not os.path.isdir(onnx_dir):
@@ -393,9 +376,7 @@ def build_engines(
                 static_batch=static_batch,
                 static_shape=static_shape,
             ),
-            enable_preview=enable_preview,
             timing_cache=timing_cache,
-            workspace_size=max_workspace_size,
         )
         built_engines[model_name] = engine
 
@@ -674,7 +655,7 @@ def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False)
     return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
 
 
-class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
+class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline):
     r"""
     Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion.
 
@@ -702,6 +683,8 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
 
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
+
     def __init__(
         self,
         vae: AutoencoderKL,
@@ -722,24 +705,86 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         onnx_dir: str = "onnx",
         # TensorRT engine build parameters
         engine_dir: str = "engine",
-        build_preview_features: bool = True,
         force_engine_rebuild: bool = False,
         timing_cache: str = "timing_cache",
     ):
-        super().__init__(
-            vae,
-            text_encoder,
-            tokenizer,
-            unet,
-            scheduler,
+        super().__init__()
+
+        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
+                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
+                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
+                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
+                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
+                " file"
+            )
+            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["steps_offset"] = 1
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
+                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
+                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
+                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
+                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
+            )
+            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["clip_sample"] = False
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if safety_checker is None and requires_safety_checker:
+            logger.warning(
+                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+            )
+
+        if safety_checker is not None and feature_extractor is None:
+            raise ValueError(
+                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+            )
+
+        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
+            version.parse(unet.config._diffusers_version).base_version
+        ) < version.parse("0.9.0.dev0")
+        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
+        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
+            deprecation_message = (
+                "The configuration file of the unet has set the default `sample_size` to smaller than"
+                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
+                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
+                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
+                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
+                " in the config might lead to incorrect results in future versions. If you have downloaded this"
+                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
+                " the `unet/config.json` file"
+            )
+            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(unet.config)
+            new_config["sample_size"] = 64
+            unet._internal_dict = FrozenDict(new_config)
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+            unet=unet,
+            scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
             image_encoder=image_encoder,
-            requires_safety_checker=requires_safety_checker,
         )
 
-        self.vae.forward = self.vae.decode
-
         self.stages = stages
         self.image_height, self.image_width = image_height, image_width
         self.inpaint = False
@@ -750,7 +795,6 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         self.timing_cache = timing_cache
         self.build_static_batch = False
         self.build_dynamic_shape = False
-        self.build_preview_features = build_preview_features
 
         self.max_batch_size = max_batch_size
         # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.
@@ -761,6 +805,11 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         self.models = {}  # loaded in __loadModels()
         self.engine = {}  # loaded in build_engines()
 
+        self.vae.forward = self.vae.decode
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.register_to_config(requires_safety_checker=requires_safety_checker)
+
     def __loadModels(self):
         # Load pipeline models
         self.embedding_dim = self.text_encoder.config.hidden_size
@@ -779,6 +828,33 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         if "vae_encoder" in self.stages:
             self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)
 
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+    def run_safety_checker(
+        self, image: Union[torch.Tensor, PIL.Image.Image], device: torch.device, dtype: torch.dtype
+    ) -> Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]:
+        r"""
+        Runs the safety checker on the given image.
+        Args:
+            image (Union[torch.Tensor, PIL.Image.Image]): The input image to be checked.
+            device (torch.device): The device to run the safety checker on.
+            dtype (torch.dtype): The data type of the input image.
+        Returns:
+            (image, has_nsfw_concept) Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]: A tuple containing the processed image and
+            a boolean indicating whether the image has a NSFW (Not Safe for Work) concept.
+        """
+        if self.safety_checker is None:
+            has_nsfw_concept = None
+        else:
+            if torch.is_tensor(image):
+                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
+            else:
+                feature_extractor_input = self.image_processor.numpy_to_pil(image)
+            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
+            image, has_nsfw_concept = self.safety_checker(
+                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+            )
+        return image, has_nsfw_concept
+
     @classmethod
     @validate_hf_hub_args
     def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
@@ -826,7 +902,6 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
             force_engine_rebuild=self.force_engine_rebuild,
            static_batch=self.build_static_batch,
            static_shape=not self.build_dynamic_shape,
-            enable_preview=self.build_preview_features,
             timing_cache=self.timing_cache,
         )
 
@@ -850,9 +925,7 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         return tuple(init_images)
 
     def __encode_image(self, init_image):
-        init_latents = runEngine(self.engine["vae_encoder"], {"images": device_view(init_image)}, self.stream)[
-            "latent"
-        ]
+        init_latents = runEngine(self.engine["vae_encoder"], {"images": init_image}, self.stream)["latent"]
         init_latents = 0.18215 * init_latents
         return init_latents
 
@@ -881,9 +954,8 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
            .to(self.torch_device)
        )
 
-        text_input_ids_inp = device_view(text_input_ids)
        # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
-        text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
+        text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids}, self.stream)[
            "text_embeddings"
        ].clone()
 
@@ -899,8 +971,7 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
                .input_ids.type(torch.int32)
                .to(self.torch_device)
            )
-            uncond_input_ids_inp = device_view(uncond_input_ids)
-            uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
+            uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids}, self.stream)[
                "text_embeddings"
            ]
 
@@ -924,18 +995,15 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
            # Predict the noise residual
            timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep
 
-            sample_inp = device_view(latent_model_input)
-            timestep_inp = device_view(timestep_float)
-            embeddings_inp = device_view(text_embeddings)
            noise_pred = runEngine(
                self.engine["unet"],
-                {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
+                {"sample": latent_model_input, "timestep": timestep_float, "encoder_hidden_states": text_embeddings},
                self.stream,
            )["latent"]
 
            # Perform guidance
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-            noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+            noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond)
 
            latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample
 
@@ -943,12 +1011,12 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
        return latents
 
    def __decode_latent(self, latents):
-        images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
+        images = runEngine(self.engine["vae"], {"latent": latents}, self.stream)["images"]
        images = (images / 2 + 0.5).clamp(0, 1)
        return images.cpu().permute(0, 2, 3, 1).float().numpy()
 
    def __loadResources(self, image_height, image_width, batch_size):
-        self.stream = cuda.Stream()
+        self.stream = cudart.cudaStreamCreate()[1]
 
        # Allocate buffers for TensorRT engine bindings
        for model_name, obj in self.models.items():
@@ -1061,5 +1129,6 @@ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
            # VAE decode latent
            images = self.__decode_latent(latents)
 
+            images, has_nsfw_concept = self.run_safety_checker(images, self.torch_device, text_embeddings.dtype)
            images = self.numpy_to_pil(images)
-            return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)
+            return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
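The bulk of the pipeline changes migrate the Engine helper from TensorRT's removed binding-index API (get_binding_shape, binding_is_input, execute_async_v2, polygraphy's cuda.DeviceView and cuda.Stream) to the name-based tensor API used by TensorRT 10, driven by a raw CUDA stream from cuda-python. As a reference, here is a condensed, self-contained sketch of that pattern under the same assumptions as the updated Engine class (a deserialized ICudaEngine, torch tensors as I/O buffers); the helper name and dtype map are illustrative, and unlike the Engine class it queries the context for resolved output shapes instead of taking an explicit shape_dict:

import numpy as np
import tensorrt as trt
import torch
from cuda import cudart  # cuda-python; provides the raw CUDA stream handed to execute_async_v3

# Minimal numpy -> torch dtype map covering the I/O dtypes this pipeline uses.
_NP_TO_TORCH = {np.float32: torch.float32, np.float16: torch.float16, np.int32: torch.int32, np.int64: torch.int64}


def run_trt_engine(engine, feed_dict):
    """Run one inference with the name-based tensor API (sketch; the real Engine class caches all of this)."""
    context = engine.create_execution_context()
    stream = cudart.cudaStreamCreate()[1]  # cudaStreamCreate returns (error_code, stream)

    tensors = {}
    # Register inputs first and declare their (possibly dynamic) shapes to the context.
    for idx in range(engine.num_io_tensors):
        name = engine.get_tensor_name(idx)
        if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
            tensors[name] = feed_dict[name].contiguous().cuda()
            context.set_input_shape(name, tuple(tensors[name].shape))

    # Allocate outputs once input shapes are known; the context reports the resolved output shapes.
    for idx in range(engine.num_io_tensors):
        name = engine.get_tensor_name(idx)
        if engine.get_tensor_mode(name) == trt.TensorIOMode.OUTPUT:
            shape = tuple(context.get_tensor_shape(name))
            dtype = _NP_TO_TORCH[trt.nptype(engine.get_tensor_dtype(name))]
            tensors[name] = torch.empty(shape, dtype=dtype, device="cuda")

    # Every I/O tensor needs its device address registered before execution.
    for name, tensor in tensors.items():
        context.set_tensor_address(name, tensor.data_ptr())
    if not context.execute_async_v3(stream):
        raise RuntimeError("TensorRT inference failed")
    cudart.cudaStreamSynchronize(stream)

    return {name: t for name, t in tensors.items() if engine.get_tensor_mode(name) == trt.TensorIOMode.OUTPUT}

The updated Engine class splits this work between allocate_buffers (run once per resolution and batch size, with an explicit shape_dict) and infer (which only copies the feed_dict into the preallocated tensors and launches execute_async_v3), so the per-denoising-step overhead stays low.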