Dataset columns: `text` (string, lengths 1–1.02k), `class_index` (int64, values 0–1.38k), `source` (string, 431 distinct values).
Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept `height` and `width` arguments from the [`image_processor.VaeImageProcessor.preprocess`] method. vae_scale_factor (`int`, *optional*, defaults to `8`): VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. resample (`str`, *optional*, defaults to `lanczos`): Resampling filter to use when resizing the image. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image to [-1,1]. do_binarize (`bool`, *optional*, defaults to `False`): Whether to binarize the image to 0/1. do_convert_rgb (`bool`, *optional*, defaults to `False`): Whether to convert the images to RGB format.
6
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/image_processor.py
do_convert_grayscale (`bool`, *optional*, defaults to `False`): Whether to convert the images to grayscale format. """
6
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/image_processor.py
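For orientation, here is a minimal sketch of how the arguments documented above are typically exercised. It assumes the `VaeImageProcessor` class from `diffusers.image_processor`; the image size is made up for the example:

```python
import torch
from PIL import Image
from diffusers.image_processor import VaeImageProcessor

# Resize to multiples of the VAE scale factor and normalize pixel values to [-1, 1].
processor = VaeImageProcessor(do_resize=True, vae_scale_factor=8, do_normalize=True)

image = Image.new("RGB", (517, 389))       # deliberately not a multiple of 8
pixels = processor.preprocess(image)       # torch.Tensor of shape (1, 3, H, W)
print(pixels.shape)                        # H and W are rounded to multiples of 8

# After a (hypothetical) VAE decode, convert back to PIL images.
restored = processor.postprocess(pixels, output_type="pil")
```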
@register_to_config def __init__( self, do_resize: bool = True, vae_scale_factor: int = 8, resample: str = "lanczos", do_normalize: bool = True, do_binarize: bool = False, do_convert_grayscale: bool = False, ): super().__init__( do_resize=do_resize, vae_scale_factor=vae_scale_factor, resample=resample, do_normalize=do_normalize, do_binarize=do_binarize, do_convert_grayscale=do_convert_grayscale, ) @staticmethod def classify_height_width_bin(height: int, width: int, ratios: dict) -> Tuple[int, int]: r""" Returns the binned height and width based on the aspect ratio. Args: height (`int`): The height of the image. width (`int`): The width of the image. ratios (`dict`): A dictionary where keys are aspect ratios and values are tuples of (height, width).
6
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/image_processor.py
Returns: `Tuple[int, int]`: The closest binned height and width. """ ar = float(height / width) closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar)) default_hw = ratios[closest_ratio] return int(default_hw[0]), int(default_hw[1]) @staticmethod def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int) -> torch.Tensor: r""" Resizes and crops a tensor of images to the specified dimensions. Args: samples (`torch.Tensor`): A tensor of shape (N, C, H, W) where N is the batch size, C is the number of channels, H is the height, and W is the width. new_width (`int`): The desired width of the output images. new_height (`int`): The desired height of the output images.
6
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/image_processor.py
Returns: `torch.Tensor`: A tensor containing the resized and cropped images. """ orig_height, orig_width = samples.shape[2], samples.shape[3] # Check if resizing is needed if orig_height != new_height or orig_width != new_width: ratio = max(new_height / orig_height, new_width / orig_width) resized_width = int(orig_width * ratio) resized_height = int(orig_height * ratio) # Resize samples = F.interpolate( samples, size=(resized_height, resized_width), mode="bilinear", align_corners=False ) # Center Crop start_x = (resized_width - new_width) // 2 end_x = start_x + new_width start_y = (resized_height - new_height) // 2 end_y = start_y + new_height samples = samples[:, :, start_y:end_y, start_x:end_x] return samples
6
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/image_processor.py
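A short illustration of the two static helpers above. The aspect-ratio table is invented for the example (real pipelines ship their own binning tables), and in current diffusers versions these helpers live on the `PixArtImageProcessor` subclass of `VaeImageProcessor`:

```python
import torch
from diffusers.image_processor import PixArtImageProcessor  # subclass exposing the helpers above

# Hypothetical table mapping aspect ratios to (height, width) bins.
ratios = {"0.5": (512, 1024), "1.0": (1024, 1024), "2.0": (1024, 512)}

# 720x1280 has aspect ratio 0.5625, so the closest bin is "0.5" -> (512, 1024).
height, width = PixArtImageProcessor.classify_height_width_bin(720, 1280, ratios)

samples = torch.randn(1, 3, 720, 1280)
# Scale by max(512/720, 1024/1280) = 0.8 (a downscale here), then center-crop to the binned size.
binned = PixArtImageProcessor.resize_and_crop_tensor(samples, new_width=width, new_height=height)
print(binned.shape)  # torch.Size([1, 3, 512, 1024])
```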
class VideoProcessor(VaeImageProcessor): r"""Simple video processor.""" def preprocess_video(self, video, height: Optional[int] = None, width: Optional[int] = None) -> torch.Tensor: r""" Preprocesses input video(s).
7
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/video_processor.py
Args: video (`List[PIL.Image]`, `List[List[PIL.Image]]`, `torch.Tensor`, `np.array`, `List[torch.Tensor]`, `List[np.array]`): The input video. It can be one of the following: * List of the PIL images. * List of list of PIL images. * 4D Torch tensors (expected shape for each tensor `(num_frames, num_channels, height, width)`). * 4D NumPy arrays (expected shape for each array `(num_frames, height, width, num_channels)`). * List of 4D Torch tensors (expected shape for each tensor `(num_frames, num_channels, height, width)`). * List of 4D NumPy arrays (expected shape for each array `(num_frames, height, width, num_channels)`). * 5D NumPy arrays: expected shape for each array `(batch_size, num_frames, height, width, num_channels)`.
7
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/video_processor.py
* 5D Torch tensors: expected shape for each tensor `(batch_size, num_frames, num_channels, height, width)`. height (`int`, *optional*, defaults to `None`): The height of the preprocessed video frames. If `None`, `get_default_height_width()` is used to get the default height. width (`int`, *optional*, defaults to `None`): The width of the preprocessed video frames. If `None`, `get_default_height_width()` is used to get the default width. """ if isinstance(video, list) and isinstance(video[0], np.ndarray) and video[0].ndim == 5: warnings.warn( "Passing `video` as a list of 5d np.ndarray is deprecated." "Please concatenate the list along the batch dimension and pass it as a single 5d np.ndarray", FutureWarning, ) video = np.concatenate(video, axis=0)
7
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/video_processor.py
if isinstance(video, list) and isinstance(video[0], torch.Tensor) and video[0].ndim == 5: warnings.warn( "Passing `video` as a list of 5d torch.Tensor is deprecated." "Please concatenate the list along the batch dimension and pass it as a single 5d torch.Tensor", FutureWarning, ) video = torch.cat(video, axis=0)
7
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/video_processor.py
# ensure the input is a list of videos: # - if it is a batch of videos (5d torch.Tensor or np.ndarray), it is converted to a list of videos (a list of 4d torch.Tensor or np.ndarray) # - if it is a single video, it is converted to a list of one video. if isinstance(video, (np.ndarray, torch.Tensor)) and video.ndim == 5: video = list(video) elif isinstance(video, list) and is_valid_image(video[0]) or is_valid_image_imagelist(video): video = [video] elif isinstance(video, list) and is_valid_image_imagelist(video[0]): video = video else: raise ValueError( "Input is in incorrect format. Currently, we only support numpy.ndarray, torch.Tensor, PIL.Image.Image" ) video = torch.stack([self.preprocess(img, height=height, width=width) for img in video], dim=0) # move the number of channels before the number of frames. video = video.permute(0, 2, 1, 3, 4)
7
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/video_processor.py
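A minimal sketch of `preprocess_video`, assuming the `VideoProcessor` API shown above; the array shape and contents are made up:

```python
import numpy as np
from diffusers.video_processor import VideoProcessor

video_processor = VideoProcessor(vae_scale_factor=8)

# One video as a 4D NumPy array in [0, 1]: (num_frames, height, width, num_channels).
frames = np.random.rand(16, 256, 256, 3).astype(np.float32)

video = video_processor.preprocess_video(frames, height=256, width=256)
print(video.shape)  # (batch_size, num_channels, num_frames, height, width) == (1, 3, 16, 256, 256)
```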
return video def postprocess_video( self, video: torch.Tensor, output_type: str = "np" ) -> Union[np.ndarray, torch.Tensor, List[PIL.Image.Image]]: r""" Converts a video tensor to a list of frames for export. Args: video (`torch.Tensor`): The video as a tensor. output_type (`str`, defaults to `"np"`): Output type of the postprocessed `video` tensor. """ batch_size = video.shape[0] outputs = [] for batch_idx in range(batch_size): batch_vid = video[batch_idx].permute(1, 0, 2, 3) batch_output = self.postprocess(batch_vid, output_type) outputs.append(batch_output) if output_type == "np": outputs = np.stack(outputs) elif output_type == "pt": outputs = torch.stack(outputs) elif not output_type == "pil": raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
7
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/video_processor.py
return outputs
7
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/video_processor.py
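And the matching export step, again a hedged sketch; the tensor below stands in for the decoded output of a video VAE, which is expected to be in [-1, 1]:

```python
import torch
from diffusers.video_processor import VideoProcessor

video_processor = VideoProcessor(vae_scale_factor=8)

video = torch.rand(1, 3, 16, 256, 256) * 2 - 1   # (batch, channels, frames, height, width)

frames = video_processor.postprocess_video(video, output_type="pil")
print(len(frames), len(frames[0]))  # 1 16 -> one list of PIL frames per video in the batch
```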
class SchedulerType(Enum): LINEAR = "linear" COSINE = "cosine" COSINE_WITH_RESTARTS = "cosine_with_restarts" POLYNOMIAL = "polynomial" CONSTANT = "constant" CONSTANT_WITH_WARMUP = "constant_with_warmup" PIECEWISE_CONSTANT = "piecewise_constant"
8
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/optimization.py
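These enum values are the scheduler names accepted by the `get_scheduler` helper that lives alongside the enum in `diffusers.optimization`. A small sketch (the optimizer and step counts are arbitrary):

```python
import torch
from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# "cosine_with_restarts" corresponds to SchedulerType.COSINE_WITH_RESTARTS above.
lr_scheduler = get_scheduler(
    "cosine_with_restarts",
    optimizer=optimizer,
    num_warmup_steps=500,
    num_training_steps=10_000,
)

for _ in range(3):
    optimizer.step()
    lr_scheduler.step()
```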
class EMAModel: """ Exponential Moving Average of model weights """
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
def __init__( self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0, power: Union[float, int] = 2 / 3, foreach: bool = False, model_cls: Optional[Any] = None, model_config: Dict[str, Any] = None, **kwargs, ): """ Args: parameters (Iterable[torch.nn.Parameter]): The parameters to track. decay (float): The decay factor for the exponential moving average. min_decay (float): The minimum decay factor for the exponential moving average. update_after_step (int): The number of steps to wait before starting to update the EMA weights. use_ema_warmup (bool): Whether to use EMA warmup. inv_gamma (float):
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
Inverse multiplicative factor of EMA warmup. Default: 1. Only used if `use_ema_warmup` is True. power (float): Exponential factor of EMA warmup. Default: 2/3. Only used if `use_ema_warmup` is True. foreach (bool): Use torch._foreach functions for updating shadow parameters. Should be faster. device (Optional[Union[str, torch.device]]): The device to store the EMA weights on. If None, the EMA weights will be stored on CPU.
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
@crowsonkb's notes on EMA Warmup: If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are good values for models you plan to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at 215.4k steps). """ if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters()
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility use_ema_warmup = True if kwargs.get("max_value", None) is not None: deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead." deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False) decay = kwargs["max_value"] if kwargs.get("min_value", None) is not None: deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead." deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False) min_decay = kwargs["min_value"] parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters]
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
if kwargs.get("device", None) is not None: deprecation_message = "The `device` argument is deprecated. Please use `to` instead." deprecate("device", "1.0.0", deprecation_message, standard_warn=False) self.to(device=kwargs["device"]) self.temp_stored_params = None self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_ema_warmup = use_ema_warmup self.inv_gamma = inv_gamma self.power = power self.optimization_step = 0 self.cur_decay_value = None # set in `step()` self.foreach = foreach self.model_cls = model_cls self.model_config = model_config @classmethod def from_pretrained(cls, path, model_cls, foreach=False) -> "EMAModel": _, ema_kwargs = model_cls.from_config(path, return_unused_kwargs=True) model = model_cls.from_pretrained(path)
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config, foreach=foreach) ema_model.load_state_dict(ema_kwargs) return ema_model def save_pretrained(self, path): if self.model_cls is None: raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.") if self.model_config is None: raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.") model = self.model_cls.from_config(self.model_config) state_dict = self.state_dict() state_dict.pop("shadow_params", None) model.register_to_config(**state_dict) self.copy_to(model.parameters()) model.save_pretrained(path) def get_decay(self, optimization_step: int) -> float: """ Compute the decay factor for the exponential moving average. """ step = max(0, optimization_step - self.update_after_step - 1)
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
if step <= 0: return 0.0 if self.use_ema_warmup: cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power else: cur_decay_value = (1 + step) / (10 + step) cur_decay_value = min(cur_decay_value, self.decay) # make sure decay is not smaller than min_decay cur_decay_value = max(cur_decay_value, self.min_decay) return cur_decay_value
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
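To make the warmup numbers above concrete, here is a pure-arithmetic sketch of the schedule computed by `get_decay` (no diffusers import needed; the step values are chosen to match the notes):

```python
def ema_decay(step, inv_gamma=1.0, power=2 / 3, max_decay=0.9999, min_decay=0.0):
    # Mirrors get_decay with use_ema_warmup=True:
    # 1 - (1 + step / inv_gamma) ** -power, clamped to [min_decay, max_decay].
    if step <= 0:
        return 0.0
    value = 1 - (1 + step / inv_gamma) ** -power
    return max(min_decay, min(value, max_decay))

# gamma=1, power=2/3 reaches ~0.999 around 31.6K steps and hits the 0.9999 cap near 1M steps.
for step in (100, 1_000, 31_600, 1_000_000):
    print(step, round(ema_decay(step), 5))
```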
@torch.no_grad() def step(self, parameters: Iterable[torch.nn.Parameter]): if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters() parameters = list(parameters) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. decay = self.get_decay(self.optimization_step) self.cur_decay_value = decay one_minus_decay = 1 - decay context_manager = contextlib.nullcontext()
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
if self.foreach: if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(parameters, modifier_rank=None) with context_manager: params_grad = [param for param in parameters if param.requires_grad] s_params_grad = [ s_param for s_param, param in zip(self.shadow_params, parameters) if param.requires_grad ] if len(params_grad) < len(parameters): torch._foreach_copy_( [s_param for s_param, param in zip(self.shadow_params, parameters) if not param.requires_grad], [param for param in parameters if not param.requires_grad], non_blocking=True, )
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
torch._foreach_sub_( s_params_grad, torch._foreach_sub(s_params_grad, params_grad), alpha=one_minus_decay ) else: for s_param, param in zip(self.shadow_params, parameters): if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None) with context_manager: if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param)) else: s_param.copy_(param) def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: """ Copy current averaged parameters into given collection of parameters.
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored moving averages. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ parameters = list(parameters) if self.foreach: torch._foreach_copy_( [param.data for param in parameters], [s_param.to(param.device).data for s_param, param in zip(self.shadow_params, parameters)], ) else: for s_param, param in zip(self.shadow_params, parameters): param.data.copy_(s_param.to(param.device).data) def pin_memory(self) -> None: r""" Move internal buffers of the ExponentialMovingAverage to pinned memory. Useful for non-blocking transfers for offloading EMA params to the host. """ self.shadow_params = [p.pin_memory() for p in self.shadow_params]
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
def to(self, device=None, dtype=None, non_blocking=False) -> None: r""" Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to` """ # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype, non_blocking=non_blocking) if p.is_floating_point() else p.to(device=device, non_blocking=non_blocking) for p in self.shadow_params ]
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
def state_dict(self) -> dict: r""" Returns the state of the ExponentialMovingAverage as a dict. This method is used by accelerate during checkpointing to save the ema state dict. """ # Following PyTorch conventions, references to tensors are returned: # "returns a reference to the state and not its copy!" - # https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def store(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Saves the current parameters for restoring later.
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
Args: parameters: Iterable of `torch.nn.Parameter`. The parameters to be temporarily stored. """ self.temp_stored_params = [param.detach().cpu().clone() for param in parameters] def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Restore the parameters stored with the `store` method. Useful to validate the model with EMA parameters without affecting the original optimization process. Store the parameters before the `copy_to()` method. After validation (or model saving), use this to restore the former parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored parameters. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
if self.temp_stored_params is None: raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`") if self.foreach: torch._foreach_copy_( [param.data for param in parameters], [c_param.data for c_param in self.temp_stored_params] ) else: for c_param, param in zip(self.temp_stored_params, parameters): param.data.copy_(c_param.data) # Better memory-wise. self.temp_stored_params = None def load_state_dict(self, state_dict: dict) -> None: r""" Loads the ExponentialMovingAverage state. This method is used by accelerate during checkpointing to save the ema state dict. Args: state_dict (dict): EMA state. Should be an object returned from a call to :meth:`state_dict`. """ # deepcopy, to be consistent with module API state_dict = copy.deepcopy(state_dict)
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
self.decay = state_dict.get("decay", self.decay) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("Decay must be between 0 and 1") self.min_decay = state_dict.get("min_decay", self.min_decay) if not isinstance(self.min_decay, float): raise ValueError("Invalid min_decay") self.optimization_step = state_dict.get("optimization_step", self.optimization_step) if not isinstance(self.optimization_step, int): raise ValueError("Invalid optimization_step") self.update_after_step = state_dict.get("update_after_step", self.update_after_step) if not isinstance(self.update_after_step, int): raise ValueError("Invalid update_after_step") self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup) if not isinstance(self.use_ema_warmup, bool): raise ValueError("Invalid use_ema_warmup")
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma) if not isinstance(self.inv_gamma, (float, int)): raise ValueError("Invalid inv_gamma") self.power = state_dict.get("power", self.power) if not isinstance(self.power, (float, int)): raise ValueError("Invalid power") shadow_params = state_dict.get("shadow_params", None) if shadow_params is not None: self.shadow_params = shadow_params if not isinstance(self.shadow_params, list): raise ValueError("shadow_params must be a list") if not all(isinstance(p, torch.Tensor) for p in self.shadow_params): raise ValueError("shadow_params must all be Tensors")
9
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/training_utils.py
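Putting the pieces above together, a hedged training-loop sketch for `EMAModel` (the model, optimizer, and data are placeholders):

```python
import torch
from diffusers.training_utils import EMAModel

model = torch.nn.Linear(8, 8)
ema = EMAModel(model.parameters(), decay=0.9999, use_ema_warmup=True)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

for _ in range(10):
    loss = model(torch.randn(2, 8)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ema.step(model.parameters())       # update the shadow parameters after each optimizer step

# Swap in the EMA weights for evaluation, then restore the raw training weights.
ema.store(model.parameters())
ema.copy_to(model.parameters())
# ... run validation ...
ema.restore(model.parameters())
```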
class PipelineCallback(ConfigMixin): """ Base class for all the official callbacks used in a pipeline. This class provides a structure for implementing custom callbacks and ensures that all callbacks have a consistent interface. Please implement the following: `tensor_inputs`: This should return a list of tensor inputs specific to your callback. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. `callback_fn`: This method defines the core functionality of your callback. """ config_name = CONFIG_NAME @register_to_config def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None): super().__init__()
10
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
if (cutoff_step_ratio is None and cutoff_step_index is None) or ( cutoff_step_ratio is not None and cutoff_step_index is not None ): raise ValueError("Either cutoff_step_ratio or cutoff_step_index should be provided, not both or none.") if cutoff_step_ratio is not None and ( not isinstance(cutoff_step_ratio, float) or not (0.0 <= cutoff_step_ratio <= 1.0) ): raise ValueError("cutoff_step_ratio must be a float between 0.0 and 1.0.") @property def tensor_inputs(self) -> List[str]: raise NotImplementedError(f"You need to set the attribute `tensor_inputs` for {self.__class__}") def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]: raise NotImplementedError(f"You need to implement the method `callback_fn` for {self.__class__}")
10
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: return self.callback_fn(pipeline, step_index, timestep, callback_kwargs)
10
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
class MultiPipelineCallbacks: """ This class is designed to handle multiple pipeline callbacks. It accepts a list of PipelineCallback objects and provides a unified interface for calling all of them. """ def __init__(self, callbacks: List[PipelineCallback]): self.callbacks = callbacks @property def tensor_inputs(self) -> List[str]: return [input for callback in self.callbacks for input in callback.tensor_inputs] def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: """ Calls all the callbacks in order with the given arguments and returns the final callback_kwargs. """ for callback in self.callbacks: callback_kwargs = callback(pipeline, step_index, timestep, callback_kwargs) return callback_kwargs
11
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
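As a sketch of the interface described above, a hypothetical callback that overrides only the two required members. `LogLatentsCallback` is invented for the example, and `latents` must appear in the target pipeline's `_callback_tensor_inputs` for it to work:

```python
from typing import Any, Dict, List

from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback


class LogLatentsCallback(PipelineCallback):
    # Only names present in the pipeline's `_callback_tensor_inputs` may be requested here.
    tensor_inputs: List[str] = ["latents"]

    def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
        latents = callback_kwargs[self.tensor_inputs[0]]
        print(f"step {step_index}: latents std = {latents.std().item():.4f}")
        return callback_kwargs


# Several callbacks can be bundled and passed wherever a single callback is accepted.
callbacks = MultiPipelineCallbacks([LogLatentsCallback()])
```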
class SDCFGCutoffCallback(PipelineCallback): """ Callback function for Stable Diffusion Pipelines. After a certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = ["prompt_embeds"] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) )
12
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds return callback_kwargs
12
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
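A usage sketch for the cutoff callback; the model id, prompt, and the `callback_on_step_end` plumbing follow the standard diffusers pipeline call signature but are assumptions here:

```python
import torch
from diffusers import StableDiffusionPipeline
from diffusers.callbacks import SDCFGCutoffCallback

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Disable classifier-free guidance after 40% of the denoising steps.
callback = SDCFGCutoffCallback(cutoff_step_ratio=0.4)

image = pipe(
    "a photo of an astronaut riding a horse",
    num_inference_steps=30,
    callback_on_step_end=callback,
    callback_on_step_end_tensor_inputs=callback.tensor_inputs,
).images[0]
```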
class SDXLCFGCutoffCallback(PipelineCallback): """ Callback function for the base Stable Diffusion XL Pipelines. After a certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = [ "prompt_embeds", "add_text_embeds", "add_time_ids", ] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) )
13
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. add_text_embeds = callback_kwargs[self.tensor_inputs[1]] add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens add_time_ids = callback_kwargs[self.tensor_inputs[2]] add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds callback_kwargs[self.tensor_inputs[1]] = add_text_embeds callback_kwargs[self.tensor_inputs[2]] = add_time_ids return callback_kwargs
13
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
class SDXLControlnetCFGCutoffCallback(PipelineCallback): """ Callback function for the Controlnet Stable Diffusion XL Pipelines. After a certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = [ "prompt_embeds", "add_text_embeds", "add_time_ids", "image", ] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) )
14
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. add_text_embeds = callback_kwargs[self.tensor_inputs[1]] add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens add_time_ids = callback_kwargs[self.tensor_inputs[2]] add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector # For Controlnet image = callback_kwargs[self.tensor_inputs[3]] image = image[-1:] pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds callback_kwargs[self.tensor_inputs[1]] = add_text_embeds callback_kwargs[self.tensor_inputs[2]] = add_time_ids callback_kwargs[self.tensor_inputs[3]] = image
14
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
return callback_kwargs
14
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
class IPAdapterScaleCutoffCallback(PipelineCallback): """ Callback function for any pipeline that inherits `IPAdapterMixin`. After a certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will set the IP Adapter scale to `0.0`. Note: This callback mutates the IP Adapter attention processors by setting the scale to 0.0 after the cutoff step. """ tensor_inputs = [] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: pipeline.set_ip_adapter_scale(0.0) return callback_kwargs
15
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/callbacks.py
class ValueGuidedRLPipeline(DiffusionPipeline): r""" Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: value_function ([`UNet1DModel`]): A specialized UNet for fine-tuning trajectories based on reward. unet ([`UNet1DModel`]): UNet architecture to denoise the encoded trajectories. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this application is [`DDPMScheduler`]. env (): An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models. """
16
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
def __init__( self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env, ): super().__init__() self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env) self.data = env.get_dataset() self.means = {} for key in self.data.keys(): try: self.means[key] = self.data[key].mean() except: # noqa: E722 pass self.stds = {} for key in self.data.keys(): try: self.stds[key] = self.data[key].std() except: # noqa: E722 pass self.state_dim = env.observation_space.shape[0] self.action_dim = env.action_space.shape[0] def normalize(self, x_in, key): return (x_in - self.means[key]) / self.stds[key] def de_normalize(self, x_in, key): return x_in * self.stds[key] + self.means[key]
16
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
def to_torch(self, x_in): if isinstance(x_in, dict): return {k: self.to_torch(v) for k, v in x_in.items()} elif torch.is_tensor(x_in): return x_in.to(self.unet.device) return torch.tensor(x_in, device=self.unet.device) def reset_x0(self, x_in, cond, act_dim): for key, val in cond.items(): x_in[:, key, act_dim:] = val.clone() return x_in def run_diffusion(self, x, conditions, n_guide_steps, scale): batch_size = x.shape[0] y = None for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_()
16
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
# permute to match dimension for pre-trained models y = self.value_function(x.permute(0, 2, 1), timesteps).sample grad = torch.autograd.grad([y.sum()], [x])[0] posterior_variance = self.scheduler._get_variance(i) model_std = torch.exp(0.5 * posterior_variance) grad = model_std * grad grad[timesteps < 2] = 0 x = x.detach() x = x + scale * grad x = self.reset_x0(x, conditions, self.action_dim) prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) # TODO: verify deprecation of this kwarg x = self.scheduler.step(prev_x, i, x)["prev_sample"] # apply conditions to the trajectory (set the initial state) x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) return x, y
16
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): # normalize the observations and create batch dimension obs = self.normalize(obs, "observations") obs = obs[None].repeat(batch_size, axis=0) conditions = {0: self.to_torch(obs)} shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) x1 = randn_tensor(shape, device=self.unet.device) x = self.reset_x0(x1, conditions, self.action_dim) x = self.to_torch(x) # run the diffusion process x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
16
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
# sort output trajectories by value sorted_idx = y.argsort(0, descending=True).squeeze() sorted_values = x[sorted_idx] actions = sorted_values[:, :, : self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key="actions") # select the action with the highest value if y is not None: selected_index = 0 else: # if we didn't run value guiding, select a random action selected_index = np.random.randint(0, batch_size) denorm_actions = denorm_actions[selected_index, 0] return denorm_actions
16
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py
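A rough usage sketch for the value-guided pipeline. The environment id and checkpoint name are illustrative assumptions (the docstring above notes that only Hopper has pretrained models), and a D4RL-style offline-RL gym installation is required:

```python
import gym
from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")  # D4RL environment providing `get_dataset()`

pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32",  # illustrative checkpoint id
    env=env,
)

obs = env.reset()
for _ in range(100):
    action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
    obs, reward, done, info = env.step(action)
    if done:
        break
```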
class QuantizationMethod(str, Enum): BITS_AND_BYTES = "bitsandbytes" GGUF = "gguf" TORCHAO = "torchao"
17
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
class QuantizationConfigMixin: """ Mixin class for quantization config """ quant_method: QuantizationMethod _exclude_attributes_at_init = [] @classmethod def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs): """ Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters. Args: config_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the configuration object. return_unused_kwargs (`bool`, *optional*, defaults to `False`): Whether or not to return a list of unused keyword arguments. Used for the `from_pretrained` method in `PreTrainedModel`. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the configuration object. Returns: [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters. """ config = cls(**config_dict)
18
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) if return_unused_kwargs: return config, kwargs else: return config def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file.
18
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `QuantizationConfig()` is serialized to JSON file. """ with open(json_file_path, "w", encoding="utf-8") as writer: config_dict = self.to_dict() json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n" writer.write(json_string) def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ return copy.deepcopy(self.__dict__)
18
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
def __iter__(self): """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin""" for attr, value in copy.deepcopy(self.__dict__).items(): yield attr, value def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" def to_json_string(self, use_diff: bool = True) -> str: """ Serializes this instance to a JSON string. Args: use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` is serialized to JSON string. Returns: `str`: String containing all the attributes that make up this configuration instance in JSON format. """ if use_diff is True: config_dict = self.to_diff_dict() else: config_dict = self.to_dict() return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
18
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
def update(self, **kwargs): """ Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes, returning all the unused kwargs. Args: kwargs (`Dict[str, Any]`): Dictionary of attributes to tentatively update this class. Returns: `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. """ to_remove = [] for key, value in kwargs.items(): if hasattr(self, key): setattr(self, key, value) to_remove.append(key) # Remove all the attributes that were updated, without modifying the input dict unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove} return unused_kwargs
18
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
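A small sketch of the `from_dict`/`update` round trip above, using `BitsAndBytesConfig` (defined below) as the concrete subclass; it assumes `bitsandbytes` is installed, since that config validates the library version at construction time:

```python
from diffusers import BitsAndBytesConfig

config = BitsAndBytesConfig.from_dict({"load_in_4bit": True, "bnb_4bit_quant_type": "nf4"})
print(config.to_json_string())  # with use_diff=True, only non-default fields are serialized

unused = config.update(bnb_4bit_use_double_quant=True, not_a_real_field=123)
print(unused)  # {'not_a_real_field': 123} -- unknown keys are returned, not applied
```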
class BitsAndBytesConfig(QuantizationConfigMixin): """ This is a wrapper class for all the attributes and features that you can play with for a model that has been loaded using `bitsandbytes`. It replaces `load_in_8bit` or `load_in_4bit`; therefore, both options are mutually exclusive. Currently it only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`, then more arguments will be added to this class.
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
Args: load_in_8bit (`bool`, *optional*, defaults to `False`): This flag is used to enable 8-bit quantization with LLM.int8(). load_in_4bit (`bool`, *optional*, defaults to `False`): This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from `bitsandbytes`. llm_int8_threshold (`float`, *optional*, defaults to 6.0): This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339 Any hidden states value that is above this threshold will be considered an outlier and the operation on those values will be done in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models.
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning). llm_int8_skip_modules (`List[str]`, *optional*): An explicit list of the modules that we do not want to convert to 8-bit. This is useful for models such as Jukebox that have several heads in different places and not necessarily at the last position. For example, for `CausalLM` models, the last `lm_head` is typically kept in its original `dtype`. llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`): This flag is used for advanced use cases and users that are aware of this feature. If you want to split
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8 operations will not be run on CPU. llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`): This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not have to be converted back and forth for the backward pass. bnb_4bit_compute_dtype (`torch.dtype` or str, *optional*, defaults to `torch.float32`): This sets the computational type which might be different than the input type. For example, inputs might be fp32, but computation can be set to bf16 for speedups. bnb_4bit_quant_type (`str`, *optional*, defaults to `"fp4"`): This sets the quantization data type in the bnb.nn.Linear4Bit layers. Options are FP4 and NF4 data types
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
which are specified by `fp4` or `nf4`. bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`): This flag is used for nested quantization where the quantization constants from the first quantization are quantized again. bnb_4bit_quant_storage (`torch.dtype` or str, *optional*, defaults to `torch.uint8`): This sets the storage type to pack the quantized 4-bit params. kwargs (`Dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object. """
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
_exclude_attributes_at_init = ["_load_in_4bit", "_load_in_8bit", "quant_method"] def __init__( self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, bnb_4bit_quant_storage=None, **kwargs, ): self.quant_method = QuantizationMethod.BITS_AND_BYTES if load_in_4bit and load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
self._load_in_8bit = load_in_8bit self._load_in_4bit = load_in_4bit self.llm_int8_threshold = llm_int8_threshold self.llm_int8_skip_modules = llm_int8_skip_modules self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight self.bnb_4bit_quant_type = bnb_4bit_quant_type self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant if bnb_4bit_compute_dtype is None: self.bnb_4bit_compute_dtype = torch.float32 elif isinstance(bnb_4bit_compute_dtype, str): self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype) elif isinstance(bnb_4bit_compute_dtype, torch.dtype): self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
if bnb_4bit_quant_storage is None: self.bnb_4bit_quant_storage = torch.uint8 elif isinstance(bnb_4bit_quant_storage, str): if bnb_4bit_quant_storage not in ["float16", "float32", "int8", "uint8", "float64", "bfloat16"]: raise ValueError( "`bnb_4bit_quant_storage` must be a valid string (one of 'float16', 'float32', 'int8', 'uint8', 'float64', 'bfloat16') " ) self.bnb_4bit_quant_storage = getattr(torch, bnb_4bit_quant_storage) elif isinstance(bnb_4bit_quant_storage, torch.dtype): self.bnb_4bit_quant_storage = bnb_4bit_quant_storage else: raise ValueError("bnb_4bit_quant_storage must be a string or a torch.dtype") if kwargs and not all(k in self._exclude_attributes_at_init for k in kwargs): logger.warning(f"Unused kwargs: {list(kwargs.keys())}. These kwargs are not used in {self.__class__}.") self.post_init()
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
@property def load_in_4bit(self): return self._load_in_4bit @load_in_4bit.setter def load_in_4bit(self, value: bool): if not isinstance(value, bool): raise TypeError("load_in_4bit must be a boolean") if self.load_in_8bit and value: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") self._load_in_4bit = value @property def load_in_8bit(self): return self._load_in_8bit @load_in_8bit.setter def load_in_8bit(self, value: bool): if not isinstance(value, bool): raise TypeError("load_in_8bit must be a boolean") if self.load_in_4bit and value: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") self._load_in_8bit = value
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
def post_init(self): r""" Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. """ if not isinstance(self.load_in_4bit, bool): raise TypeError("load_in_4bit must be a boolean") if not isinstance(self.load_in_8bit, bool): raise TypeError("load_in_8bit must be a boolean") if not isinstance(self.llm_int8_threshold, float): raise TypeError("llm_int8_threshold must be a float") if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list): raise TypeError("llm_int8_skip_modules must be a list of strings") if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool): raise TypeError("llm_int8_enable_fp32_cpu_offload must be a boolean") if not isinstance(self.llm_int8_has_fp16_weight, bool): raise TypeError("llm_int8_has_fp16_weight must be a boolean")
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): raise TypeError("bnb_4bit_compute_dtype must be torch.dtype") if not isinstance(self.bnb_4bit_quant_type, str): raise TypeError("bnb_4bit_quant_type must be a string") if not isinstance(self.bnb_4bit_use_double_quant, bool): raise TypeError("bnb_4bit_use_double_quant must be a boolean") if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def is_quantizable(self): r""" Returns `True` if the model is quantizable, `False` otherwise. """ return self.load_in_8bit or self.load_in_4bit
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
def quantization_method(self): r""" This method returns the quantization method used for the model. If the model is not quantizable, it returns `None`. """ if self.load_in_8bit: return "llm_int8" elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4": return "fp4" elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4": return "nf4" else: return None
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1] output["bnb_4bit_quant_storage"] = str(output["bnb_4bit_quant_storage"]).split(".")[1] output["load_in_4bit"] = self.load_in_4bit output["load_in_8bit"] = self.load_in_8bit return output def __repr__(self): config_dict = self.to_dict() return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n" def to_diff_dict(self) -> Dict[str, Any]: """ Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary.
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, """ config_dict = self.to_dict() # get the default config dict default_config_dict = BitsAndBytesConfig().to_dict() serializable_config_dict = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: serializable_config_dict[key] = value return serializable_config_dict
19
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
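A hedged end-to-end sketch of loading a diffusers model in 4-bit NF4 with this config; the model id is illustrative, and the bitsandbytes-backed loading path requires a recent diffusers release plus `bitsandbytes` and a CUDA GPU:

```python
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)

model = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    quantization_config=nf4_config,
    torch_dtype=torch.bfloat16,
)
```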
class GGUFQuantizationConfig(QuantizationConfigMixin): """This is a config class for GGUF Quantization techniques. Args: compute_dtype (`torch.dtype`, defaults to `torch.float32`): This sets the computational type which might be different than the input type. For example, inputs might be fp32, but computation can be set to bf16 for speedups. """ def __init__(self, compute_dtype: Optional["torch.dtype"] = None): self.quant_method = QuantizationMethod.GGUF self.compute_dtype = compute_dtype self.pre_quantized = True # TODO: (Dhruv) Add this as an init argument when we can support loading unquantized checkpoints. self.modules_to_not_convert = None if self.compute_dtype is None: self.compute_dtype = torch.float32
20
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
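A usage sketch for GGUF loading; the checkpoint URL is illustrative, and the `from_single_file` + `GGUFQuantizationConfig` combination follows the documented diffusers GGUF workflow:

```python
import torch
from diffusers import FluxTransformer2DModel, GGUFQuantizationConfig

ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q4_0.gguf"  # illustrative

transformer = FluxTransformer2DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)
```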
class TorchAoConfig(QuantizationConfigMixin): """This is a config class for torchao quantization/sparsity techniques. Args: quant_type (`str`): The type of quantization we want to use, currently supporting: - **Integer quantization:** - Full function names: `int4_weight_only`, `int8_dynamic_activation_int4_weight`, `int8_weight_only`, `int8_dynamic_activation_int8_weight` - Shorthands: `int4wo`, `int4dq`, `int8wo`, `int8dq` - **Floating point 8-bit quantization:** - Full function names: `float8_weight_only`, `float8_dynamic_activation_float8_weight`, `float8_static_activation_float8_weight` - Shorthands: `float8wo`, `float8wo_e5m2`, `float8wo_e4m3`, `float8dq`, `float8dq_e4m3`, `float8_e4m3_tensor`, `float8_e4m3_row`,
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
- **Floating point X-bit quantization:** - Full function names: `fpx_weight_only` - Shorthands: `fpX_eAmB`, where `X` is the number of bits (between `1` and `7`), `A` is the number of exponent bits and `B` is the number of mantissa bits. The constraint of `X == A + B + 1` must be satisfied for a given shorthand notation.
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
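A tiny sketch of the fpx shorthand rule; it assumes `torchao` is installed and, per the capability check later in this class, a GPU with CUDA compute capability >= 8.9 so the floating-point types are registered:

```python
from diffusers import TorchAoConfig

# fpX_eAmB shorthand: X total bits = A exponent bits + B mantissa bits + 1 sign bit.
# fp6_e3m2 -> 3 + 2 + 1 = 6, equivalent to fpx_weight_only(ebits=3, mbits=2).
config = TorchAoConfig("fp6_e3m2")
```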
- **Unsigned Integer quantization:** - Full function names: `uintx_weight_only` - Shorthands: `uint1wo`, `uint2wo`, `uint3wo`, `uint4wo`, `uint5wo`, `uint6wo`, `uint7wo` modules_to_not_convert (`List[str]`, *optional*, defaults to `None`): The list of modules to not quantize, useful for quantizing models that explicitly require to have some modules left in their original precision. kwargs (`Dict[str, Any]`, *optional*): The keyword arguments for the chosen type of quantization, for example, int4_weight_only quantization supports two keyword arguments `group_size` and `inner_k_tiles` currently. More API examples and documentation of arguments can be found in https://github.com/pytorch/ao/tree/main/torchao/quantization#other-available-quantization-techniques Example: ```python from diffusers import FluxTransformer2DModel, TorchAoConfig
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
quantization_config = TorchAoConfig("int8wo") transformer = FluxTransformer2DModel.from_pretrained( "black-forest-labs/Flux.1-Dev", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ) ``` """ def __init__(self, quant_type: str, modules_to_not_convert: Optional[List[str]] = None, **kwargs) -> None: self.quant_method = QuantizationMethod.TORCHAO self.quant_type = quant_type self.modules_to_not_convert = modules_to_not_convert # When we load from serialized config, "quant_type_kwargs" will be the key if "quant_type_kwargs" in kwargs: self.quant_type_kwargs = kwargs["quant_type_kwargs"] else: self.quant_type_kwargs = kwargs
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method() if self.quant_type not in TORCHAO_QUANT_TYPE_METHODS.keys(): raise ValueError( f"Requested quantization type: {self.quant_type} is not supported yet or is incorrect. If you think the " f"provided quantization type should be supported, please open an issue at https://github.com/huggingface/diffusers/issues." ) method = TORCHAO_QUANT_TYPE_METHODS[self.quant_type] signature = inspect.signature(method) all_kwargs = { param.name for param in signature.parameters.values() if param.kind in [inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD] } unsupported_kwargs = list(self.quant_type_kwargs.keys() - all_kwargs)
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
if len(unsupported_kwargs) > 0: raise ValueError( f'The quantization method "{quant_type}" does not support the following keyword arguments: ' f"{unsupported_kwargs}. The following keyword arguments are supported: {all_kwargs}." ) @classmethod def _get_torchao_quant_type_to_method(cls): r""" Returns supported torchao quantization types with all commonly used notations. """ if is_torchao_available(): # TODO(aryan): Support autoquant and sparsify from torchao.quantization import ( float8_dynamic_activation_float8_weight, float8_static_activation_float8_weight, float8_weight_only, fpx_weight_only, int4_weight_only, int8_dynamic_activation_int4_weight, int8_dynamic_activation_int8_weight, int8_weight_only, uintx_weight_only, )
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
# TODO(aryan): Add a note on how to use PerAxis and PerGroup observers from torchao.quantization.observer import PerRow, PerTensor def generate_float8dq_types(dtype: torch.dtype): name = "e5m2" if dtype == torch.float8_e5m2 else "e4m3" types = {} for granularity_cls in [PerTensor, PerRow]: # Note: Activation and Weights cannot have different granularities granularity_name = "tensor" if granularity_cls is PerTensor else "row" types[f"float8dq_{name}_{granularity_name}"] = partial( float8_dynamic_activation_float8_weight, activation_dtype=dtype, weight_dtype=dtype, granularity=(granularity_cls(), granularity_cls()), ) return types def generate_fpx_quantization_types(bits: int): types = {}
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
for ebits in range(1, bits): mbits = bits - ebits - 1 types[f"fp{bits}_e{ebits}m{mbits}"] = partial(fpx_weight_only, ebits=ebits, mbits=mbits) non_sign_bits = bits - 1 default_ebits = (non_sign_bits + 1) // 2 default_mbits = non_sign_bits - default_ebits types[f"fp{bits}"] = partial(fpx_weight_only, ebits=default_ebits, mbits=default_mbits) return types INT4_QUANTIZATION_TYPES = { # int4 weight + bfloat16/float16 activation "int4wo": int4_weight_only, "int4_weight_only": int4_weight_only, # int4 weight + int8 activation "int4dq": int8_dynamic_activation_int4_weight, "int8_dynamic_activation_int4_weight": int8_dynamic_activation_int4_weight, }
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
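Similarly, a standalone sketch of the key names produced by `generate_fpx_quantization_types`; for example `fp6` defaults to an `e3m2` split in the real helper:

```python
def fpx_key_names(bits: int) -> list:
    # One key per explicit exponent/mantissa split, plus a default "fp{bits}" key
    # (the real helper derives a default split from the number of non-sign bits).
    keys = [f"fp{bits}_e{ebits}m{bits - ebits - 1}" for ebits in range(1, bits)]
    keys.append(f"fp{bits}")
    return keys

print(fpx_key_names(6))
# ['fp6_e1m4', 'fp6_e2m3', 'fp6_e3m2', 'fp6_e4m1', 'fp6_e5m0', 'fp6']
```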
INT8_QUANTIZATION_TYPES = { # int8 weight + bfloat16/float16 activation "int8wo": int8_weight_only, "int8_weight_only": int8_weight_only, # int8 weight + int8 activation "int8dq": int8_dynamic_activation_int8_weight, "int8_dynamic_activation_int8_weight": int8_dynamic_activation_int8_weight, }
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
# TODO(aryan): handle torch 2.2/2.3 FLOATX_QUANTIZATION_TYPES = { # float8_e5m2 weight + bfloat16/float16 activation "float8wo": partial(float8_weight_only, weight_dtype=torch.float8_e5m2), "float8_weight_only": float8_weight_only, "float8wo_e5m2": partial(float8_weight_only, weight_dtype=torch.float8_e5m2), # float8_e4m3 weight + bfloat16/float16 activation "float8wo_e4m3": partial(float8_weight_only, weight_dtype=torch.float8_e4m3fn), # float8_e5m2 weight + float8 activation (dynamic) "float8dq": float8_dynamic_activation_float8_weight, "float8_dynamic_activation_float8_weight": float8_dynamic_activation_float8_weight, # ===== Matrix multiplication is not supported in float8_e5m2 so the following errors out. # However, changing activation_dtype=torch.float8_e4m3 might work here =====
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
# "float8dq_e5m2": partial( # float8_dynamic_activation_float8_weight, # activation_dtype=torch.float8_e5m2, # weight_dtype=torch.float8_e5m2, # ), # **generate_float8dq_types(torch.float8_e5m2), # ===== ===== # float8_e4m3 weight + float8 activation (dynamic) "float8dq_e4m3": partial( float8_dynamic_activation_float8_weight, activation_dtype=torch.float8_e4m3fn, weight_dtype=torch.float8_e4m3fn, ), **generate_float8dq_types(torch.float8_e4m3fn), # float8 weight + float8 activation (static) "float8_static_activation_float8_weight": float8_static_activation_float8_weight, # For fpx, only x <= 8 is supported by default. Other dtypes can be explored by users directly
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
# fpx weight + bfloat16/float16 activation **generate_fpx_quantization_types(3), **generate_fpx_quantization_types(4), **generate_fpx_quantization_types(5), **generate_fpx_quantization_types(6), **generate_fpx_quantization_types(7), }
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
UINTX_QUANTIZATION_DTYPES = { "uintx_weight_only": uintx_weight_only, "uint1wo": partial(uintx_weight_only, dtype=torch.uint1), "uint2wo": partial(uintx_weight_only, dtype=torch.uint2), "uint3wo": partial(uintx_weight_only, dtype=torch.uint3), "uint4wo": partial(uintx_weight_only, dtype=torch.uint4), "uint5wo": partial(uintx_weight_only, dtype=torch.uint5), "uint6wo": partial(uintx_weight_only, dtype=torch.uint6), "uint7wo": partial(uintx_weight_only, dtype=torch.uint7), # "uint8wo": partial(uintx_weight_only, dtype=torch.uint8), # uint8 quantization is not supported } QUANTIZATION_TYPES = {} QUANTIZATION_TYPES.update(INT4_QUANTIZATION_TYPES) QUANTIZATION_TYPES.update(INT8_QUANTIZATION_TYPES) QUANTIZATION_TYPES.update(UINTX_QUANTIZATION_DTYPES)
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
if cls._is_cuda_capability_atleast_8_9(): QUANTIZATION_TYPES.update(FLOATX_QUANTIZATION_TYPES) return QUANTIZATION_TYPES else: raise ValueError( "TorchAoConfig requires torchao to be installed, please install with `pip install torchao`" ) @staticmethod def _is_cuda_capability_atleast_8_9() -> bool: if not torch.cuda.is_available(): raise RuntimeError("TorchAO requires a CUDA compatible GPU and installation of PyTorch.") major, minor = torch.cuda.get_device_capability() if major == 8: return minor >= 9 return major >= 9 def get_apply_tensor_subclass(self): TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method() return TORCHAO_QUANT_TYPE_METHODS[self.quant_type](**self.quant_type_kwargs) def __repr__(self): r""" Example of how this looks for `TorchAoConfig("uint_a16w4", group_size=32)`:
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
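As a usage sketch (not part of the source above), `get_apply_tensor_subclass` resolves the configured quant type to the corresponding torchao callable, which can then be applied manually with torchao's `quantize_` helper. The toy model below is purely illustrative and assumes a CUDA GPU plus a torchao install:

```python
import torch
from torchao.quantization import quantize_

from diffusers import TorchAoConfig

# Purely illustrative module; any nn.Module containing Linear layers works.
model = torch.nn.Sequential(torch.nn.Linear(64, 64)).to(device="cuda", dtype=torch.bfloat16)

config = TorchAoConfig("int8wo")
# Resolve "int8wo" to torchao's `int8_weight_only()` and apply it in-place.
quantize_(model, config.get_apply_tensor_subclass())
```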
``` TorchAoConfig { "modules_to_not_convert": null, "quant_method": "torchao", "quant_type": "uint_a16w4", "quant_type_kwargs": { "group_size": 32 } } ``` """ config_dict = self.to_dict() return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n"
21
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/quantization_config.py
class DiffusersQuantizer(ABC):
    """
    Abstract base class for HuggingFace quantizers. Currently supports quantizing HF diffusers models for inference.
    This class is used only within `diffusers.models.modeling_utils.ModelMixin.from_pretrained` and cannot easily be
    used outside the scope of that method yet.

    Attributes:
        quantization_config (`diffusers.quantizers.quantization_config.QuantizationConfigMixin`):
            The quantization config that defines the quantization parameters of the model you want to quantize.
        modules_to_not_convert (`List[str]`, *optional*):
            The list of module names that should not be converted when quantizing the model.
        required_packages (`List[str]`, *optional*):
            The list of pip packages that must be installed before using the quantizer.
        requires_calibration (`bool`):
            Whether the quantization method requires calibrating the model before it can be used.
    """
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
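A minimal sketch of what a concrete subclass has to implement. The `NoOpQuantizer` name and its trivial behavior are made up for illustration; real quantizers replace modules and create quantized parameters in these hooks:

```python
from diffusers.quantizers.base import DiffusersQuantizer

class NoOpQuantizer(DiffusersQuantizer):
    """Toy quantizer that leaves the model untouched."""

    def validate_environment(self, *args, **kwargs):
        return  # nothing to check in this toy example

    def _process_model_before_weight_loading(self, model, **kwargs):
        return model  # real quantizers swap in quantized module skeletons here

    def _process_model_after_weight_loading(self, model, **kwargs):
        return model  # real quantizers finalize or validate the quantized weights here

    @property
    def is_serializable(self):
        return True

    @property
    def is_trainable(self):
        return False
```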
requires_calibration = False
    required_packages = None

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        self.quantization_config = quantization_config

        # -- Handle extra kwargs below --
        self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
        self.pre_quantized = kwargs.pop("pre_quantized", True)

        if not self.pre_quantized and self.requires_calibration:
            raise ValueError(
                f"The quantization method {quantization_config.quant_method} requires the model to be pre-quantized."
                f" You explicitly passed `pre_quantized=False`, meaning the model weights are not quantized."
                f" Pass `pre_quantized=True` when loading an already-quantized checkpoint."
            )
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        """
        Some quantization methods require explicitly setting the dtype of the model to a target dtype. Override this
        method if you need to make sure that behavior is preserved.

        Args:
            torch_dtype (`torch.dtype`):
                The input dtype that is passed in `from_pretrained`
        """
        return torch_dtype

    def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """
        Override this method if you want to replace the existing device map with a new one. E.g. for bitsandbytes,
        since `accelerate` is a hard requirement, if no device_map is passed, the device_map is set to `"auto"`.

        Args:
            device_map (`Union[dict, str]`, *optional*):
                The device_map that is passed through the `from_pretrained` method.
        """
        return device_map
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": """ Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained` to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype` to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`. Args: torch_dtype (`torch.dtype`, *optional*): The torch_dtype that is used to compute the device_map. """ return torch_dtype def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: """ Override this method if you want to adjust the `missing_keys`. Args: missing_keys (`List[str]`, *optional*): The list of missing keys in the checkpoint compared to the state dict of the model """ return missing_keys
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
        """
        Returns the dtypes for modules that are not quantized - used to compute the device_map in case a `str` is
        passed as the device_map. The method relies on the `modules_to_not_convert` that is modified in
        `_process_model_before_weight_loading`. `diffusers` models don't have any `modules_to_not_convert` attributes
        yet, but this may change in the future.

        Args:
            model (`~diffusers.models.modeling_utils.ModelMixin`):
                The model to quantize
            torch_dtype (`torch.dtype`):
                The dtype passed in `from_pretrained` method.
        """
        return {
            name: torch_dtype
            for name, _ in model.named_parameters()
            if any(m in name for m in self.modules_to_not_convert)
        }
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
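To illustrate the return value, here is a self-contained snippet that mirrors the dictionary comprehension above on a toy model (the module names are made up):

```python
import torch
import torch.nn as nn

model = nn.ModuleDict({"proj_out": nn.Linear(4, 4), "attn": nn.Linear(4, 4)})
modules_to_not_convert = ["proj_out"]

# Parameters whose names match an entry in `modules_to_not_convert` keep the
# requested dtype instead of being quantized.
special_dtypes = {
    name: torch.float16
    for name, _ in model.named_parameters()
    if any(m in name for m in modules_to_not_convert)
}
print(special_dtypes)
# {'proj_out.weight': torch.float16, 'proj_out.bias': torch.float16}
```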
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
        """Adjust the max_memory argument for infer_auto_device_map() if extra memory is needed for quantization."""
        return max_memory

    def check_if_quantized_param(
        self,
        model: "ModelMixin",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: Dict[str, Any],
        **kwargs,
    ) -> bool:
        """
        Checks if a loaded state_dict component belongs to a quantized parameter and performs some validation. Only
        defined for quantization methods that require creating new parameters for quantization.
        """
        return False

    def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
        """
        Takes the needed components from the state_dict and creates a quantized parameter.
        """
        return
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
def check_quantized_param_shape(self, *args, **kwargs):
        """
        Checks if the quantized parameter has the expected shape.
        """
        return True

    def validate_environment(self, *args, **kwargs):
        """
        This method is used to check for potential conflicts with arguments passed in `from_pretrained`. You need to
        define it for all future quantizers that are integrated with diffusers. If no explicit checks are needed,
        simply return nothing.
        """
        return

    def preprocess_model(self, model: "ModelMixin", **kwargs):
        """
        Setting model attributes and/or converting the model before weights loading. At this point the model should
        be initialized on the meta device so you can freely manipulate the skeleton of the model in order to replace
        modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
Args:
            model (`~diffusers.models.modeling_utils.ModelMixin`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along to `_process_model_before_weight_loading`.
        """
        model.is_quantized = True
        model.quantization_method = self.quantization_config.quant_method
        return self._process_model_before_weight_loading(model, **kwargs)

    def postprocess_model(self, model: "ModelMixin", **kwargs):
        """
        Post-processes the model after the weights have been loaded. Make sure to override the abstract method
        `_process_model_after_weight_loading`.

        Args:
            model (`~diffusers.models.modeling_utils.ModelMixin`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along to `_process_model_after_weight_loading`.
        """
        return self._process_model_after_weight_loading(model, **kwargs)
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
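The two hooks bracket the weight-loading step. The helper below is a hypothetical, heavily simplified sketch of that ordering (the real `from_pretrained` path also handles device maps, sharded checkpoints, and per-parameter quantization):

```python
def load_quantized(model, state_dict, quantizer):
    # Hypothetical helper, for illustration only.
    quantizer.preprocess_model(model)            # before weights: adjust the model skeleton
    model.load_state_dict(state_dict, strict=False)
    quantizer.postprocess_model(model)           # after weights: finalize quantization
    return model
```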
def dequantize(self, model):
        """
        Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance.
        Note that not all quantization schemes support this.
        """
        model = self._dequantize(model)

        # Delete quantizer and quantization config
        del model.hf_quantizer

        return model

    def _dequantize(self, model):
        raise NotImplementedError(
            f"{self.quantization_config.quant_method} has no implementation of `dequantize`, please raise an issue on GitHub."
        )

    @abstractmethod
    def _process_model_before_weight_loading(self, model, **kwargs): ...

    @abstractmethod
    def _process_model_after_weight_loading(self, model, **kwargs): ...

    @property
    @abstractmethod
    def is_serializable(self): ...

    @property
    @abstractmethod
    def is_trainable(self): ...
22
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/base.py
class DiffusersAutoQuantizer:
    """
    The auto diffusers quantizer class that takes care of automatically instantiating the correct
    `DiffusersQuantizer` given the `QuantizationConfig`.
    """

    @classmethod
    def from_dict(cls, quantization_config_dict: Dict):
        quant_method = quantization_config_dict.get("quant_method", None)
        # We need special care for bnb models to make sure everything stays backward compatible.
        if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
            suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
            quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
        elif quant_method is None:
            raise ValueError(
                "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized."
            )
23
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/auto.py
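For example, a legacy bitsandbytes config without an explicit `quant_method` is still resolved correctly. A sketch, assuming the bitsandbytes-backed config class is available in your install:

```python
from diffusers.quantizers.auto import DiffusersAutoQuantizer

# No `quant_method` key: the `load_in_4bit` flag alone maps the dict to
# "bitsandbytes_4bit" before the config class is looked up.
legacy_config = {"load_in_4bit": True}
quantization_config = DiffusersAutoQuantizer.from_dict(legacy_config)
print(type(quantization_config).__name__)  # BitsAndBytesConfig
```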
if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys(): raise ValueError( f"Unknown quantization type, got {quant_method} - supported types are:" f" {list(AUTO_QUANTIZER_MAPPING.keys())}" ) target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method] return target_cls.from_dict(quantization_config_dict) @classmethod def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs): # Convert it to a QuantizationConfig if the q_config is a dict if isinstance(quantization_config, dict): quantization_config = cls.from_dict(quantization_config) quant_method = quantization_config.quant_method
23
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/auto.py
# Again, we need special care for bnb as we have a single quantization config
        # class for both 4-bit and 8-bit quantization
        if quant_method == QuantizationMethod.BITS_AND_BYTES:
            if quantization_config.load_in_8bit:
                quant_method += "_8bit"
            else:
                quant_method += "_4bit"

        if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
            )

        target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
        return target_cls(quantization_config, **kwargs)
23
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/auto.py
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        model_config = cls.load_config(pretrained_model_name_or_path, **kwargs)
        if getattr(model_config, "quantization_config", None) is None:
            raise ValueError(
                f"Did not find a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
            )
        quantization_config_dict = model_config.quantization_config
        quantization_config = cls.from_dict(quantization_config_dict)
        # Update with potential kwargs that are passed through from_pretrained.
        quantization_config.update(kwargs)

        return cls.from_config(quantization_config)
23
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/auto.py
@classmethod
    def merge_quantization_configs(
        cls,
        quantization_config: Union[dict, QuantizationConfigMixin],
        quantization_config_from_args: Optional[QuantizationConfigMixin],
    ):
        """
        Handles situations where both a `quantization_config` passed as an argument and a `quantization_config`
        stored in the model config are present.
        """
        if quantization_config_from_args is not None:
            warning_msg = (
                "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
                " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
            )
        else:
            warning_msg = ""

        if isinstance(quantization_config, dict):
            quantization_config = cls.from_dict(quantization_config)

        if warning_msg != "":
            warnings.warn(warning_msg)

        return quantization_config
23
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/auto.py
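A short sketch of the precedence rule: when the checkpoint already carries a quantization config, the one passed as an argument is ignored and a warning is raised. This assumes `BitsAndBytesConfig` can be constructed in your environment:

```python
from diffusers import BitsAndBytesConfig
from diffusers.quantizers.auto import DiffusersAutoQuantizer

config_from_model = {"load_in_8bit": True}                 # serialized with the checkpoint
config_from_args = BitsAndBytesConfig(load_in_4bit=True)   # passed to `from_pretrained`

merged = DiffusersAutoQuantizer.merge_quantization_configs(config_from_model, config_from_args)
print(merged.load_in_8bit)  # True -> the checkpoint's own config wins, with a warning
```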
class GGUFQuantizer(DiffusersQuantizer): use_keep_in_fp32_modules = True def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.compute_dtype = quantization_config.compute_dtype self.pre_quantized = quantization_config.pre_quantized self.modules_to_not_convert = quantization_config.modules_to_not_convert if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert]
24
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/quantizers/gguf/gguf_quantizer.py
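For context, the GGUF quantizer is typically driven through `from_single_file` with a `GGUFQuantizationConfig`. The snippet below is a sketch based on the usual diffusers GGUF workflow; the checkpoint URL is illustrative and the `gguf` package must be installed:

```python
import torch

from diffusers import FluxTransformer2DModel, GGUFQuantizationConfig

# Illustrative GGUF checkpoint; replace with the file you actually want to load.
ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf"

transformer = FluxTransformer2DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)
```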