Diffusers Support

#2
by kshatriya - opened

Hi, do these LoRAs work with the diffusers pipeline? I tried to load one using the statement below, but it does not work. Please clarify.

Statement:
pipe.load_lora_weights("Cseti/AD_Motion_LORAs", weight_name="280_cseti_2906282_drone-orbitright-mv2_r64_w576_h384_fr16.safetensors")

Error:

----> 5 pipe.load_lora_weights("Cseti/AD_Motion_LORAs", weight_name="280_cseti_2906282_drone-orbitright-mv2_r64_w576_h384_fr16.safetensors")

~/.conda/envs/default/lib/python3.9/site-packages/diffusers/loaders/lora_pipeline.py in load_lora_weights(self, pretrained_model_name_or_path_or_dict, adapter_name, **kwargs)
99 raise ValueError("Invalid LoRA checkpoint.")
100
--> 101 self.load_lora_into_unet(
102 state_dict,
103 network_alphas=network_alphas,

~/.conda/envs/default/lib/python3.9/site-packages/diffusers/loaders/lora_pipeline.py in load_lora_into_unet(cls, state_dict, network_alphas, unet, adapter_name, _pipeline)
258 # Load the layers corresponding to UNet.
259 logger.info(f"Loading {cls.unet_name}.")
--> 260 unet.load_attn_procs(
261 state_dict, network_alphas=network_alphas, adapter_name=adapter_name, _pipeline=_pipeline
262 )

~/.conda/envs/default/lib/python3.9/site-packages/huggingface_hub/utils/_validators.py in _inner_fn(*args, **kwargs)
112 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
113
--> 114 return fn(*args, **kwargs)
115
116 return _inner_fn # type: ignore

~/.conda/envs/default/lib/python3.9/site-packages/diffusers/loaders/unet.py in load_attn_procs(self, pretrained_model_name_or_path_or_dict, **kwargs)
204 attn_processors = self._process_custom_diffusion(state_dict=state_dict)
205 elif is_lora:
--> 206 is_model_cpu_offload, is_sequential_cpu_offload = self._process_lora(
207 state_dict=state_dict,
208 unet_identifier_key=self.unet_name,

~/.conda/envs/default/lib/python3.9/site-packages/diffusers/loaders/unet.py in _process_lora(self, state_dict, unet_identifier_key, network_alphas, adapter_name, _pipeline)
337 is_model_cpu_offload, is_sequential_cpu_offload = self._optionally_disable_offloading(_pipeline)
338
--> 339 inject_adapter_in_model(lora_config, self, adapter_name=adapter_name)
340 incompatible_keys = set_peft_model_state_dict(self, state_dict, adapter_name)
341

~/.conda/envs/default/lib/python3.9/site-packages/peft/mapping.py in inject_adapter_in_model(peft_config, model, adapter_name)
213
214 # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules.
--> 215 peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name)
216
217 return peft_model.model

~/.conda/envs/default/lib/python3.9/site-packages/peft/tuners/lora/model.py in __init__(self, model, config, adapter_name)
137
138 def __init__(self, model, config, adapter_name) -> None:
--> 139 super().__init__(model, config, adapter_name)
140
141 def _check_new_adapter_config(self, config: LoraConfig) -> None:

~/.conda/envs/default/lib/python3.9/site-packages/peft/tuners/tuners_utils.py in __init__(self, model, peft_config, adapter_name)
173 self._pre_injection_hook(self.model, self.peft_config[adapter_name], adapter_name)
174 if peft_config != PeftType.XLORA or peft_config[adapter_name] != PeftType.XLORA:
--> 175 self.inject_adapter(self.model, adapter_name)
176
177 # Copy the peft_config in the injected model.

~/.conda/envs/default/lib/python3.9/site-packages/peft/tuners/tuners_utils.py in inject_adapter(self, model, adapter_name, autocast_adapter_dtype)
433 # Handle X-LoRA case.
434 if not is_target_modules_in_base_model and hasattr(peft_config, "target_modules"):
--> 435 raise ValueError(
436 f"Target modules {peft_config.target_modules} not found in the base model. "
437 f"Please check the target modules and try again."

ValueError: Target modules {'down_blocks.1.motion_modules.0.temporal_transformer.transformer_blocks.0.attention_blocks.0.to_out.0_lora.down.weight', ...} not found in the base model.
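
Note that the failing keys target motion_modules, which exist only in an AnimateDiff UNet (UNetMotionModel); a plain Stable Diffusion pipeline has no motion modules, so PEFT cannot find the target layers. Below is a minimal sketch of loading through AnimateDiffPipeline instead. The SD 1.5 base checkpoint and the v1-5-2 motion adapter are assumptions (the "mv2" in the filename suggests the v2 motion module), not something confirmed by this repo:

import torch
from diffusers import AnimateDiffPipeline, MotionAdapter

# The motion adapter injects the motion_modules that the LoRA keys target.
# "guoyww/animatediff-motion-adapter-v1-5-2" is an assumption based on "mv2".
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)

# Any SD 1.5 base should work; this particular checkpoint is an assumption.
pipe = AnimateDiffPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    motion_adapter=adapter,
    torch_dtype=torch.float16,
)

# With motion_modules present in the UNet, the motion LoRA keys can resolve.
pipe.load_lora_weights(
    "Cseti/AD_Motion_LORAs",
    weight_name="280_cseti_2906282_drone-orbitright-mv2_r64_w576_h384_fr16.safetensors",
    adapter_name="orbit-right",  # hypothetical adapter name
)

Even with the motion modules in place, load_lora_weights may still reject the file if its keys use the original AnimateDiff naming rather than the diffusers format; in that case the state dict would need to be converted to diffusers keys first.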
