Diffusers Bot
committed on
Upload folder using huggingface_hub
main/fresco_v2v.py
CHANGED
@@ -2436,7 +2436,7 @@ class FrescoV2VPipeline(StableDiffusionControlNetImg2ImgPipeline):
             )

             if guess_mode and self.do_classifier_free_guidance:
-                #
+                # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
main/pipeline_stable_diffusion_xl_instandid_img2img.py
CHANGED
@@ -1002,7 +1002,7 @@ class StableDiffusionXLInstantIDImg2ImgPipeline(StableDiffusionXLControlNetImg2I
             )

             if guess_mode and self.do_classifier_free_guidance:
-                #
+                # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
main/pipeline_stable_diffusion_xl_instantid.py
CHANGED
@@ -991,7 +991,7 @@ class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
             )

             if guess_mode and self.do_classifier_free_guidance:
-                #
+                # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
main/rerender_a_video.py
CHANGED
@@ -864,7 +864,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
             )

             if guess_mode and do_classifier_free_guidance:
-                #
+                # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
@@ -1038,7 +1038,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
             )

             if guess_mode and do_classifier_free_guidance:
-                #
+                # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [
main/stable_diffusion_controlnet_reference.py
CHANGED
@@ -752,7 +752,7 @@ class StableDiffusionControlNetReferencePipeline(StableDiffusionControlNetPipeli
             )

             if guess_mode and do_classifier_free_guidance:
-                #
+                # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
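Every hunk above restores the same explanatory comment ahead of the guess-mode branch. The comment describes the usual ControlNet guess-mode pattern under classifier-free guidance: ControlNet is run only on the conditional half of the batch, so its residuals are zero-padded for the unconditional half before being handed to the UNet, leaving the unconditional prediction untouched. Below is a minimal, self-contained sketch of that padding step; the tensor shapes and variable values are illustrative and not taken from the pipelines.

import torch

# Illustrative shapes; the real pipelines get these tensors from ControlNet.
batch_size, channels, height, width = 2, 320, 64, 64

# Residuals produced by running ControlNet on the conditional half only (guess mode).
down_block_res_samples = [torch.randn(batch_size, channels, height, width) for _ in range(3)]
mid_block_res_sample = torch.randn(batch_size, 1280, height // 8, width // 8)

# Pad with zeros for the unconditional half so the UNet's [uncond, cond] batch of
# size 2 * batch_size receives residuals with a matching batch dimension, while
# the unconditional prediction stays unchanged (adding zero is a no-op).
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])

assert all(d.shape[0] == 2 * batch_size for d in down_block_res_samples)
assert mid_block_res_sample.shape[0] == 2 * batch_size

Because the UNet simply adds these residuals to its own feature maps, concatenating zeros in front of the conditional residuals keeps the unconditional branch equivalent to running the UNet without ControlNet at all.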