dikdimon committed
Commit f4a41d8 · verified · 1 Parent(s): 7bed60d

Upload extensions using SD-Hub extension

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +196 -0
  2. extensions/321Prompt/321Prompt/Readme.md +8 -0
  3. extensions/321Prompt/321Prompt/__init__.py +51 -0
  4. extensions/321Prompt/321prompt.php +105 -0
  5. extensions/321Prompt/321prompt.png +0 -0
  6. extensions/321Prompt/LICENSE +121 -0
  7. extensions/321Prompt/README.md +41 -0
  8. extensions/ABG_extension/.gitignore +1 -0
  9. extensions/ABG_extension/README.md +32 -0
  10. extensions/ABG_extension/install.py +11 -0
  11. extensions/ABG_extension/scripts/__pycache__/app.cpython-310.pyc +0 -0
  12. extensions/ABG_extension/scripts/app.py +183 -0
  13. extensions/AdverseCleanerExtension/.gitignore +2 -0
  14. extensions/AdverseCleanerExtension/LICENSE +201 -0
  15. extensions/AdverseCleanerExtension/README.md +8 -0
  16. extensions/AdverseCleanerExtension/install.py +12 -0
  17. extensions/AdverseCleanerExtension/scripts/__pycache__/denoise.cpython-310.pyc +0 -0
  18. extensions/AdverseCleanerExtension/scripts/denoise.py +74 -0
  19. extensions/Automatic1111-Geeky-Remb/LICENSE +21 -0
  20. extensions/Automatic1111-Geeky-Remb/README.md +173 -0
  21. extensions/Automatic1111-Geeky-Remb/__init__.py +4 -0
  22. extensions/Automatic1111-Geeky-Remb/install.py +7 -0
  23. extensions/Automatic1111-Geeky-Remb/requirements.txt +4 -0
  24. extensions/Automatic1111-Geeky-Remb/scripts/geeky-remb.py +435 -0
  25. extensions/CFgfade/LICENSE +24 -0
  26. extensions/CFgfade/README.md +66 -0
  27. extensions/CFgfade/screenshot.png +0 -0
  28. extensions/CFgfade/scripts/__pycache__/forge_cfgfade.cpython-310.pyc +0 -0
  29. extensions/CFgfade/scripts/forge_cfgfade.py +317 -0
  30. extensions/CloneCleaner/.gitattributes +2 -0
  31. extensions/CloneCleaner/.gitignore +4 -0
  32. extensions/CloneCleaner/LICENSE +21 -0
  33. extensions/CloneCleaner/README.md +86 -0
  34. extensions/CloneCleaner/prompt_tree.yml +286 -0
  35. extensions/CloneCleaner/scripts/__pycache__/clonecleaner.cpython-310.pyc +0 -0
  36. extensions/CloneCleaner/scripts/clonecleaner.py +223 -0
  37. extensions/CloneCleaner/style.css +33 -0
  38. extensions/ComfyUI-AutomaticCFG/.github/workflows/publish.yml +21 -0
  39. extensions/ComfyUI-AutomaticCFG/README.md +175 -0
  40. extensions/ComfyUI-AutomaticCFG/__init__.py +27 -0
  41. extensions/ComfyUI-AutomaticCFG/experimental_temperature.py +208 -0
  42. extensions/ComfyUI-AutomaticCFG/grids_example/Enhanced_details_and_tweaked_attention.png +3 -0
  43. extensions/ComfyUI-AutomaticCFG/grids_example/Iris_Lux_v1051_base_image_vanilla_sampling.png +3 -0
  44. extensions/ComfyUI-AutomaticCFG/grids_example/excellent_patch_a.jpg +3 -0
  45. extensions/ComfyUI-AutomaticCFG/grids_example/excellent_patch_b.jpg +3 -0
  46. extensions/ComfyUI-AutomaticCFG/grids_example/presets.jpg +3 -0
  47. extensions/ComfyUI-AutomaticCFG/nodes.py +1286 -0
  48. extensions/ComfyUI-AutomaticCFG/nodes_sag_custom.py +190 -0
  49. extensions/ComfyUI-AutomaticCFG/presets/A subtle touch.json +1 -0
  50. extensions/ComfyUI-AutomaticCFG/presets/Crossed conds customized 1.json +1 -0
.gitattributes CHANGED
@@ -134,3 +134,199 @@ extensionsa/stable-diffusion-webui-composable-lora/readme/fig8.png filter=lfs di
  extensionsa/stable-diffusion-webui-composable-lora/readme/fig9.png filter=lfs diff=lfs merge=lfs -text
  extensionsa/stable-diffusion-webui-rembg/preview.png filter=lfs diff=lfs merge=lfs -text
  extensionsa/stable-diffusion-webui-tripclipskip/images/xy_plot.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/grids_example/Enhanced_details_and_tweaked_attention.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/grids_example/Iris_Lux_v1051_base_image_vanilla_sampling.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/grids_example/excellent_patch_a.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/grids_example/excellent_patch_b.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/grids_example/presets.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/10[[:space:]]steps[[:space:]]SDXL[[:space:]]AYS[[:space:]]Warp[[:space:]]drive[[:space:]]variation.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/11728UI_00001_.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/12[[:space:]]steps[[:space:]]SDXL[[:space:]]AYS[[:space:]]Warp[[:space:]]drive[[:space:]]workflow.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/12steps.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/24steps.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/00382UI_00001_.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/01207UI_00001_.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/01217UI_00001_.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/a[[:space:]]bad[[:space:]]upscale[[:space:]]looks[[:space:]]like[[:space:]]low[[:space:]]quality[[:space:]]jpeg.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/another[[:space:]]bad[[:space:]]upscale[[:space:]]looking[[:space:]]like[[:space:]]jpeg.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/intradasting.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/laule.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/niiiiiice.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/special[[:space:]]double[[:space:]]pass.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/I'm[[:space:]]just[[:space:]]throwing[[:space:]]a[[:space:]]few[[:space:]]here[[:space:]]that[[:space:]]I[[:space:]]find[[:space:]]nice/web.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/My[[:space:]]current[[:space:]]go-to[[:space:]]settings.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/Start_by_this_one.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/attention_modifiers_explainations.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/potato[[:space:]]attention[[:space:]]guidance.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-AutomaticCFG/workflows/simple[[:space:]]SD[[:space:]]upscale.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-nodes-hnmr/examples/workflow_mbw_multi.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ComfyUI-nodes-hnmr/examples/workflow_xyz.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/anime_3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/anime_4.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/canny_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/inpaint_before_fix.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/ip2p_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/ip2p_2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/ip2p_3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/lineart_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/lineart_2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/lineart_3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/mlsd_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/normal_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/normal_2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/scribble_2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/seg_2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/shuffle_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/shuffle_2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/softedge_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/tile_new_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/tile_new_2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/tile_new_3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/github_docs/imgs/tile_new_4.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/test_imgs/bird.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/test_imgs/building.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/test_imgs/building2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/test_imgs/girls.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/test_imgs/person-leaves.png filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/ControlNet-v1-1-nightly/test_imgs/sn.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/mmpose/demo/resources/demo_coco.gif filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/mmpose/tests/data/humanart/2D_virtual_human/digital_art/000000001648.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/resources/architecture.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/resources/generation.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/resources/iron.gif filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/resources/jay_pose.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/DWPose/resources/lalaland.gif filter=lfs diff=lfs merge=lfs -text
+ extensions/OneButtonPrompt/images/background.png filter=lfs diff=lfs merge=lfs -text
+ extensions/SD-WebUI-BatchCheckpointPrompt/img/grid.png filter=lfs diff=lfs merge=lfs -text
+ extensions/artjiggler/thesaurus.jsonl filter=lfs diff=lfs merge=lfs -text
+ extensions/canvas-zoom/dist/templates/frontend/assets/index-0c8f6dbd.js.map filter=lfs diff=lfs merge=lfs -text
+ extensions/canvas-zoom/dist/v1_1_v1_5_1/templates/frontend/assets/index-2a280c06.js.map filter=lfs diff=lfs merge=lfs -text
+ extensions/hypernetwork-modify/res/example_1.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/hypernetwork-modify/res/example_2.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/latent-upscale/assets/default.png filter=lfs diff=lfs merge=lfs -text
+ extensions/latent-upscale/assets/img2img_latent_upscale_process.png filter=lfs diff=lfs merge=lfs -text
+ extensions/latent-upscale/assets/nearest-exact-normal1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/latent-upscale/assets/nearest-exact-normal2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/latent-upscale/assets/nearest-exact-simple1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/latent-upscale/assets/nearest-exact-simple2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/latent-upscale/assets/nearest-exact-simple8.png filter=lfs diff=lfs merge=lfs -text
+ extensions/lazy-pony-prompter/images/ef-showcase.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/lazy-pony-prompter/images/lulu.png filter=lfs diff=lfs merge=lfs -text
+ extensions/ponyverse/showcase.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/posex/image/sample-webui2.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/posex/image/sample-webui2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/doc/images/depth/depth3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/doc/images/sendto/sendto5.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/models/body/kachujin.fbx filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/models/body/vanguard.fbx filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/models/body/warrok.fbx filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/models/body/ybot.fbx filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/models/hand/hand_right.fbx filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/models/pose.vrm filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-3dmodel-loader/models/pose2.fbx filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-canvas-editor/doc/images/overall.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-canvas-editor/doc/images/panels.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-canvas-editor/doc/images/photos.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-model-organizer/pic/readme/logo.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-promptbook/static/masterpiece[[:space:]]+[[:space:]]unwanted[[:space:]]+[[:space:]]bad[[:space:]]anatomy.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-3d-open-pose-editor/downloads/pose/0.5.1675469404/pose_solution_packed_assets.data filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-Lora-queue-helper/docs/output_sample.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-agent-scheduler/docs/images/walkthrough.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-dycfg/images/05.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-dycfg/images/09.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-img2txt/sd-webui-img2txt.gif filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-inpaint-anything/images/inpaint_anything_ui_image_1.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-lcm-sampler/images/img2.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-manga-inpainting/manga_inpainting/repo/examples/representative.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-panorama-tools/images/example_2.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-panorama-tools/images/example_3.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-panorama-tools/images/panorama_tools_ui_screenshot.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-picbatchwork/bin/ebsynth.dll filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-picbatchwork/bin/ebsynth.exe filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-picbatchwork/img/2.gif filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-pixelart/examples/custom_palette_demo.mp4 filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-real-image-artifacts/examples/before.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-rich-text/assets/color.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-rich-text/assets/font.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-rich-text/assets/footnote.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-rich-text/assets/size.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-samplers-scheduler-for-v1.6/images/example2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-samplers-scheduler-for-v1.6/images/example3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-sd-webui-DPMPP_2M_Karras_v2/images/sample1.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-state-manager/preview-docked.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-state-manager/preview-modal.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-state-manager/toma-chan.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-timemachine/images/tm_result.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-udav2/metric_depth/assets/compare_zoedepth.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-udav2/metric_depth/dataset/splits/hypersim/train.txt filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/anime/noise0_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/anime/noise1_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/anime/noise2_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/anime/noise3_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/photo/noise0_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/photo/noise1_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/photo/noise2_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-waifu2x-upscaler/waifu2x/yu45020/model_check_points/Upconv_7/photo/noise3_scale2.0x_model.json filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Extra-Network-Weight.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Multi-Axis-2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Multi-Axis-3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Prompt-SR-Combinations.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Prompt-SR-P.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Prompt-SR-Permutations-1-2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Prompt-SR-Permutations-2.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Prompt-SR-Permutations.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd-webui-xyz-addon/img/Prompt-SR.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd_webui_masactrl/resources/img/xyz_grid-0010-1508457017.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd_webui_masactrl-ash/resources/img/xyz_grid-0010-1508457017.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd_webui_ootdiffusion/preview.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd_webui_realtime_lcm_canvas/preview.png filter=lfs diff=lfs merge=lfs -text
+ extensions/sd_webui_sghm/preview.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/IN00.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/IN05.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/OUT06.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/OUT11.jpg filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/README_00_01_color.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/README_00_01_gray.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/README_02.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/attn-IN01.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-dumpunet/images/attn-OUT10.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-eyemask/models/shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/highres.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_10x12.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_1x120.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_2x60.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_3x40.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_4x15.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_4x23.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_4x3.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_4x30.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_4x5.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_4x8.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_6x20.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/pg_8x15.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/std_10.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/std_120.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/std_20.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/std_30.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/std_60.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-hires-fix-progressive/img/std_90.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-intm/images/IMAGE.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/03_images/imgPromptHelper/bottom/00106--rebuild.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/03_images/imgPromptHelper/skin/wrinkles-skin.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/03_images/imgPromptHelper/top-bottom/00067-2139881315.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/851CHIKARA-DZUYOKU_kanaA_004/851CHIKARA-DZUYOKU_kanaA_004.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/851MkPOP_101/851MkPOP_101.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/ChalkJP_3/Chalk-JP.otf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/DokiDokiFont2/DokiDokiFantasia.otf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/DotGothic16/DotGothic16-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/Klee_One/KleeOne-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/OhisamaFont11/OhisamaFont11.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/Rampart_One/RampartOne-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/Stick/Stick-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-simple-manga-maker/SP-MangaEditer/font/Train_One/TrainOne-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-size-travel/img/ddim_advance.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-size-travel/img/ddim_simple.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-size-travel/img/eular_a_advance.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-sonar/img/momentum.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-text2prompt/pic/pic0.png filter=lfs diff=lfs merge=lfs -text
+ extensions/stable-diffusion-webui-two-shot/gradio-3.16.2-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
extensions/321Prompt/321Prompt/Readme.md ADDED
@@ -0,0 +1,8 @@
+ This is my attempt to integrate this script as an extension into A1111, but it's not working and I don't have time!
+ (I'll let you figure out why, with my apologies; if this piece of code can help you, so much the better.)
+
+ Place this directory "321prompt", with the file "__init__.py" in it, in the extensions directory of A1111.
+ Relaunch A1111 and... it doesn't work!
+
+ Hope you do better!
+ Thanks
extensions/321Prompt/321Prompt/__init__.py ADDED
@@ -0,0 +1,51 @@
+ import gradio as gr
+ from modules import scripts
+
+ class PromptGeneratorScript(scripts.Script):
+     def title(self):
+         return "Prompt Generator"
+
+     def show(self, is_img2img):
+         return True
+
+     def ui(self, is_img2img):
+         start_phrase1 = gr.Textbox(label="Début de la phrase 1", placeholder="Un [ chat: chien: ")
+         start_value1 = gr.Number(label="Valeur de départ 1", value=0.0)
+         end_value1 = gr.Number(label="Valeur d'arrivée 1", value=1.0)
+         end_phrase1 = gr.Textbox(label="Fin de la phrase 1", placeholder="] dans un jardin.")
+         num_phrases1 = gr.Number(label="Nombre de phrases 1", value=10)
+
+         start_phrase2 = gr.Textbox(label="Début de la phrase 2", placeholder="le temps est [ beau: pluvieux: ", optional=True)
+         start_value2 = gr.Number(label="Valeur de départ 2", value=0.0, optional=True)
+         end_value2 = gr.Number(label="Valeur d'arrivée 2", value=1.0, optional=True)
+         end_phrase2 = gr.Textbox(label="Fin de la phrase 2", placeholder="] sur la montagne.", optional=True)
+         num_phrases2 = gr.Number(label="Nombre de phrases 2", value=10, optional=True)
+
+         generate_button = gr.Button("Générer")
+         output = gr.Textbox(label="Résultats", lines=10, interactive=False)
+
+         def generate_phrases(start_phrase1, start_value1, end_value1, end_phrase1, num_phrases1,
+                              start_phrase2, start_value2, end_value2, end_phrase2, num_phrases2):
+             increment1 = (end_value1 - start_value1) / (num_phrases1 - 1)
+             increment2 = (end_value2 - start_value2) / (num_phrases2 - 1) if start_phrase2 else 0
+
+             combined_results = ""
+             max_phrases = max(num_phrases1, num_phrases2)
+             for i in range(max_phrases):
+                 current_value1 = start_value1 + (increment1 * i)
+                 current_value2 = start_value2 + (increment2 * i) if start_phrase2 else ''
+                 phrase1 = f"{start_phrase1}{current_value1}{end_phrase1}"
+                 phrase2 = f", {start_phrase2}{current_value2}{end_phrase2}" if start_phrase2 else ''
+                 combined_results += phrase1 + phrase2 + "\n"
+
+             return combined_results
+
+         generate_button.click(fn=generate_phrases,
+                               inputs=[start_phrase1, start_value1, end_value1, end_phrase1, num_phrases1,
+                                       start_phrase2, start_value2, end_value2, end_phrase2, num_phrases2],
+                               outputs=output)
+
+         return [start_phrase1, start_value1, end_value1, end_phrase1, num_phrases1,
+                 start_phrase2, start_value2, end_value2, end_phrase2, num_phrases2, generate_button, output]
+
+ scripts.register_script(PromptGeneratorScript())
extensions/321Prompt/321prompt.php ADDED
@@ -0,0 +1,105 @@
+ <!DOCTYPE html>
+ <html>
+ <head>
+     <title>Générateur de Phrases</title>
+     <style>
+         .container {
+             display: flex;
+             flex-wrap: wrap;
+         }
+         .box {
+             flex: 1;
+             min-width: 45%;
+             padding: 10px;
+             box-sizing: border-box;
+         }
+         textarea {
+             width: 100%;
+             height: 200px;
+         }
+     </style>
+ </head>
+ <body>
+
+ <h1>Générateur de Phrases</h1>
+
+ <form method="post" action="">
+     <div class="container">
+         <!-- Box 1 -->
+         <div class="box">
+             <label for="start_phrase1">Début de la phrase 1 :</label><br>
+             <input type="text" id="start_phrase1" name="start_phrase1" value="<?php echo isset($_POST['start_phrase1']) ? htmlspecialchars($_POST['start_phrase1']) : ''; ?>" required><br><br>
+
+             <label for="start_value1">Valeur de départ :</label><br>
+             <input type="number" step="0.01" id="start_value1" name="start_value1" value="<?php echo isset($_POST['start_value1']) ? htmlspecialchars($_POST['start_value1']) : ''; ?>" required><br><br>
+
+             <label for="end_value1">Valeur d'arrivée :</label><br>
+             <input type="number" step="0.01" id="end_value1" name="end_value1" value="<?php echo isset($_POST['end_value1']) ? htmlspecialchars($_POST['end_value1']) : ''; ?>" required><br><br>
+
+             <label for="end_phrase1">Fin de la phrase 1 :</label><br>
+             <input type="text" id="end_phrase1" name="end_phrase1" value="<?php echo isset($_POST['end_phrase1']) ? htmlspecialchars($_POST['end_phrase1']) : ''; ?>" required><br><br>
+
+             <label for="num_phrases1">Nombre de phrases :</label><br>
+             <input type="number" id="num_phrases1" name="num_phrases1" value="<?php echo isset($_POST['num_phrases1']) ? htmlspecialchars($_POST['num_phrases1']) : ''; ?>" required><br><br>
+         </div>
+
+         <!-- Box 2 -->
+         <div class="box">
+             <label for="start_phrase2">Début de la phrase 2 :</label><br>
+             <input type="text" id="start_phrase2" name="start_phrase2" value="<?php echo isset($_POST['start_phrase2']) ? htmlspecialchars($_POST['start_phrase2']) : ''; ?>"><br><br>
+
+             <label for="start_value2">Valeur de départ :</label><br>
+             <input type="number" step="0.01" id="start_value2" name="start_value2" value="<?php echo isset($_POST['start_value2']) ? htmlspecialchars($_POST['start_value2']) : ''; ?>"><br><br>
+
+             <label for="end_value2">Valeur d'arrivée :</label><br>
+             <input type="number" step="0.01" id="end_value2" name="end_value2" value="<?php echo isset($_POST['end_value2']) ? htmlspecialchars($_POST['end_value2']) : ''; ?>"><br><br>
+
+             <label for="end_phrase2">Fin de la phrase 2 :</label><br>
+             <input type="text" id="end_phrase2" name="end_phrase2" value="<?php echo isset($_POST['end_phrase2']) ? htmlspecialchars($_POST['end_phrase2']) : ''; ?>"><br><br>
+
+             <label for="num_phrases2">Nombre de phrases :</label><br>
+             <input type="number" id="num_phrases2" name="num_phrases2" value="<?php echo isset($_POST['num_phrases2']) ? htmlspecialchars($_POST['num_phrases2']) : ''; ?>"><br><br>
+         </div>
+     </div>
+     <input type="submit" value="Générer">
+ </form>
+
+ <?php
+ if ($_SERVER["REQUEST_METHOD"] == "POST") {
+     // Read the form inputs for the first sentence
+     $startPhrase1 = $_POST['start_phrase1'];
+     $startValue1 = floatval($_POST['start_value1']);
+     $endValue1 = floatval($_POST['end_value1']);
+     $endPhrase1 = $_POST['end_phrase1'];
+     $numPhrases1 = intval($_POST['num_phrases1']);
+
+     // Read the form inputs for the second sentence (if present)
+     $startPhrase2 = isset($_POST['start_phrase2']) ? $_POST['start_phrase2'] : '';
+     $startValue2 = isset($_POST['start_value2']) ? floatval($_POST['start_value2']) : 0;
+     $endValue2 = isset($_POST['end_value2']) ? floatval($_POST['end_value2']) : 0;
+     $endPhrase2 = isset($_POST['end_phrase2']) ? $_POST['end_phrase2'] : '';
+     $numPhrases2 = isset($_POST['num_phrases2']) ? intval($_POST['num_phrases2']) : $numPhrases1;
+
+     // Compute the per-step increment for each sentence
+     $increment1 = ($endValue1 - $startValue1) / ($numPhrases1 - 1);
+     $increment2 = ($startPhrase2 !== '') ? ($endValue2 - $startValue2) / ($numPhrases2 - 1) : 0;
+
+     // Generate the combined sentences
+     $combinedResults = "";
+     $maxPhrases = max($numPhrases1, $numPhrases2);
+     for ($i = 0; $i < $maxPhrases; $i++) {
+         $currentValue1 = $startValue1 + ($increment1 * $i);
+         $currentValue2 = ($startPhrase2 !== '') ? $startValue2 + ($increment2 * $i) : '';
+         $phrase1 = $startPhrase1 . $currentValue1 . $endPhrase1;
+         $phrase2 = ($startPhrase2 !== '') ? ' , ' . $startPhrase2 . $currentValue2 . $endPhrase2 : '';
+         $combinedResults .= $phrase1 . $phrase2 . "\n";
+     }
+
+     // Display the results
+     echo "<h2>Résultats :</h2>";
+     echo '<textarea readonly>' . htmlspecialchars($combinedResults) . '</textarea>';
+ }
+ ?>
+
+ </body>
+ </html>
extensions/321Prompt/321prompt.png ADDED
extensions/321Prompt/LICENSE ADDED
@@ -0,0 +1,121 @@
+ Creative Commons Legal Code
+
+ CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+ Statement of Purpose
+
+ The laws of most jurisdictions throughout the world automatically confer
+ exclusive Copyright and Related Rights (defined below) upon the creator
+ and subsequent owner(s) (each and all, an "owner") of an original work of
+ authorship and/or a database (each, a "Work").
+
+ Certain owners wish to permanently relinquish those rights to a Work for
+ the purpose of contributing to a commons of creative, cultural and
+ scientific works ("Commons") that the public can reliably and without fear
+ of later claims of infringement build upon, modify, incorporate in other
+ works, reuse and redistribute as freely as possible in any form whatsoever
+ and for any purposes, including without limitation commercial purposes.
+ These owners may contribute to the Commons to promote the ideal of a free
+ culture and the further production of creative, cultural and scientific
+ works, or to gain reputation or greater distribution for their Work in
+ part through the use and efforts of others.
+
+ For these and/or other purposes and motivations, and without any
+ expectation of additional consideration or compensation, the person
+ associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+ is an owner of Copyright and Related Rights in the Work, voluntarily
+ elects to apply CC0 to the Work and publicly distribute the Work under its
+ terms, with knowledge of his or her Copyright and Related Rights in the
+ Work and the meaning and intended legal effect of CC0 on those rights.
+
+ 1. Copyright and Related Rights. A Work made available under CC0 may be
+ protected by copyright and related or neighboring rights ("Copyright and
+ Related Rights"). Copyright and Related Rights include, but are not
+ limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+ iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+ vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+ 2. Waiver. To the greatest extent permitted by, but not in contravention
+ of, applicable law, Affirmer hereby overtly, fully, permanently,
+ irrevocably and unconditionally waives, abandons, and surrenders all of
+ Affirmer's Copyright and Related Rights and associated claims and causes
+ of action, whether now known or unknown (including existing as well as
+ future claims and causes of action), in the Work (i) in all territories
+ worldwide, (ii) for the maximum duration provided by applicable law or
+ treaty (including future time extensions), (iii) in any current or future
+ medium and for any number of copies, and (iv) for any purpose whatsoever,
+ including without limitation commercial, advertising or promotional
+ purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+ member of the public at large and to the detriment of Affirmer's heirs and
+ successors, fully intending that such Waiver shall not be subject to
+ revocation, rescission, cancellation, termination, or any other legal or
+ equitable action to disrupt the quiet enjoyment of the Work by the public
+ as contemplated by Affirmer's express Statement of Purpose.
+
+ 3. Public License Fallback. Should any part of the Waiver for any reason
+ be judged legally invalid or ineffective under applicable law, then the
+ Waiver shall be preserved to the maximum extent permitted taking into
+ account Affirmer's express Statement of Purpose. In addition, to the
+ extent the Waiver is so judged Affirmer hereby grants to each affected
+ person a royalty-free, non transferable, non sublicensable, non exclusive,
+ irrevocable and unconditional license to exercise Affirmer's Copyright and
+ Related Rights in the Work (i) in all territories worldwide, (ii) for the
+ maximum duration provided by applicable law or treaty (including future
+ time extensions), (iii) in any current or future medium and for any number
+ of copies, and (iv) for any purpose whatsoever, including without
+ limitation commercial, advertising or promotional purposes (the
+ "License"). The License shall be deemed effective as of the date CC0 was
+ applied by Affirmer to the Work. Should any part of the License for any
+ reason be judged legally invalid or ineffective under applicable law, such
+ partial invalidity or ineffectiveness shall not invalidate the remainder
+ of the License, and in such case Affirmer hereby affirms that he or she
+ will not (i) exercise any of his or her remaining Copyright and Related
+ Rights in the Work or (ii) assert any associated claims and causes of
+ action with respect to the Work, in either case contrary to Affirmer's
+ express Statement of Purpose.
+
+ 4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
extensions/321Prompt/README.md ADDED
@@ -0,0 +1,41 @@
+ # 321Prompt
+ "321prompt.php" is a PHP script to generate prompts for A1111 (Stable Diffusion).
+ It creates a prompt text file with incremented weights to paste into the A1111 interface under "Script / Prompts from file or textbox".
+
+ The PHP script works on a classic web server. I also tried to make an extension for A1111, but one thing escapes me (it's buggy).
+ Do with it what you want!
+
+ You will be able to generate a prompt list of this type in 3-4 clicks:
+
+ a [cat: dog:0] in the garden , it's [Rainy: sunny: 1 ] Outside
+
+ a [cat: dog:0.125] in the garden , it's [Rainy: sunny: 0.875 ] Outside
+
+ a [cat: dog:0.25] in the garden , it's [Rainy: sunny: 0.75 ] Outside
+
+ a [cat: dog:0.375] in the garden , it's [Rainy: sunny: 0.625 ] Outside
+
+ a [cat: dog:0.5] in the garden , it's [Rainy: sunny: 0.5 ] Outside
+
+ a [cat: dog:0.625] in the garden , it's [Rainy: sunny: 0.375 ] Outside
+
+ a [cat: dog:0.75] in the garden , it's [Rainy: sunny: 0.25 ] Outside
+
+ a [cat: dog:0.875] in the garden , it's [Rainy: sunny: 0.125 ] Outside
+
+ a [cat: dog:1] in the garden , it's [Rainy: sunny: 0 ] Outside
+
+ Look at the screenshot (321prompt.png) to understand how to play with this script!
+
+ It works with one sentence (sentence 1; you do not have to fill in sentence 2).
+ But you can fill in both sentence 1 and sentence 2 for more fun; that works too!
+ And you can independently increase or decrease the weight for sentence 1 and for sentence 2.
+ Bye!
+
+ Demo here: [https://lostcantina.com/321prompt.php](https://lostcantina.com/321prompt.php)
+ (Sorry, I'm French, but you can translate easily with Chrome!)
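Editorial note, not part of the commit: the interpolation that both 321prompt.php and the attempted __init__.py implement is a plain linear ramp, value_i = start + i * (end - start) / (n - 1), applied independently to each sentence. A minimal standalone Python sketch (the function name and arguments are illustrative, not from the upload) that reproduces the README's example output:

```python
def interpolate_prompts(start1, end1, v0_1, v1_1, n,
                        start2="", end2="", v0_2=0.0, v1_2=0.0):
    """Linear-ramp prompt generator; sentence 2 is optional. Requires n >= 2."""
    step1 = (v1_1 - v0_1) / (n - 1)
    step2 = (v1_2 - v0_2) / (n - 1) if start2 else 0.0
    lines = []
    for i in range(n):
        line = f"{start1}{round(v0_1 + step1 * i, 3)}{end1}"
        if start2:
            line += f" , {start2}{round(v0_2 + step2 * i, 3)}{end2}"
        lines.append(line)
    return "\n".join(lines)

# Reproduces the README example: 9 lines, sentence 1 ramping 0 -> 1
# while sentence 2 ramps 1 -> 0 in steps of 0.125.
print(interpolate_prompts("a [cat: dog:", "] in the garden", 0.0, 1.0, 9,
                          "it's [Rainy: sunny: ", " ] Outside", 1.0, 0.0))
```

With n = 9 the step is 1 / 8 = 0.125, which yields exactly the 0, 0.125, ..., 1 ramp shown in the README above.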
extensions/ABG_extension/.gitignore ADDED
@@ -0,0 +1 @@
+ scripts/__pycache__
extensions/ABG_extension/README.md ADDED
@@ -0,0 +1,32 @@
+ <h3 align="center">
+ <b>ABG extension</b>
+ </h3>
+
+ <p align="center">
+ <a href="https://github.com/KutsuyaYuki/ABG_extension/stargazers"><img src="https://img.shields.io/github/stars/KutsuyaYuki/ABG_extension?style=for-the-badge"></a>
+ <a href="https://github.com/KutsuyaYuki/ABG_extension/issues"><img src="https://img.shields.io/github/issues/KutsuyaYuki/ABG_extension?style=for-the-badge"></a>
+ <a href="https://github.com/KutsuyaYuki/ABG_extension/commits/main"><img src="https://img.shields.io/github/last-commit/KutsuyaYuki/ABG_extension?style=for-the-badge"></a>
+ </p>
+
+ ## Installation
+
+ 1. Install the extension by going to the Extensions tab -> Install from URL -> paste the GitHub URL and click Install.
+ 2. Once it is installed, go back to the Installed tab in Extensions and press Apply and restart UI.
+ 3. Installation finished.
+ 4. If the script does not show up or does not work, restart the WebUI.
+
+ ## Usage
+
+ ### txt2img
+
+ 1. At the bottom of the WebUI, under Script, select **ABG Remover**.
+ 2. Select the desired options: **Only save background free pictures** or **Do not auto save**.
+ 3. Generate an image and you will see the result in the output area.
+
+ ### img2img
+
+ 1. At the bottom of the WebUI, under Script, select **ABG Remover**.
+ 2. Select the desired options: **Only save background free pictures** or **Do not auto save**.
+ 3. **IMPORTANT**: Set **Denoising strength** to a low value, like **0.01**.
+
+ Based on https://huggingface.co/spaces/skytnt/anime-remove-background
extensions/ABG_extension/install.py ADDED
@@ -0,0 +1,11 @@
+ import launch
+
+ for dep in ['onnx', 'onnxruntime', 'numpy']:
+     if not launch.is_installed(dep):
+         launch.run_pip(f"install {dep}", f"{dep} for ABG_extension")
+
+ if not launch.is_installed("cv2"):
+     launch.run_pip("install opencv-python", "opencv-python")
+
+ if not launch.is_installed("PIL"):
+     launch.run_pip("install Pillow", "Pillow")
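A side note on the mapping above (editorial, hedged): A1111's launch.is_installed checks whether the Python *import* name can be resolved, not the pip package name, which is why install.py probes "cv2" but installs "opencv-python", and probes "PIL" but installs "Pillow". A rough sketch of that check, assuming it wraps importlib's module lookup:

```python
import importlib.util

# Assumed behaviour of launch.is_installed: a resolvable import name means installed.
def is_installed(import_name: str) -> bool:
    return importlib.util.find_spec(import_name) is not None

# The import name differs from the pip package name for these two:
# "cv2" is provided by the "opencv-python" package, "PIL" by "Pillow".
print(is_installed("cv2"), is_installed("PIL"))
```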
extensions/ABG_extension/scripts/__pycache__/app.cpython-310.pyc ADDED
Binary file (4.23 kB)
extensions/ABG_extension/scripts/app.py ADDED
@@ -0,0 +1,183 @@
+ import random
+ import modules.scripts as scripts
+ from modules import images
+ from modules.processing import process_images, Processed
+ from modules.processing import Processed
+ from modules.shared import opts, cmd_opts, state
+
+ import gradio as gr
+ import huggingface_hub
+ import onnxruntime as rt
+ import copy
+ import numpy as np
+ import cv2
+ from PIL import Image as im, ImageDraw
+
+
+ # Declare execution providers
+ providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+
+ # Download and host the model
+ model_path = huggingface_hub.hf_hub_download(
+     "skytnt/anime-seg", "isnetis.onnx")
+ rmbg_model = rt.InferenceSession(model_path, providers=providers)
+
+ # Function to get mask
+ def get_mask(img, s=1024):
+     # Resize the img to a square shape with dimension s
+     # Convert img pixel values from integers 0-255 to float 0-1
+     img = (img / 255).astype(np.float32)
+     # Get the number of dimensions of img
+     dim = img.shape[2]
+     # Convert the input image to RGB format if it has an alpha channel
+     if dim == 4:
+         img = img[..., :3]
+         dim = 3
+     # Get height and width of the image
+     h, w = h0, w0 = img.shape[:-1]
+     # IF height is greater than width, set h as s and w as s*width/height
+     # ELSE, set w as s and h as s*height/width
+     h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
+     # Calculate padding for height and width
+     ph, pw = s - h, s - w
+     # Create a 1024x1024x3 array of 0's
+     img_input = np.zeros([s, s, dim], dtype=np.float32)
+     # Resize the original image to (w,h) and then pad with the calculated ph,pw
+     img_input[ph // 2:ph // 2 + h, pw //
+               2:pw // 2 + w] = cv2.resize(img, (w, h))
+     # Change the axes
+     img_input = np.transpose(img_input, (2, 0, 1))
+     # Add an extra batch axis
+     img_input = img_input[np.newaxis, :]
+     # Run the model to get the mask
+     mask = rmbg_model.run(None, {'img': img_input})[0][0]
+     # Transpose axes
+     mask = np.transpose(mask, (1, 2, 0))
+     # Crop it to the image's padded dimensions
+     mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
+     # Resize the mask to the original image size (h0,w0)
+     mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
+     return mask
+
+ ### Function to remove background
+ def rmbg_fn(img):
+     # Call get_mask() to get the mask
+     mask = get_mask(img)
+     # Multiply the image and the mask together to get the output image
+     img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
+     # Convert mask values back to int 0-255
+     mask = (mask * 255).astype(np.uint8)
+     # Concatenate the output image and mask
+     img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
+     # Stack 3 identical copies of the mask for displaying
+     mask = mask.repeat(3, axis=2)
+     return mask, img
+
+
+ class Script(scripts.Script):
+     is_txt2img = False
+
+     # Function to set title
+     def title(self):
+         return "ABG Remover"
+
+     def ui(self, is_img2img):
+
+         with gr.Column():
+             only_save_background_free_pictures = gr.Checkbox(
+                 label='Only save background free pictures')
+             do_not_auto_save = gr.Checkbox(label='Do not auto save')
+             with gr.Row():
+                 custom_background = gr.Checkbox(label='Custom Background')
+                 custom_background_color = gr.ColorPicker(
+                     label='Background Color', default='#ff0000')
+                 custom_background_random = gr.Checkbox(
+                     label='Random Custom Background')
+
+         return [only_save_background_free_pictures, do_not_auto_save, custom_background, custom_background_color, custom_background_random]
+
+     # Function to show the script
+     def show(self, is_img2img):
+         return True
+
+     # Function to run the script
+     def run(self, p, only_save_background_free_pictures, do_not_auto_save, custom_background, custom_background_color, custom_background_random):
+         # If only_save_background_free_pictures is true, set do_not_save_samples to true
+         if only_save_background_free_pictures:
+             p.do_not_save_samples = True
+
+         # Create a process_images object
+         proc = process_images(p)
+
+         has_grid = False
+
+         unwanted_grid_because_of_img_count = len(
+             proc.images) < 2 and opts.grid_only_if_multiple
+         if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
+             has_grid = True
+
+         # Loop through all the images in proc
+         for i in range(len(proc.images)):
+             # Separate the background from the foreground
+             nmask, nimg = rmbg_fn(np.array(proc.images[i]))
+
+             # Check the number of channels in the nimg array, select only the first 3 or 4 channels
+             num_channels = nimg.shape[2]
+             if num_channels > 4:
+                 nimg = nimg[:, :, :4]
+
+             # Ensure the data type is uint8 and convert the image back to a format that can be saved
+             nimg = nimg.astype(np.uint8)
+             img = im.fromarray(nimg)
+
+             # If a custom background is requested, composite the cutout over it
+             if custom_background or custom_background_random:
+                 # If custom_background_random is true, set the background color to a random color
+                 if custom_background_random:
+                     custom_background_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
+
+                 # Create a new image with the same size as the original image
+                 background = im.new('RGBA', img.size, custom_background_color)
+
+                 # Draw a colored rectangle onto the new image
+                 draw = ImageDraw.Draw(background)
+                 draw.rectangle([(0, 0), img.size],
+                                fill=custom_background_color)
+
+                 # Merge the two images
+                 img = im.alpha_composite(background, img)
+
+             # Determine output path
+             outpath = p.outpath_grids if has_grid and i == 0 else p.outpath_samples
+
+             # If we are saving all images, save the mask and the image
+             if not only_save_background_free_pictures:
+                 mask = im.fromarray(nmask)
+                 # Do not save the new images if the checkbox is checked
+                 if not do_not_auto_save:
+                     # Save the new images
+                     images.save_image(
+                         mask, outpath, "mask_", proc.seed + i, proc.prompt, "png", info=proc.info, p=p)
+                     images.save_image(
+                         img, outpath, "img_", proc.seed + i, proc.prompt, "png", info=proc.info, p=p)
+                 # Add the images to the proc object
+                 proc.images.append(mask)
+                 proc.images.append(img)
+             # If we are only saving background-free images, save the image and replace it in the proc object
+             else:
+                 proc.images[i] = img
+
+                 # Check if automatic saving is enabled
+                 if not do_not_auto_save:
+                     # Check if the image is the first one and has a grid
+                     if has_grid and i == 0:
+                         # Save the image
+                         images.save_image(img, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0],
+                                           opts.grid_format, info=proc.info, short_filename=not opts.grid_extended_filename, p=p)
+                     else:
+                         # Save the image
+                         images.save_image(img, outpath, "", proc.seed,
+                                           proc.prompt, "png", info=proc.info, p=p)
+
+         # Return the proc object
+         return proc
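As a usage illustration (editorial, not part of the commit), get_mask and rmbg_fn above can also be exercised outside the WebUI once the skytnt/anime-seg checkpoint has downloaded; "input.png" here is a hypothetical test image:

```python
import numpy as np
from PIL import Image

# Hypothetical standalone driver for the rmbg_fn defined above:
# load an RGB image, strip its background, save the mask and the cutout.
rgb = np.array(Image.open("input.png").convert("RGB"))
mask3, cutout = rmbg_fn(rgb)   # mask3: HxWx3 uint8, cutout: HxWx4 RGBA uint8
Image.fromarray(cutout).save("cutout.png")        # background removed via alpha
Image.fromarray(mask3[:, :, 0]).save("mask.png")  # single-channel mask
```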
extensions/AdverseCleanerExtension/.gitignore ADDED
@@ -0,0 +1,2 @@
+ **scripts/__pycache__/
+ **dist
extensions/AdverseCleanerExtension/LICENSE ADDED
@@ -0,0 +1,201 @@
+                                  Apache License
+                            Version 2.0, January 2004
+                         http://www.apache.org/licenses/
+ 
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+ 
+    1. Definitions.
+ 
+       "License" shall mean the terms and conditions for use, reproduction,
+       and distribution as defined by Sections 1 through 9 of this document.
+ 
+       "Licensor" shall mean the copyright owner or entity authorized by
+       the copyright owner that is granting the License.
+ 
+       "Legal Entity" shall mean the union of the acting entity and all
+       other entities that control, are controlled by, or are under common
+       control with that entity. For the purposes of this definition,
+       "control" means (i) the power, direct or indirect, to cause the
+       direction or management of such entity, whether by contract or
+       otherwise, or (ii) ownership of fifty percent (50%) or more of the
+       outstanding shares, or (iii) beneficial ownership of such entity.
+ 
+       "You" (or "Your") shall mean an individual or Legal Entity
+       exercising permissions granted by this License.
+ 
+       "Source" form shall mean the preferred form for making modifications,
+       including but not limited to software source code, documentation
+       source, and configuration files.
+ 
+       "Object" form shall mean any form resulting from mechanical
+       transformation or translation of a Source form, including but
+       not limited to compiled object code, generated documentation,
+       and conversions to other media types.
+ 
+       "Work" shall mean the work of authorship, whether in Source or
+       Object form, made available under the License, as indicated by a
+       copyright notice that is included in or attached to the work
+       (an example is provided in the Appendix below).
+ 
+       "Derivative Works" shall mean any work, whether in Source or Object
+       form, that is based on (or derived from) the Work and for which the
+       editorial revisions, annotations, elaborations, or other modifications
+       represent, as a whole, an original work of authorship. For the purposes
+       of this License, Derivative Works shall not include works that remain
+       separable from, or merely link (or bind by name) to the interfaces of,
+       the Work and Derivative Works thereof.
+ 
+       "Contribution" shall mean any work of authorship, including
+       the original version of the Work and any modifications or additions
+       to that Work or Derivative Works thereof, that is intentionally
+       submitted to Licensor for inclusion in the Work by the copyright owner
+       or by an individual or Legal Entity authorized to submit on behalf of
+       the copyright owner. For the purposes of this definition, "submitted"
+       means any form of electronic, verbal, or written communication sent
+       to the Licensor or its representatives, including but not limited to
+       communication on electronic mailing lists, source code control systems,
+       and issue tracking systems that are managed by, or on behalf of, the
+       Licensor for the purpose of discussing and improving the Work, but
+       excluding communication that is conspicuously marked or otherwise
+       designated in writing by the copyright owner as "Not a Contribution."
+ 
+       "Contributor" shall mean Licensor and any individual or Legal Entity
+       on behalf of whom a Contribution has been received by Licensor and
+       subsequently incorporated within the Work.
+ 
+    2. Grant of Copyright License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       copyright license to reproduce, prepare Derivative Works of,
+       publicly display, publicly perform, sublicense, and distribute the
+       Work and such Derivative Works in Source or Object form.
+ 
+    3. Grant of Patent License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       (except as stated in this section) patent license to make, have made,
+       use, offer to sell, sell, import, and otherwise transfer the Work,
+       where such license applies only to those patent claims licensable
+       by such Contributor that are necessarily infringed by their
+       Contribution(s) alone or by combination of their Contribution(s)
+       with the Work to which such Contribution(s) was submitted. If You
+       institute patent litigation against any entity (including a
+       cross-claim or counterclaim in a lawsuit) alleging that the Work
+       or a Contribution incorporated within the Work constitutes direct
+       or contributory patent infringement, then any patent licenses
+       granted to You under this License for that Work shall terminate
+       as of the date such litigation is filed.
+ 
+    4. Redistribution. You may reproduce and distribute copies of the
+       Work or Derivative Works thereof in any medium, with or without
+       modifications, and in Source or Object form, provided that You
+       meet the following conditions:
+ 
+       (a) You must give any other recipients of the Work or
+           Derivative Works a copy of this License; and
+ 
+       (b) You must cause any modified files to carry prominent notices
+           stating that You changed the files; and
+ 
+       (c) You must retain, in the Source form of any Derivative Works
+           that You distribute, all copyright, patent, trademark, and
+           attribution notices from the Source form of the Work,
+           excluding those notices that do not pertain to any part of
+           the Derivative Works; and
+ 
+       (d) If the Work includes a "NOTICE" text file as part of its
+           distribution, then any Derivative Works that You distribute must
+           include a readable copy of the attribution notices contained
+           within such NOTICE file, excluding those notices that do not
+           pertain to any part of the Derivative Works, in at least one
+           of the following places: within a NOTICE text file distributed
+           as part of the Derivative Works; within the Source form or
+           documentation, if provided along with the Derivative Works; or,
+           within a display generated by the Derivative Works, if and
+           wherever such third-party notices normally appear. The contents
+           of the NOTICE file are for informational purposes only and
+           do not modify the License. You may add Your own attribution
+           notices within Derivative Works that You distribute, alongside
+           or as an addendum to the NOTICE text from the Work, provided
+           that such additional attribution notices cannot be construed
+           as modifying the License.
+ 
+       You may add Your own copyright statement to Your modifications and
+       may provide additional or different license terms and conditions
+       for use, reproduction, or distribution of Your modifications, or
+       for any such Derivative Works as a whole, provided Your use,
+       reproduction, and distribution of the Work otherwise complies with
+       the conditions stated in this License.
+ 
+    5. Submission of Contributions. Unless You explicitly state otherwise,
+       any Contribution intentionally submitted for inclusion in the Work
+       by You to the Licensor shall be under the terms and conditions of
+       this License, without any additional terms or conditions.
+       Notwithstanding the above, nothing herein shall supersede or modify
+       the terms of any separate license agreement you may have executed
+       with Licensor regarding such Contributions.
+ 
+    6. Trademarks. This License does not grant permission to use the trade
+       names, trademarks, service marks, or product names of the Licensor,
+       except as required for reasonable and customary use in describing the
+       origin of the Work and reproducing the content of the NOTICE file.
+ 
+    7. Disclaimer of Warranty. Unless required by applicable law or
+       agreed to in writing, Licensor provides the Work (and each
+       Contributor provides its Contributions) on an "AS IS" BASIS,
+       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+       implied, including, without limitation, any warranties or conditions
+       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+       PARTICULAR PURPOSE. You are solely responsible for determining the
+       appropriateness of using or redistributing the Work and assume any
+       risks associated with Your exercise of permissions under this License.
+ 
+    8. Limitation of Liability. In no event and under no legal theory,
+       whether in tort (including negligence), contract, or otherwise,
+       unless required by applicable law (such as deliberate and grossly
+       negligent acts) or agreed to in writing, shall any Contributor be
+       liable to You for damages, including any direct, indirect, special,
+       incidental, or consequential damages of any character arising as a
+       result of this License or out of the use or inability to use the
+       Work (including but not limited to damages for loss of goodwill,
+       work stoppage, computer failure or malfunction, or any and all
+       other commercial damages or losses), even if such Contributor
+       has been advised of the possibility of such damages.
+ 
+    9. Accepting Warranty or Additional Liability. While redistributing
+       the Work or Derivative Works thereof, You may choose to offer,
+       and charge a fee for, acceptance of support, warranty, indemnity,
+       or other liability obligations and/or rights consistent with this
+       License. However, in accepting such obligations, You may act only
+       on Your own behalf and on Your sole responsibility, not on behalf
+       of any other Contributor, and only if You agree to indemnify,
+       defend, and hold each Contributor harmless for any liability
+       incurred by, or claims asserted against, such Contributor by reason
+       of your accepting any such warranty or additional liability.
+ 
+    END OF TERMS AND CONDITIONS
+ 
+    APPENDIX: How to apply the Apache License to your work.
+ 
+       To apply the Apache License to your work, attach the following
+       boilerplate notice, with the fields enclosed by brackets "[]"
+       replaced with your own identifying information. (Don't include
+       the brackets!) The text should be enclosed in the appropriate
+       comment syntax for the file format. We also recommend that a
+       file or class name and description of purpose be included on the
+       same "printed page" as the copyright notice for easier
+       identification within third-party archives.
+ 
+    Copyright [yyyy] [name of copyright owner]
+ 
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
extensions/AdverseCleanerExtension/README.md ADDED
@@ -0,0 +1,8 @@
+ # AdverseCleaner Extension for AUTOMATIC1111/stable-diffusion-webui
+ This extension provides a simple, easy-to-use way to denoise images using OpenCV's bilateral filter and guided filter.
+ Original script by: https://github.com/lllyasviel/AdverseCleaner
+ ## Usage
+ Select AdverseCleaner as your active script.
+ ![image](https://user-images.githubusercontent.com/2740936/226215666-17d876fa-b314-4457-9a9f-9bd1d8cf7561.png)
+ You will then have access to all the parameters for both filter steps.
+ ![image](https://user-images.githubusercontent.com/2740936/226215696-962efbb5-7ce8-4ea8-a94d-d1c32561796b.png)
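+ 
+ For reference, the whole cleaning pass boils down to two OpenCV filters applied repeatedly: bilateral filtering smears out the high-frequency adversarial pattern, then guided filtering pulls edges back from the original image. Below is a minimal standalone sketch using the script's default slider values; it assumes `opencv-contrib-python` is installed (since `guidedFilter` lives in the `ximgproc` contrib module), and the file names are placeholders:
+ 
+ ```python
+ import cv2
+ import numpy as np
+ from cv2.ximgproc import guidedFilter
+ 
+ img = cv2.imread("input.png").astype(np.float32)  # placeholder input path
+ 
+ y = img.copy()
+ for _ in range(64):    # "Bilateral Steps" default
+     y = cv2.bilateralFilter(y, 5, 8, 8)  # Diameter, SigmaColor, SigmaSpace defaults
+ 
+ for _ in range(4):     # "Guided Steps" default
+     y = guidedFilter(img, y, 4, 16)      # Radius and eps ("Accuracy") defaults
+ 
+ cv2.imwrite("output.png", y.clip(0, 255).astype(np.uint8))
+ ```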
extensions/AdverseCleanerExtension/install.py ADDED
@@ -0,0 +1,12 @@
+ import launch
+ 
+ if not launch.is_installed("opencv-contrib-python"):
+     if launch.is_installed("opencv-python"):
+         print("Uninstalling opencv-python...")
+         launch.run_pip("uninstall -y opencv-python", "uninstall opencv-python")
+     print("Installing opencv-contrib-python...")
+     launch.run_pip("install opencv-contrib-python", "opencv-contrib-python")
+ 
+ if not launch.is_installed("numpy"):
+     print("Installing numpy...")
+     launch.run_pip("install numpy", "numpy")
extensions/AdverseCleanerExtension/scripts/__pycache__/denoise.cpython-310.pyc ADDED
Binary file (2.71 kB)
extensions/AdverseCleanerExtension/scripts/denoise.py ADDED
@@ -0,0 +1,74 @@
+ 
+ import gradio as gr
+ import numpy as np
+ import cv2
+ import modules.scripts as scripts
+ 
+ from modules import images
+ from modules.shared import opts
+ 
+ from cv2.ximgproc import guidedFilter
+ from modules.processing import process_images
+ 
+ 
+ class Script(scripts.Script):
+     def title(self):
+         return "AdverseCleaner"
+ 
+     def show(self, is_img2img):
+         return True
+ 
+     def ui(self, is_img2img):
+         info = gr.Markdown('''
+         ### Bilateral Filter
+         ''')
+         bilateral_steps = gr.Slider(minimum=1, maximum=128, step=1,
+                                     value=64, label="Bilateral Steps")
+         diameter = gr.Slider(minimum=1, maximum=30, step=1,
+                              value=5, label="Diameter")
+         sigma_color = gr.Slider(minimum=1, maximum=30,
+                                 step=1, value=8, label="SigmaColor")
+         sigma_space = gr.Slider(minimum=1, maximum=30,
+                                 step=1, value=8, label="SigmaSpace")
+         info2 = gr.Markdown('''
+         ### Guided Filter
+         ''')
+         guided_steps = gr.Slider(minimum=1, maximum=64, step=1,
+                                  value=4, label="Guided Steps")
+         radius = gr.Slider(minimum=1, maximum=30, step=1,
+                            value=4, label="Radius")
+         eps = gr.Slider(minimum=1, maximum=30, step=1,
+                         value=16, label="Accuracy")
+         return [info, bilateral_steps, diameter, sigma_color, sigma_space, info2, guided_steps, radius, eps]
+ 
+     def run(self, p, _, bilateral_steps, diameter, sigma_color, sigma_space, __, guided_steps, radius, eps):
+         from PIL import Image
+         has_grid = False
+ 
+         proc = process_images(p)
+         unwanted_grid_because_of_img_count = len(
+             proc.images) < 2 and opts.grid_only_if_multiple
+         if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
+             has_grid = True
+ 
+ 
+         def process(im, i):
+             outpath = p.outpath_grids if has_grid and i == 0 else p.outpath_samples
+             img = cv2.cvtColor(
+                 np.array(im), cv2.COLOR_RGB2BGR).astype(np.float32)
+             y = img.copy()
+             for _ in range(bilateral_steps):
+                 y = cv2.bilateralFilter(y, diameter, sigma_color, sigma_space)
+ 
+             for _ in range(guided_steps):
+                 y = guidedFilter(img, y, radius, eps)
+ 
+             out_image = Image.fromarray(cv2.cvtColor(
+                 y.clip(0, 255).astype(np.uint8), cv2.COLOR_BGR2RGB))
+             images.save_image(out_image, outpath, "img_", proc.seed +
+                               i, proc.prompt, "png", info=proc.info, p=p)
+             return out_image
+ 
+         for i in range(len(proc.images)):
+             proc.images.append(process(proc.images[i], i))
+         return proc
extensions/Automatic1111-Geeky-Remb/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+ 
+ Copyright (c) 2024 GeekyGhost
+ 
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ 
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
extensions/Automatic1111-Geeky-Remb/README.md ADDED
@@ -0,0 +1,173 @@
+ # GeekyRemB: Advanced Background Removal for Automatic1111 Web UI
+ 
+ ![download](https://github.com/user-attachments/assets/9a23a8aa-9ab8-4c6a-ae1a-44879b4a696d)
+ 
+ ## Overview
+ 
+ GeekyRemB is a powerful extension for Automatic1111 that provides advanced background removal and image/video manipulation capabilities. It is a port of the ComfyUI node, bringing its functionality to the Automatic1111 environment. The extension allows users to remove backgrounds from images and videos, apply various effects, and manipulate foreground elements with precision.
+ 
+ ## Key Features
+ 
+ - Background removal for images and videos
+ - Support for multiple background removal models
+ - Chroma key functionality
+ - Foreground manipulation (scaling, rotation, positioning)
+ - Various image effects (edge detection, shadow, color adjustments)
+ - Mask processing options
+ - Custom output dimensions
+ - Video background support
+ 
+ ## How to Use
+ 
+ 1. Install the extension by placing the `geeky-remb.py` file in the `scripts` folder of your Automatic1111 installation.
+ 2. Restart Automatic1111 or reload the UI.
+ 3. Navigate to the "GeekyRemB" tab in the Automatic1111 interface.
+ 4. Choose your input type (Image or Video) and upload your content.
+ 5. Adjust the settings as needed (described in detail below).
+ 6. Click the "Run GeekyRemB" button to process your input.
+ 
+ ## UI Settings and Their Functions
+ 
+ ### Input/Output Settings
+ 
+ - **Input Type**: Choose between "Image" or "Video" as your input source.
+ - **Foreground Image/Video**: Upload your image or video file to be processed.
+ - **Output Type**: Select whether you want the output to be an "Image" or "Video".
+ 
+ ### Foreground Adjustments
+ 
+ - **Scale**: Adjust the size of the foreground element (0.1 to 5.0).
+ - **Aspect Ratio**: Modify the aspect ratio of the foreground (0.1 to 10.0).
+ - **X Position**: Move the foreground horizontally (-1000 to 1000 pixels).
+ - **Y Position**: Move the foreground vertically (-1000 to 1000 pixels).
+ - **Rotation**: Rotate the foreground (-360 to 360 degrees).
+ - **Opacity**: Adjust the transparency of the foreground (0.0 to 1.0).
+ - **Flip Horizontal**: Mirror the foreground horizontally.
+ - **Flip Vertical**: Mirror the foreground vertically.
+ 
+ ### Background Options
+ 
+ - **Remove Background**: Toggle background removal on/off.
+ - **Background Mode**: Choose between "transparent", "color", "image", or "video" backgrounds.
+ - **Background Color**: Select a color when "color" mode is chosen.
+ - **Background Image**: Upload an image to use as the background.
+ - **Background Video**: Upload a video to use as the background.
+ 
+ ### Advanced Settings
+ 
+ #### Removal Settings
+ 
+ - **Model**: Select the background removal model (e.g., "u2net", "isnet-general-use").
+ - **Output Format**: Choose between "RGBA" (with transparency) or "RGB".
+ - **Alpha Matting**: Enable for improved edge detection in complex images.
+ - **Alpha Matting Foreground Threshold**: Adjust sensitivity for foreground detection (0-255).
+ - **Alpha Matting Background Threshold**: Adjust sensitivity for background detection (0-255).
+ - **Post Process Mask**: Apply additional processing to the generated mask.
+ 
+ #### Chroma Key Settings
+ 
+ - **Chroma Key**: Choose a color ("none", "green", "blue", "red") for chroma keying.
+ - **Chroma Threshold**: Adjust the sensitivity of the chroma key effect (0-255).
+ - **Color Tolerance**: Set the range of colors to be considered part of the chroma key (0-255).
+ 
+ #### Effects
+ 
+ - **Invert Mask**: Invert the generated mask.
+ - **Feather Amount**: Soften the edges of the mask (0-100).
+ - **Edge Detection**: Apply an edge detection effect.
+ - **Edge Thickness**: Adjust the thickness of detected edges (1-10).
+ - **Edge Color**: Choose the color for detected edges.
+ - **Shadow**: Add a shadow effect to the foreground.
+ - **Shadow Blur**: Adjust the blurriness of the shadow (0-20).
+ - **Shadow Opacity**: Set the transparency of the shadow (0.0-1.0).
+ - **Color Adjustment**: Enable color adjustments for the result.
+ - **Brightness**: Adjust the brightness of the result (0.0-2.0).
+ - **Contrast**: Adjust the contrast of the result (0.0-2.0).
+ - **Saturation**: Adjust the color saturation of the result (0.0-2.0).
+ - **Mask Blur**: Apply blur to the mask (0-100).
+ - **Mask Expansion**: Expand or contract the mask (-100 to 100).
+ 
+ ### Output Settings
+ 
+ - **Image Format**: Choose the output format for images (PNG, JPEG, WEBP).
+ - **Video Format**: Select the output format for videos (MP4, AVI, MOV).
+ - **Video Quality**: Adjust the quality of the output video (0-100).
+ - **Use Custom Dimensions**: Enable to specify custom output dimensions.
+ - **Custom Width**: Set a custom width for the output.
+ - **Custom Height**: Set a custom height for the output.
+ 
+ ## Technical Implementation for Developers
+ 
+ The GeekyRemB extension is implemented as a Python class (`GeekyRemB`) with several key methods:
+ 
+ 1. `__init__`: Initializes the class and prepares for background removal sessions.
+ 
+ 2. `apply_chroma_key`: Implements chroma key functionality using OpenCV.
+ 
+ 3. `process_mask`: Handles mask processing operations like inversion, feathering, and expansion.
+ 
+ 4. `remove_background`: The core method that processes images, removing backgrounds and applying effects.
+ 
+ 5. `process_frame`: Processes individual video frames.
+ 
+ 6. `process_video`: Handles video processing, including background video integration.
+ 
+ The UI is built using Gradio components, with the `on_ui_tabs` function setting up the interface. Key functions include:
+ 
+ - `update_input_type`, `update_output_type`, `update_background_mode`, `update_custom_dimensions`: Dynamic UI updates based on user selections.
+ - `process_image` and `process_video`: Wrapper functions for image and video processing.
+ - `run_geeky_remb`: The main function that orchestrates the entire process based on user inputs.
+ 
+ The extension uses libraries like `rembg` for background removal, `PIL` for image processing, `cv2` for video handling, and `numpy` for array operations.
+ 
+ Developers can extend the functionality by adding new background removal models, implementing additional effects, or enhancing video processing capabilities. The modular structure allows for easy integration of new features.
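+ 
+ For orientation, the core `rembg` call that everything else builds on looks roughly like this. It is a minimal sketch rather than the extension's full pipeline; the input path is a placeholder, and the keyword arguments shown are the same ones the extension exposes in its UI:
+ 
+ ```python
+ from rembg import remove, new_session
+ from PIL import Image
+ 
+ # Reusing a session avoids reloading the model weights on every call,
+ # which is why the extension caches one per model name.
+ session = new_session("u2net")
+ 
+ img = Image.open("photo.png")  # placeholder input
+ cutout = remove(
+     img,
+     session=session,
+     alpha_matting=True,                       # optional edge refinement
+     alpha_matting_foreground_threshold=240,
+     alpha_matting_background_threshold=10,
+     post_process_mask=True,
+ )
+ cutout.save("cutout.png")  # RGBA image with a transparent background
+ ```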
+ 
+ ## Performance Considerations
+ 
+ - Background removal and video processing can be computationally intensive. Consider implementing progress bars or asynchronous processing for a better user experience with large files.
+ - The extension currently processes videos frame-by-frame. For longer videos, consider batch processing or multi-threading for improved performance (see the sketch after this list).
+ - Memory usage can be high when processing large images or videos. Implement memory management techniques for handling large files.
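+ 
+ As a rough illustration of the multi-threading idea (not part of the extension; it assumes per-frame processing is independent and that the underlying model session is safe to share across threads, which should be verified first):
+ 
+ ```python
+ from concurrent.futures import ThreadPoolExecutor
+ 
+ def process_frames_concurrently(frames, process_frame, workers=4):
+     """Process frames in parallel while preserving output order."""
+     with ThreadPoolExecutor(max_workers=workers) as pool:
+         return list(pool.map(process_frame, frames))
+ ```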
+ 
+ ## Future Enhancements
+ 
+ Potential areas for improvement include:
+ - Support for more background removal models
+ - Advanced video editing features (e.g., keyframe animation for foreground properties)
+ - Integration with other Automatic1111 extensions or workflows
+ - GPU acceleration for video processing
+ - Real-time preview for adjustments
+ 
+ This extension provides a powerful set of tools for background removal and image/video manipulation, bringing the capabilities of the ComfyUI node to the Automatic1111 environment.
+ 
+ ## Troubleshooting
+ 
+ - If the background removal is imperfect, try adjusting the alpha matting thresholds or using a different model.
+ - For subjects with similar colors to the background, experiment with the chroma key feature in combination with AI removal.
+ - If the resulting image looks unnatural, play with the shadow and color adjustment settings to better integrate the subject with the new background.
+ 
+ ## Contributing
+ 
+ We welcome contributions to GeekyRemB! If you have ideas for improvements or new features, feel free to open an issue or submit a pull request.
+ 
+ ## License
+ 
+ GeekyRemB is released under the MIT License. Feel free to use, modify, and distribute it as you see fit.
+ 
+ ## Acknowledgments
+ 
+ GeekyRemB is built upon the excellent [rembg](https://github.com/danielgatis/rembg) library and integrates seamlessly with the Automatic1111 Stable Diffusion Web UI. We're grateful to the developers of these projects for their fantastic work.
+ 
+ ---
+ 
+ <img width="1247" alt="Screenshot 2024-08-08 123752" src="https://github.com/user-attachments/assets/2491ce81-09a7-4d8a-9bdc-58d48f82dfaf">
+ 
+ <img width="1235" alt="Screenshot 2024-08-08 124700" src="https://github.com/user-attachments/assets/cd672db7-97fe-4c1b-b8a3-2b50fb152d04">
+ 
+ <img width="1238" alt="Screenshot 2024-08-08 123732" src="https://github.com/user-attachments/assets/f7b91764-041e-4ae6-a46a-a81eaa8692c9">
+ 
+ We hope you enjoy using GeekyRemB to create stunning images with ease! If you find it useful, consider starring the repository and sharing it with your friends and colleagues.
extensions/Automatic1111-Geeky-Remb/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .geeky_remb import GeekyRemBExtras
+ 
+ def __init__():
+     return [GeekyRemBExtras()]
extensions/Automatic1111-Geeky-Remb/install.py ADDED
@@ -0,0 +1,7 @@
+ import launch
+ 
+ if not launch.is_installed("rembg"):
+     launch.run_pip("install rembg", "requirement for Geeky RemB")
+ 
+ if not launch.is_installed("opencv-python"):
+     launch.run_pip("install opencv-python", "requirement for Geeky RemB")
extensions/Automatic1111-Geeky-Remb/requirements.txt ADDED
@@ -0,0 +1,4 @@
+ rembg
+ numpy
+ opencv-python
+ Pillow
extensions/Automatic1111-Geeky-Remb/scripts/geeky-remb.py ADDED
@@ -0,0 +1,435 @@
+ import os
+ import numpy as np
+ from rembg import remove, new_session
+ from PIL import Image, ImageOps, ImageFilter, ImageEnhance
+ import cv2
+ from tqdm import tqdm
+ import gradio as gr
+ from modules import script_callbacks, shared
+ import torch
+ import tempfile
+ 
+ class GeekyRemB:
+     def __init__(self):
+         self.session = None
+ 
+     def apply_chroma_key(self, image, color, threshold, color_tolerance=20):
+         hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
+         if color == "green":
+             lower = np.array([40 - color_tolerance, 40, 40])
+             upper = np.array([80 + color_tolerance, 255, 255])
+         elif color == "blue":
+             lower = np.array([90 - color_tolerance, 40, 40])
+             upper = np.array([130 + color_tolerance, 255, 255])
+         elif color == "red":
+             lower = np.array([0, 40, 40])
+             upper = np.array([20 + color_tolerance, 255, 255])
+         else:
+             return np.zeros(image.shape[:2], dtype=np.uint8)
+ 
+         mask = cv2.inRange(hsv, lower, upper)
+         mask = 255 - cv2.threshold(mask, threshold, 255, cv2.THRESH_BINARY)[1]
+         return mask
+ 
+     def process_mask(self, mask, invert_mask, feather_amount, mask_blur, mask_expansion):
+         if invert_mask:
+             mask = 255 - mask
+ 
+         if mask_expansion != 0:
+             kernel = np.ones((int(abs(mask_expansion)), int(abs(mask_expansion))), np.uint8)
+             if mask_expansion > 0:
+                 mask = cv2.dilate(mask, kernel, iterations=1)
+             else:
+                 mask = cv2.erode(mask, kernel, iterations=1)
+ 
+         if feather_amount > 0:
+             mask = cv2.GaussianBlur(mask, (0, 0), sigmaX=feather_amount)
+ 
+         if mask_blur > 0:
+             mask = cv2.GaussianBlur(mask, (0, 0), sigmaX=mask_blur)
+ 
+         return mask
+ 
+     def remove_background(self, image, background_image, model, alpha_matting, alpha_matting_foreground_threshold,
+                           alpha_matting_background_threshold, post_process_mask, chroma_key, chroma_threshold,
+                           color_tolerance, background_mode, background_color, output_format="RGBA",
+                           invert_mask=False, feather_amount=0, edge_detection=False,
+                           edge_thickness=1, edge_color="#FFFFFF", shadow=False, shadow_blur=5,
+                           shadow_opacity=0.5, color_adjustment=False, brightness=1.0, contrast=1.0,
+                           saturation=1.0, x_position=0, y_position=0, rotation=0, opacity=1.0,
+                           flip_horizontal=False, flip_vertical=False, mask_blur=0, mask_expansion=0,
+                           foreground_scale=1.0, foreground_aspect_ratio=None, remove_bg=True,
+                           use_custom_dimensions=False, custom_width=None, custom_height=None,
+                           output_dimension_source="Foreground"):
+         if self.session is None or self.session.model_name != model:
+             self.session = new_session(model)
+ 
+         bg_color = tuple(int(background_color.lstrip('#')[i:i+2], 16) for i in (0, 2, 4)) + (255,)
+         edge_color = tuple(int(edge_color.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))
+ 
+         pil_image = image if isinstance(image, Image.Image) else Image.fromarray(np.clip(255. * image[0].cpu().numpy(), 0, 255).astype(np.uint8))
+         original_image = np.array(pil_image)
+ 
+         if chroma_key != "none":
+             chroma_mask = self.apply_chroma_key(original_image, chroma_key, chroma_threshold, color_tolerance)
+             input_mask = chroma_mask
+         else:
+             input_mask = None
+ 
+         if remove_bg:
+             removed_bg = remove(
+                 pil_image,
+                 session=self.session,
+                 alpha_matting=alpha_matting,
+                 alpha_matting_foreground_threshold=alpha_matting_foreground_threshold,
+                 alpha_matting_background_threshold=alpha_matting_background_threshold,
+                 post_process_mask=post_process_mask,
+             )
+             rembg_mask = np.array(removed_bg)[:, :, 3]
+         else:
+             removed_bg = pil_image.convert("RGBA")
+             rembg_mask = np.full(pil_image.size[::-1], 255, dtype=np.uint8)
+ 
+         if input_mask is not None:
+             final_mask = cv2.bitwise_and(rembg_mask, input_mask)
+         else:
+             final_mask = rembg_mask
+ 
+         final_mask = self.process_mask(final_mask, invert_mask, feather_amount, mask_blur, mask_expansion)
+ 
+         orig_width, orig_height = pil_image.size
+         bg_width, bg_height = background_image.size if background_image else (orig_width, orig_height)
+ 
+         if use_custom_dimensions and custom_width and custom_height:
+             output_width, output_height = int(custom_width), int(custom_height)
+         elif output_dimension_source == "Background" and background_image:
+             output_width, output_height = bg_width, bg_height
+         else:
+             output_width, output_height = orig_width, orig_height
+ 
+         new_width = int(orig_width * foreground_scale)
+         if foreground_aspect_ratio is not None:
+             new_height = int(new_width / foreground_aspect_ratio)
+         else:
+             new_height = int(orig_height * foreground_scale)
+ 
+         fg_image = pil_image.resize((new_width, new_height), Image.LANCZOS)
+         fg_mask = Image.fromarray(final_mask).resize((new_width, new_height), Image.LANCZOS)
+ 
+         if background_mode == "transparent":
+             result = Image.new("RGBA", (output_width, output_height), (0, 0, 0, 0))
+         elif background_mode == "color":
+             result = Image.new("RGBA", (output_width, output_height), bg_color)
+         else:  # background_mode == "image"
+             if background_image is not None:
+                 result = background_image.resize((output_width, output_height), Image.LANCZOS).convert("RGBA")
+             else:
+                 result = Image.new("RGBA", (output_width, output_height), (0, 0, 0, 0))
+ 
+         if flip_horizontal:
+             fg_image = fg_image.transpose(Image.FLIP_LEFT_RIGHT)
+             fg_mask = fg_mask.transpose(Image.FLIP_LEFT_RIGHT)
+         if flip_vertical:
+             fg_image = fg_image.transpose(Image.FLIP_TOP_BOTTOM)
+             fg_mask = fg_mask.transpose(Image.FLIP_TOP_BOTTOM)
+ 
+         fg_image = fg_image.rotate(rotation, resample=Image.BICUBIC, expand=True)
+         fg_mask = fg_mask.rotate(rotation, resample=Image.BICUBIC, expand=True)
+ 
+         paste_x = x_position + (output_width - fg_image.width) // 2
+         paste_y = y_position + (output_height - fg_image.height) // 2
+ 
+         fg_rgba = fg_image.convert("RGBA")
+         fg_with_opacity = Image.new("RGBA", fg_rgba.size, (0, 0, 0, 0))
+         for x in range(fg_rgba.width):
+             for y in range(fg_rgba.height):
+                 r, g, b, a = fg_rgba.getpixel((x, y))
+                 fg_with_opacity.putpixel((x, y), (r, g, b, int(a * opacity)))
+ 
+         fg_mask_with_opacity = fg_mask.point(lambda p: int(p * opacity))
+ 
+         result.paste(fg_with_opacity, (paste_x, paste_y), fg_mask_with_opacity)
+ 
+         if edge_detection:
+             edge_mask = cv2.Canny(np.array(fg_mask), 100, 200)
+             edge_mask = cv2.dilate(edge_mask, np.ones((edge_thickness, edge_thickness), np.uint8), iterations=1)
+             edge_overlay = Image.new("RGBA", (output_width, output_height), (0, 0, 0, 0))
+             edge_overlay.paste(Image.new("RGB", fg_image.size, edge_color), (paste_x, paste_y), Image.fromarray(edge_mask))
+             result = Image.alpha_composite(result, edge_overlay)
+ 
+         if shadow:
+             shadow_mask = fg_mask.filter(ImageFilter.GaussianBlur(shadow_blur))
+             shadow_image = Image.new("RGBA", (output_width, output_height), (0, 0, 0, 0))
+             shadow_image.paste((0, 0, 0, int(255 * shadow_opacity)), (paste_x, paste_y), shadow_mask)
+             result = Image.alpha_composite(result, shadow_image.filter(ImageFilter.GaussianBlur(shadow_blur)))
+ 
+         if color_adjustment:
+             enhancer = ImageEnhance.Brightness(result)
+             result = enhancer.enhance(brightness)
+             enhancer = ImageEnhance.Contrast(result)
+             result = enhancer.enhance(contrast)
+             enhancer = ImageEnhance.Color(result)
+             result = enhancer.enhance(saturation)
+ 
+         if output_format == "RGB":
+             result = result.convert("RGB")
+ 
+         return result, fg_mask
+ 
+     def process_frame(self, frame, *args):
+         pil_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+         processed_frame, _ = self.remove_background(pil_frame, *args)
+         return cv2.cvtColor(np.array(processed_frame), cv2.COLOR_RGB2BGR)
+ 
+     def process_video(self, input_path, output_path, background_video_path, *args):
+         # reserve the background_image slot expected by remove_background; per-frame backgrounds are swapped in below
+         args = (None,) + tuple(args)
+         cap = cv2.VideoCapture(input_path)
+         fps = cap.get(cv2.CAP_PROP_FPS)
+         width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+         height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ 
+         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+         out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+ 
+         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ 
+         if background_video_path:
+             bg_cap = cv2.VideoCapture(background_video_path)
+             bg_total_frames = int(bg_cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ 
+         for frame_num in tqdm(range(total_frames), desc="Processing video"):
+             ret, frame = cap.read()
+             if not ret:
+                 break
+ 
+             if background_video_path:
+                 bg_frame_num = frame_num % bg_total_frames
+                 bg_cap.set(cv2.CAP_PROP_POS_FRAMES, bg_frame_num)
+                 bg_ret, bg_frame = bg_cap.read()
+                 if bg_ret:
+                     bg_frame_resized = cv2.resize(bg_frame, (width, height))
+                     args = list(args)
+                     args[0] = Image.fromarray(cv2.cvtColor(bg_frame_resized, cv2.COLOR_BGR2RGB))
+                     args = tuple(args)
+ 
+             processed_frame = self.process_frame(frame, *args)
+             out.write(processed_frame)
+ 
+         cap.release()
+         if background_video_path:
+             bg_cap.release()
+         out.release()
+ 
+         # Remux the output video into an MP4 container
+         temp_output = output_path + "_temp.mp4"
+         os.rename(output_path, temp_output)
+         os.system(f'ffmpeg -i "{temp_output}" -c copy "{output_path}"')
+         os.remove(temp_output)
+ 
+ def on_ui_tabs():
+     with gr.Blocks(analytics_enabled=False) as geeky_remb_tab:
+         gr.Markdown("# GeekyRemB: Background Removal and Image/Video Manipulation")
+ 
+         with gr.Row():
+             with gr.Column(scale=1):
+                 input_type = gr.Radio(["Image", "Video"], label="Input Type", value="Image")
+                 foreground_input = gr.Image(label="Foreground Image", type="pil", visible=True)
+                 foreground_video = gr.Video(label="Foreground Video", visible=False)
+ 
+                 with gr.Group():
+                     gr.Markdown("### Foreground Adjustments")
+                     foreground_scale = gr.Slider(label="Scale", minimum=0.1, maximum=5.0, value=1.0, step=0.1)
+                     foreground_aspect_ratio = gr.Slider(label="Aspect Ratio", minimum=0.1, maximum=10.0, value=1.0, step=0.1)
+                     x_position = gr.Slider(label="X Position", minimum=-1000, maximum=1000, value=0, step=1)
+                     y_position = gr.Slider(label="Y Position", minimum=-1000, maximum=1000, value=0, step=1)
+                     rotation = gr.Slider(label="Rotation", minimum=-360, maximum=360, value=0, step=0.1)
+                     opacity = gr.Slider(label="Opacity", minimum=0.0, maximum=1.0, value=1.0, step=0.01)
+                     flip_horizontal = gr.Checkbox(label="Flip Horizontal", value=False)
+                     flip_vertical = gr.Checkbox(label="Flip Vertical", value=False)
+ 
+             with gr.Column(scale=1):
+                 result_type = gr.Radio(["Image", "Video"], label="Output Type", value="Image")
+                 result_image = gr.Image(label="Result Image", type="pil", visible=True)
+                 result_video = gr.Video(label="Result Video", visible=False)
+ 
+                 with gr.Group():
+                     gr.Markdown("### Background Options")
+                     remove_background = gr.Checkbox(label="Remove Background", value=True)
+                     background_mode = gr.Radio(label="Background Mode", choices=["transparent", "color", "image", "video"], value="transparent")
+                     background_color = gr.ColorPicker(label="Background Color", value="#000000", visible=False)
+                     background_image = gr.Image(label="Background Image", type="pil", visible=False)
+                     background_video = gr.Video(label="Background Video", visible=False)
+ 
+         with gr.Accordion("Advanced Settings", open=False):
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("### Removal Settings")
+                     model = gr.Dropdown(label="Model", choices=["u2net", "u2netp", "u2net_human_seg", "u2net_cloth_seg", "silueta", "isnet-general-use", "isnet-anime"], value="u2net")
+                     output_format = gr.Radio(label="Output Format", choices=["RGBA", "RGB"], value="RGBA")
+                     alpha_matting = gr.Checkbox(label="Alpha Matting", value=False)
+                     alpha_matting_foreground_threshold = gr.Slider(label="Alpha Matting Foreground Threshold", minimum=0, maximum=255, value=240, step=1)
+                     alpha_matting_background_threshold = gr.Slider(label="Alpha Matting Background Threshold", minimum=0, maximum=255, value=10, step=1)
+                     post_process_mask = gr.Checkbox(label="Post Process Mask", value=False)
+ 
+                 with gr.Column():
+                     gr.Markdown("### Chroma Key Settings")
+                     chroma_key = gr.Dropdown(label="Chroma Key", choices=["none", "green", "blue", "red"], value="none")
+                     chroma_threshold = gr.Slider(label="Chroma Threshold", minimum=0, maximum=255, value=30, step=1)
+                     color_tolerance = gr.Slider(label="Color Tolerance", minimum=0, maximum=255, value=20, step=1)
+ 
+                 with gr.Column():
+                     gr.Markdown("### Effects")
+                     invert_mask = gr.Checkbox(label="Invert Mask", value=False)
+                     feather_amount = gr.Slider(label="Feather Amount", minimum=0, maximum=100, value=0, step=1)
+                     edge_detection = gr.Checkbox(label="Edge Detection", value=False)
+                     edge_thickness = gr.Slider(label="Edge Thickness", minimum=1, maximum=10, value=1, step=1)
+                     edge_color = gr.ColorPicker(label="Edge Color", value="#FFFFFF")
+                     shadow = gr.Checkbox(label="Shadow", value=False)
+                     shadow_blur = gr.Slider(label="Shadow Blur", minimum=0, maximum=20, value=5, step=1)
+                     shadow_opacity = gr.Slider(label="Shadow Opacity", minimum=0.0, maximum=1.0, value=0.5, step=0.1)
+                     color_adjustment = gr.Checkbox(label="Color Adjustment", value=False)
+                     brightness = gr.Slider(label="Brightness", minimum=0.0, maximum=2.0, value=1.0, step=0.1)
+                     contrast = gr.Slider(label="Contrast", minimum=0.0, maximum=2.0, value=1.0, step=0.1)
+                     saturation = gr.Slider(label="Saturation", minimum=0.0, maximum=2.0, value=1.0, step=0.1)
+                     mask_blur = gr.Slider(label="Mask Blur", minimum=0, maximum=100, value=0, step=1)
+                     mask_expansion = gr.Slider(label="Mask Expansion", minimum=-100, maximum=100, value=0, step=1)
+ 
+         with gr.Row():
+             gr.Markdown("### Output Settings")
+             image_format = gr.Dropdown(label="Image Format", choices=["PNG", "JPEG", "WEBP"], value="PNG")
+             video_format = gr.Dropdown(label="Video Format", choices=["MP4", "AVI", "MOV"], value="MP4")
+             video_quality = gr.Slider(label="Video Quality", minimum=0, maximum=100, value=95, step=1)
+             use_custom_dimensions = gr.Checkbox(label="Use Custom Dimensions", value=False)
+             custom_width = gr.Number(label="Custom Width", value=512, visible=False)
+             custom_height = gr.Number(label="Custom Height", value=512, visible=False)
+             output_dimension_source = gr.Radio(
+                 label="Output Dimension Source",
+                 choices=["Foreground", "Background"],
+                 value="Foreground",
+                 visible=True
+             )
+ 
+         run_button = gr.Button(label="Run GeekyRemB")
+ 
+         def update_input_type(choice):
+             return {
+                 foreground_input: gr.update(visible=choice == "Image"),
+                 foreground_video: gr.update(visible=choice == "Video"),
+             }
+ 
+         def update_output_type(choice):
+             return {
+                 result_image: gr.update(visible=choice == "Image"),
+                 result_video: gr.update(visible=choice == "Video"),
+             }
+ 
+         def update_background_mode(mode):
+             return {
+                 background_color: gr.update(visible=mode == "color"),
+                 background_image: gr.update(visible=mode == "image"),
+                 background_video: gr.update(visible=mode == "video"),
+             }
+ 
+         def update_custom_dimensions(use_custom):
+             return {
+                 custom_width: gr.update(visible=use_custom),
+                 custom_height: gr.update(visible=use_custom),
+                 output_dimension_source: gr.update(visible=not use_custom)
+             }
+ 
+         def process_image(image, background_image, *args):
+             geeky_remb = GeekyRemB()
+             result, _ = geeky_remb.remove_background(image, background_image, *args)
+             return result
+ 
+         def process_video(video_path, background_video_path, *args):
+             geeky_remb = GeekyRemB()
+             with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
+                 output_path = temp_file.name
+             geeky_remb.process_video(video_path, output_path, background_video_path, *args)
+             return output_path
+ 
+         def run_geeky_remb(input_type, foreground_input, foreground_video, result_type, model, output_format,
+                            alpha_matting, alpha_matting_foreground_threshold, alpha_matting_background_threshold,
+                            post_process_mask, chroma_key, chroma_threshold, color_tolerance, background_mode,
+                            background_color, background_image, background_video, invert_mask, feather_amount,
+                            edge_detection, edge_thickness, edge_color, shadow, shadow_blur, shadow_opacity,
+                            color_adjustment, brightness, contrast, saturation, x_position, y_position, rotation,
+                            opacity, flip_horizontal, flip_vertical, mask_blur, mask_expansion, foreground_scale,
+                            foreground_aspect_ratio, remove_background, image_format, video_format, video_quality,
+                            use_custom_dimensions, custom_width, custom_height, output_dimension_source):
+ 
+             args = (model, alpha_matting, alpha_matting_foreground_threshold,
+                     alpha_matting_background_threshold, post_process_mask, chroma_key, chroma_threshold,
+                     color_tolerance, background_mode, background_color, output_format,
+                     invert_mask, feather_amount, edge_detection, edge_thickness, edge_color, shadow, shadow_blur,
+                     shadow_opacity, color_adjustment, brightness, contrast, saturation, x_position,
+                     y_position, rotation, opacity, flip_horizontal, flip_vertical, mask_blur,
+                     mask_expansion, foreground_scale, foreground_aspect_ratio, remove_background,
+                     use_custom_dimensions, custom_width, custom_height, output_dimension_source)
+ 
+             if input_type == "Image" and result_type == "Image":
+                 result = process_image(foreground_input, background_image, *args)
+                 if image_format != "PNG":
+                     result = result.convert("RGB")
+                 with tempfile.NamedTemporaryFile(delete=False, suffix=f".{image_format.lower()}") as temp_file:
+                     result.save(temp_file.name, format=image_format, **({"quality": 95} if image_format == "JPEG" else {}))
+                     return temp_file.name, None
+             elif input_type == "Video" and result_type == "Video":
+                 output_video = process_video(foreground_video, background_video if background_mode == "video" else None, *args)
+                 if video_format != "MP4":
+                     temp_output = output_video + f"_temp.{video_format.lower()}"
+                     os.system(f'ffmpeg -i "{output_video}" -c:v libx264 -crf {int(20 - (video_quality / 5))} "{temp_output}"')
+                     os.remove(output_video)
+                     output_video = temp_output
+                 return None, output_video
+             elif input_type == "Image" and result_type == "Video":
+                 with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
+                     output_path = temp_file.name
+                     frame = cv2.cvtColor(np.array(foreground_input), cv2.COLOR_RGB2BGR)
+                     height, width = frame.shape[:2]
+                     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+                     out = cv2.VideoWriter(output_path, fourcc, 24, (width, height))
+                     for _ in range(24 * 5):  # 5 seconds at 24 fps
+                         out.write(frame)
+                     out.release()
+                     return None, process_video(output_path, background_video if background_mode == "video" else None, *args)
+             elif input_type == "Video" and result_type == "Image":
+                 cap = cv2.VideoCapture(foreground_video)
+                 ret, frame = cap.read()
+                 cap.release()
+                 if ret:
+                     pil_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+                     result = process_image(pil_frame, background_image, *args)
+                     if image_format != "PNG":
+                         result = result.convert("RGB")
+                     with tempfile.NamedTemporaryFile(delete=False, suffix=f".{image_format.lower()}") as temp_file:
+                         result.save(temp_file.name, format=image_format, **({"quality": 95} if image_format == "JPEG" else {}))
+                         return temp_file.name, None
+                 else:
+                     return None, None
+ 
+         input_type.change(update_input_type, inputs=[input_type], outputs=[foreground_input, foreground_video])
+         result_type.change(update_output_type, inputs=[result_type], outputs=[result_image, result_video])
+         background_mode.change(update_background_mode, inputs=[background_mode], outputs=[background_color, background_image, background_video])
+         use_custom_dimensions.change(update_custom_dimensions, inputs=[use_custom_dimensions], outputs=[custom_width, custom_height, output_dimension_source])
+ 
+         run_button.click(
+             fn=run_geeky_remb,
+             inputs=[
+                 input_type, foreground_input, foreground_video, result_type,
+                 model, output_format, alpha_matting, alpha_matting_foreground_threshold,
+                 alpha_matting_background_threshold, post_process_mask, chroma_key, chroma_threshold,
+                 color_tolerance, background_mode, background_color, background_image, background_video,
+                 invert_mask, feather_amount, edge_detection, edge_thickness, edge_color,
+                 shadow, shadow_blur, shadow_opacity, color_adjustment, brightness, contrast,
+                 saturation, x_position, y_position, rotation, opacity, flip_horizontal,
+                 flip_vertical, mask_blur, mask_expansion, foreground_scale, foreground_aspect_ratio,
+                 remove_background, image_format, video_format, video_quality,
+                 use_custom_dimensions, custom_width, custom_height, output_dimension_source
+             ],
+             outputs=[result_image, result_video]
+         )
+ 
+     return [(geeky_remb_tab, "GeekyRemB", "geeky_remb_tab")]
+ 
+ script_callbacks.on_ui_tabs(on_ui_tabs)
extensions/CFgfade/LICENSE ADDED
@@ -0,0 +1,24 @@
+ This is free and unencumbered software released into the public domain.
+ 
+ Anyone is free to copy, modify, publish, use, compile, sell, or
+ distribute this software, either in source code form or as a compiled
+ binary, for any purpose, commercial or non-commercial, and by any
+ means.
+ 
+ In jurisdictions that recognize copyright laws, the author or authors
+ of this software dedicate any and all copyright interest in the
+ software to the public domain. We make this dedication for the benefit
+ of the public at large and to the detriment of our heirs and
+ successors. We intend this dedication to be an overt act of
+ relinquishment in perpetuity of all present and future rights to this
+ software under copyright law.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ 
+ For more information, please refer to <https://unlicense.org>
extensions/CFgfade/README.md ADDED
@@ -0,0 +1,66 @@
+ # CFG fade #
+ ### extension for Forge webui for Stable Diffusion ###
+ 
+ ---
+ ## Install ##
+ Go to the **Extensions** tab, then **Install from URL**, and use the URL for this repository.
+ 
+ ---
+ #### 25/06/2024 ####
+ added/corrected saving of parameters: now they get written to *params.txt* too
+ 
+ ### update 15/05/2024 ###
+ Now I also patch the main sampling function, to calculate the new CFG there. This means that processing of the uncond can be skipped for low CFG, which is a free performance gain; the speed-up depends on the proportion of steps that end up with CFG <= 1.0. As far as CFG boost/fade is concerned, results are identical. Heuristic CFG and Reinhard CFG use the uncond for their calculations, but now they have to be skipped on steps when it is not available, so results from them can change. The difference seems minor; I tested an option to force uncond calculation but decided it was unnecessary.
+ 
+ ---
+ ### screenshot of UI ###
+ ![](screenshot.png "image of extension UI")
+ 
+ ---
+ ## Basic usage ##
+ A variety of ways to adjust CFG dynamically. Some are effective.
+ Settings used are saved with the image metadata, and restored when loading through the **PNG Info** tab.
+ 
+ ---
+ ## Advanced / Details ##
+ centre conds to mean: a simple calculation that keeps the channel averages centered around zero. Often seems like a free quality win (see the sketch below).
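+ 
+ A plausible reading of the centering step, in PyTorch-style pseudocode (a sketch only; the shipped code may center different tensors or average over different dimensions):
+ 
+ ```python
+ # subtract the per-channel spatial mean so each channel of the
+ # prediction averages to zero before the CFG mix
+ cond_pred = cond_pred - cond_pred.mean(dim=(-2, -1), keepdim=True)
+ uncond_pred = uncond_pred - uncond_pred.mean(dim=(-2, -1), keepdim=True)
+ ```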
+ 
+ CFG 1 until/after step: sets CFG to 1.0 for early/late steps. This is normally done based on sigmas, but is done by step here for consistency with the other settings. It takes priority over boosting/fading.
+ 
+ CFG boost: a linear scaling increase to the weight applied to CFG.
+ 
+ CFG fade: a linear scaling decrease to the weight applied to CFG. The boost and fade ranges can overlap.
+ 
+ Heuristic CFG: 0 is disabled. Set higher than the actual CFG to increase contrast, details, and sharpness, or lower for the opposite. Delaying the start can allow pushing harder, but setting it too high will cause burning.
+ 
+ Reinhard Target CFG: 0 is disabled. Uses Reinhard tonemapping to dynamically adjust CFG. Could allow using higher CFGs than normal.
+ 
+ Rescale CFG: 0 is disabled; 0.5-0.75 typically seems best. Often a free win.
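+ 
+ Taken together, the clamp/boost/fade controls define a per-step multiplier on the CFG scale; the patched sampler multiplies the incoming `cond_scale` by that weight and floors the result at 1.0. A sketch of the schedule as a pure function, reconstructed from the slider names and defaults above (illustrative, not the shipped code):
+ 
+ ```python
+ def weighted_cfg(step, total_steps, cfg,
+                  cfg1_until=0.1, cfg1_after=0.8,
+                  boost_start=0.2, boost_full=0.4, boost_factor=1.0,
+                  fade_start=0.5, fade_full=0.7, fade_factor=1.0):
+     frac = step / max(1, total_steps - 1)
+     # hard clamps take priority: CFG 1 for early/late steps
+     if frac < cfg1_until or frac > cfg1_after:
+         return 1.0
+     weight = 1.0
+     if frac > boost_start:  # linear ramp up to the boost factor
+         t = min(1.0, (frac - boost_start) / max(1e-6, boost_full - boost_start))
+         weight *= 1.0 + t * (boost_factor - 1.0)
+     if frac > fade_start:   # linear ramp down to the fade factor
+         t = min(1.0, (frac - fade_start) / max(1e-6, fade_full - fade_start))
+         weight *= 1.0 + t * (fade_factor - 1.0)
+     # the effective CFG is never allowed below 1.0
+     return max(1.0, cfg * weight)
+ ```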
+ 
+ 
+ ---
+ ## To do? ##
+ 1. perp_neg? The [Neutral Prompt extension](https://github.com/ljleb/sd-webui-neutral-prompt) is already aiming to cover this
+ 2. slew limiting: not convinced by this. Seems better overall to limit change using the scheduler, though the effects are different.
+ 3. different tonemappers
+ 4. XYZ support
+ 
+ ---
+ ## License ##
+ Public domain. Unlicense. Free to a good home.
+ All terrible code is my own. Use at your own risk.
+ 
+ ---
+ ## Credits ##
+ Thanks to Alex "mcmonkey" Goodwin for the Dynamic Thresholding extension (Forge built-in version). I started this project with zero knowledge, and this source got me started. The first - basic, unreleased - version was essentially hacked out of that extension.
+ 
+ Also thanks to https://github.com/Haoming02. I learned a lot about how to implement this from their work.
+ 
+ rescaleCFG and Reinhard tonemapping based on https://github.com/comfyanonymous/ComfyUI_experiments/
+ 
+ [combating-mean-drift-in-cfg (birchlabs.co.uk)](https://birchlabs.co.uk/machine-learning#combating-mean-drift-in-cfg)
+ 
+ ---
+ 
+ > Written with [StackEdit](https://stackedit.io/).
extensions/CFgfade/screenshot.png ADDED
extensions/CFgfade/scripts/__pycache__/forge_cfgfade.cpython-310.pyc ADDED
Binary file (7.75 kB)
extensions/CFgfade/scripts/forge_cfgfade.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr

from modules import scripts
import modules.shared as shared
from modules.script_callbacks import on_cfg_denoiser, remove_current_script_callbacks
import torch, math

#import torchvision.transforms.functional as TF

import ldm_patched.modules.samplers as LDM
import modules_forge.forge_sampler

# button to spit weighted cfg to console, better: gradio lineplot for display of weights


class CFGfadeForge(scripts.Script):
    weight = 1.0
    backup_sampling_function = None

    def __init__(self):
        self.boostStep = 0.0
        self.highStep = 0.5
        self.maxScale = 1.0
        self.fadeStep = 0.5
        self.zeroStep = 1.0
        self.minScale = 0.0
        self.reinhard = 1.0
        self.rcfgmult = 1.0
        self.centreMean = False
        self.heuristic = 0
        self.hStart = 0.0

    def title(self):
        return "CFG fade"

    def show(self, is_img2img):
        # make this extension visible in both txt2img and img2img tab.
        return scripts.AlwaysVisible

    def ui(self, *args, **kwargs):
        with gr.Accordion(open=False, label=self.title()):
            with gr.Row():
                enabled = gr.Checkbox(value=False, label='Enable modifications to CFG')
                cntrMean = gr.Checkbox(value=False, label='centre conds to mean')
                # scaleCFGs = gr.Checkbox(value=False, label='scale hCFG and rCFG')
            with gr.Row():
                lowCFG1 = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.1, label='CFG 1 until step')
                maxScale = gr.Slider(minimum=1.0, maximum=4.0, step=0.01, value=1.0, label='boost factor')
            with gr.Row():
                boostStep = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.2, label='CFG boost start step')
                minScale = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label='fade factor')
            with gr.Row():
                highStep = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.4, label='full boost at step')
                heuristic = gr.Slider(minimum=0.0, maximum=16.0, step=0.5, value=0, label='Heuristic CFG')
            with gr.Row():
                fadeStep = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label='CFG fade start step')
                hStart = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.0, label='... start step')
            with gr.Row():
                zeroStep = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.7, label='full fade at step')
                reinhard = gr.Slider(minimum=0.0, maximum=16.0, step=0.5, value=0.0, label='Reinhard CFG')
            with gr.Row():
                highCFG1 = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.8, label='CFG 1 after step')
                rcfgmult = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.0, label='Rescale CFG')

        self.infotext_fields = [
            (enabled, lambda d: enabled.update(value=("cfgfade_enabled" in d))),
            (cntrMean, "cfgfade_cntrMean"),
            (boostStep, "cfgfade_boostStep"),
            (highStep, "cfgfade_highStep"),
            (maxScale, "cfgfade_maxScale"),
            (fadeStep, "cfgfade_fadeStep"),
            (zeroStep, "cfgfade_zeroStep"),
            (minScale, "cfgfade_minScale"),
            (lowCFG1, "cfgfade_lowCFG1"),
            (highCFG1, "cfgfade_highCFG1"),
            (reinhard, "cfgfade_reinhard"),
            (rcfgmult, "cfgfade_rcfgmult"),
            (heuristic, "cfgfade_heuristic"),
            (hStart, "cfgfade_hStart"),
        ]

        return enabled, cntrMean, boostStep, highStep, maxScale, fadeStep, zeroStep, minScale, lowCFG1, highCFG1, reinhard, rcfgmult, heuristic, hStart

    # edited from ldm_patched/modules/samplers to add cond_scaling (initial 3 lines)
    def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None):
        cond_scale *= CFGfadeForge.weight
        if cond_scale < 1.0:
            cond_scale = 1.0

        edit_strength = sum((item['strength'] if 'strength' in item else 1) for item in cond)

        if math.isclose(cond_scale, 1.0) and model_options.get("disable_cfg1_optimization", False) == False:
            uncond_ = None
        else:
            uncond_ = uncond

        for fn in model_options.get("sampler_pre_cfg_function", []):
            model, cond, uncond_, x, timestep, model_options = fn(model, cond, uncond_, x, timestep, model_options)

        cond_pred, uncond_pred = LDM.calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options)

        if "sampler_cfg_function" in model_options:
            args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep,
                    "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options}
            cfg_result = x - model_options["sampler_cfg_function"](args)
        elif not math.isclose(edit_strength, 1.0):
            cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale * edit_strength
        else:
            cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale

        for fn in model_options.get("sampler_post_cfg_function", []):
            args = {"denoised": cfg_result, "cond": cond, "uncond": uncond, "model": model, "uncond_denoised": uncond_pred, "cond_denoised": cond_pred,
                    "sigma": timestep, "model_options": model_options, "input": x}
            cfg_result = fn(args)

        return cfg_result

    def patch(self, model):
        # sigmin = model.model.model_sampling.sigma(model.model.model_sampling.timestep(model.model.model_sampling.sigma_min))
        # sigmax = model.model.model_sampling.sigma(model.model.model_sampling.timestep(model.model.model_sampling.sigma_max))

        def sampler_cfgfade(args):
            cond = args["cond"]
            cond_scale = args["cond_scale"]

            if cond_scale == 1.0:
                return cond
            else:
                uncond = args["uncond"]

            # sometimes this scaling seems like a win, but only when heuristic/reinhard CFG is too high
            # if self.scaleCFGs == True:
            #     heuristic = max(1.0, self.heuristic * CFGfadeForge.weight) if (self.heuristic > 0.0) else 0.0
            #     reinhard = max(1.0, self.reinhard * CFGfadeForge.weight) if (self.reinhard > 0.0) else 0.0
            # else:
            heuristic = self.heuristic
            reinhard = self.reinhard

            if self.centreMean == True:  # better after, but value here too?
                for b in range(len(cond)):
                    for c in range(4):
                        cond[b][c] -= cond[b][c].mean()
                        uncond[b][c] -= uncond[b][c].mean()

            # cond_scale weighting now applied in sampling_function, can avoid processing of uncond for performance increase

            thisStep = shared.state.sampling_step
            lastStep = shared.state.sampling_steps

            # heuristic scaling, higher hcfg acts to boost contrast/detail/sharpness; low reduces; quantile has effect, but not significant for quality IMO
            noisePrediction = cond - uncond
            if heuristic != 0.0 and heuristic != cond_scale and thisStep >= self.hStart * lastStep:
                base = uncond + cond_scale * noisePrediction
                heur = uncond + heuristic * noisePrediction

                # center both on zero
                baseC = base - base.mean()
                heurC = heur - heur.mean()
                del base, heur

                # calc 99.0% quantiles - doesn't seem to have value as an option
                baseQ = torch.quantile(baseC.abs(), 0.99)
                heurQ = torch.quantile(heurC.abs(), 0.99)
                del baseC, heurC

                if baseQ != 0.0 and heurQ != 0.0:
                    cond *= (baseQ / heurQ)
                    uncond *= (baseQ / heurQ)
            # end: heuristic scaling

            # reinhard tonemap from comfy
            noisePrediction = cond - uncond
            if reinhard != 0.0 and reinhard != cond_scale:
                multiplier = 1.0 / cond_scale * reinhard
                noise_pred_vector_magnitude = (torch.linalg.vector_norm(noisePrediction, dim=(1)) + 0.0000000001)[:, None]
                noisePrediction /= noise_pred_vector_magnitude

                mean = torch.mean(noise_pred_vector_magnitude, dim=(1, 2, 3), keepdim=True)
                std = torch.std(noise_pred_vector_magnitude, dim=(1, 2, 3), keepdim=True)
                top = (std * 3 + mean) * multiplier

                noise_pred_vector_magnitude *= (1.0 / top)
                new_magnitude = noise_pred_vector_magnitude / (noise_pred_vector_magnitude + 1.0)
                new_magnitude *= top
                cond_scale *= new_magnitude
            # end: reinhard

            # rescaleCFG - maybe should be exclusive of other effects, but why restrict?
            result = uncond + cond_scale * noisePrediction
            if self.rcfgmult != 0.0:
                ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)
                ro_cfg = torch.std(result, dim=(1, 2, 3), keepdim=True)

                if ro_pos != 0.0 and ro_cfg != 0.0:
                    x_rescaled = result * (ro_pos / ro_cfg)
                    result = torch.lerp(result, x_rescaled, self.rcfgmult)
                    del x_rescaled
            # end: rescaleCFG
            del noisePrediction

            return result

        m = model.clone()
        m.set_model_sampler_cfg_function(sampler_cfgfade)
        return (m, )

    def denoiser_callback(self, params):
        lastStep = params.total_sampling_steps - 1
        thisStep = params.sampling_step
        sigma = params.sigma[0]

        lowCFG1 = self.lowCFG1 * lastStep
        highStep = self.highStep * lastStep
        boostStep = self.boostStep * lastStep
        highCFG1 = self.highCFG1 * lastStep
        fadeStep = self.fadeStep * lastStep
        zeroStep = self.zeroStep * lastStep

        if thisStep < lowCFG1:
            boostWeight = 0.0
        elif thisStep < boostStep:
            boostWeight = 1.0
        elif thisStep < highStep:
            boostWeight = 1.0 + (self.maxScale - 1.0) * ((thisStep - boostStep) / (highStep - boostStep))
        else:
            boostWeight = self.maxScale

        if thisStep > highCFG1:
            fadeWeight = 0.0
        else:
            if thisStep < fadeStep:
                fadeWeight = 1.0
            elif thisStep < zeroStep:
                fadeWeight = 1.0 - (thisStep - fadeStep) / (zeroStep - fadeStep)
            else:
                fadeWeight = 0.0

        # at this point, weight is in the range 0.0->1.0
        fadeWeight *= (1.0 - self.minScale)
        fadeWeight += self.minScale
        # now it is minimum->1.0

        CFGfadeForge.weight = boostWeight * fadeWeight

    def process_before_every_sampling(self, params, *script_args, **kwargs):
        enabled = script_args[0]
        if enabled:
            unet = params.sd_model.forge_objects.unet
            unet = CFGfadeForge.patch(self, unet)[0]
            params.sd_model.forge_objects.unet = unet

        return

    def process(self, params, *script_args, **kwargs):
        enabled, cntrMean, boostStep, highStep, maxScale, fadeStep, zeroStep, minScale, lowCFG1, highCFG1, reinhard, rcfgmult, heuristic, hStart = script_args

        if not enabled:
            return

        self.centreMean = cntrMean
        self.boostStep = boostStep
        self.highStep = highStep
        self.maxScale = maxScale
        self.fadeStep = fadeStep
        self.zeroStep = zeroStep
        self.minScale = minScale
        self.lowCFG1 = lowCFG1
        self.highCFG1 = highCFG1
        self.reinhard = reinhard
        self.rcfgmult = rcfgmult
        self.heuristic = heuristic
        self.hStart = hStart

        # logs, could save boost start/full only if boost factor > 1
        # similar for fade
        params.extra_generation_params.update(dict(
            cfgfade_enabled = enabled,
            cfgfade_cntrMean = cntrMean,
            cfgfade_boostStep = boostStep,
            cfgfade_highStep = highStep,
            cfgfade_maxScale = maxScale,
            cfgfade_fadeStep = fadeStep,
            cfgfade_zeroStep = zeroStep,
            cfgfade_minScale = minScale,
            cfgfade_lowCFG1 = lowCFG1,
            cfgfade_highCFG1 = highCFG1,
            cfgfade_reinhard = reinhard,
            cfgfade_rcfgmult = rcfgmult,
            cfgfade_heuristic = heuristic,
            cfgfade_hStart = hStart,
        ))

        # must log the parameters before fixing minScale
        self.minScale /= self.maxScale

        on_cfg_denoiser(self.denoiser_callback)

        if CFGfadeForge.backup_sampling_function is None:
            CFGfadeForge.backup_sampling_function = modules_forge.forge_sampler.sampling_function

        modules_forge.forge_sampler.sampling_function = CFGfadeForge.sampling_function
        return

    def postprocess(self, params, processed, *args):
        enabled = args[0]
        if enabled:
            if CFGfadeForge.backup_sampling_function is not None:
                modules_forge.forge_sampler.sampling_function = CFGfadeForge.backup_sampling_function

            remove_current_script_callbacks()
        return
extensions/CloneCleaner/.gitattributes ADDED
@@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto
extensions/CloneCleaner/.gitignore ADDED
@@ -0,0 +1,4 @@
__pycache__/
*.py[cod]
*$py.class
extensions/CloneCleaner/LICENSE ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 artyfacialintelagent

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
extensions/CloneCleaner/README.md ADDED
@@ -0,0 +1,86 @@
# CloneCleaner

An extension for Automatic1111 to work around Stable Diffusion's "clone problem". It automatically modifies your prompts with random names, nationalities, hair styles and hair colors to create more variation in generated people.

# What it does

Many generations of model finetuning and merging have greatly improved the image quality of Stable Diffusion 1.5 when generating humans - but at the cost of overtraining and loss of variability. This manifests as "clones", where batch generations using the same or similar prompts but different random seeds often have identical facial features.

CloneCleaner adds randomized tokens to your prompt that vary the look of a person for every generated image seed. For example, one seed might get "Elsie from Australia, long waist-length updo ginger hair" and the next "Samreen from Bangladesh, medium shoulder-length frizzy coffee-colored hair". This makes every seed quite unique and effectively mitigates the "sameface" problem of many popular but heavily overtrained Stable Diffusion models. So it's basically wildcards, except the tiresome setup work has been done for you and you're ready to go - with lots of easy customization options to control the randomization.

A key detail is that the token randomization seed is (by default) identical to the main image seed - this ensures that the same "person" will be generated again if you modify the prompt or reload the metadata.

# Installation in Automatic1111

Enter this url **manually** in auto1111's extension tab:

https://github.com/artyfacialintelagent/CloneCleaner.git

This first release of CloneCleaner is a public beta and **currently only works for female characters**. Options for male (and indeterminate gender, if I can get it to work) are coming soon-ish!

# How it works

Prompts are randomized using wildcards, except they're hardcoded in the extension with logic to match ethnicity with typical names and common hair colors for each country, in order to get consistent appearance and quality of the generated images. Main steps (see the sketch after this list):

1. Set the token randomization seed to the main image seed (or optionally a different random seed or a user-specified one).
2. Select a random REGION among the following: Europe (mainland incl. Russia), Anglosphere (US, Can, Aus, NZ, UK), Latin America, MENA (Middle East & North Africa), Sub-Saharan Africa, East Asia or South Asia.
3. Select a random COUNTRY in that region - but note that CloneCleaner only has a sample of countries for each region; the database is not (yet) comprehensive.
4. Select a random FIRST NAME for people in that country.
5. Select a random MAIN HAIR COLOR (only black, brown, red, blonde, other), weighted by what is typical in that country. Then select a SPECIFIC HAIR COLOR with more "colorful" (sorry) language for more variability. Color tokens are carefully selected and limited using tricks like reduced attention and prompt editing to minimize "color bleeding" into the rest of your prompt.
6. Select random HAIR STYLE and HAIR LENGTH.
7. Assemble the prompt insert: "[FIRST NAME] from [COUNTRY], [HAIR LENGTH] [HAIR STYLE] [SPECIFIC HAIR COLOR] hair". Insert it at the beginning or end of the prompt depending on user options.
8. Iterate for the remaining images of the batch.

Any of the above token components can be optionally excluded from the prompt randomization. There are also customization options to exclude specific regions, hair lengths and hair colors.
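To make the scheme concrete, here is a minimal sketch of that seeded randomization - a simplified paraphrase of `scripts/clonecleaner.py` below, where the tiny `TREE` is a made-up stand-in for the real `prompt_tree.yml` database and the weighting/exclusion logic is reduced to its essence:

```python
import random

# hypothetical miniature of the real prompt_tree.yml database
TREE = {
    "Europe": {"Sweden": {"names": ["Astrid", "Freja"], "weight": 3}},
    "South Asia": {"Bangladesh": {"names": ["Samreen", "Rafia"], "weight": 2}},
}
HAIR_LENGTH = {"long": ["long", "long (waist-length:1.2)"]}
HAIR_STYLE = {"long": ["updo", "frizzy", "ponytail"]}
HAIR_COLOR = {"red": ["ginger", "auburn"], "brown": ["coffee", "chestnut"]}

def declone_tokens(image_seed: int) -> str:
    # same seed in -> same "person" out; this is what ties the token
    # randomization to the main image seed
    rng = random.Random(image_seed)
    region = rng.choice(list(TREE))
    countries = list(TREE[region])
    weights = [TREE[region][c]["weight"] for c in countries]
    country = rng.choices(countries, weights=weights)[0]
    name = rng.choice(TREE[region][country]["names"])
    length_key = rng.choice(list(HAIR_LENGTH))
    length = rng.choice(HAIR_LENGTH[length_key])
    style = rng.choice(HAIR_STYLE[length_key])
    color = rng.choice(HAIR_COLOR[rng.choice(list(HAIR_COLOR))])
    return f"{name} from {country}, {length} {style} {color} hair"

print(declone_tokens(12345))  # e.g. "Samreen from Bangladesh, long ponytail ginger hair"
```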
# Sample images

The following images were produced using **consecutive seeds**, so they are **NOT CHERRY-PICKED** in any way. But to be fair, not all models work quite as well as these - some models are so overtrained that they just can't be saved. The sample PNGs hosted here on Github should contain full metadata, so you can download and inspect them in the PNGinfo tab, or send them to txt2img to reproduce them.

Right-click and select "Open image in a new tab" to view at 100%, or right-click and select "Save image as" to download.

### Absolute Reality v1

These images were created using the [Absolute Reality model](https://civitai.com/models/81458/absolutereality), from a simple test prompt I made up.

**Absolute Reality v1 (baseline model images)**
![absolutereality_seeds](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/d69ef1e4-6cf6-4401-97bb-bc0eeeef8a2a)
![absolutereality1](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/64f4fb70-0764-470d-a00a-07b8137000f5)

**Absolute Reality v1 + CloneCleaner (default settings)**
![absolutereality2](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/a495ac69-cf17-4be9-814f-389403280c39)

**Absolute Reality v1 + CloneCleaner (East Asia only)**
![absolutereality3](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/9aeeb238-e980-43a8-b5aa-c54baec0a41d)

**Absolute Reality v1 + CloneCleaner (Anglosphere + Europe only, short hair only)**
![absolutereality4](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/f0218f86-9d53-4127-b9d9-5233e4e46776)

### Photon v1

Using the prompt from the sample image hosted on the [Photon model page on Civitai](https://civitai.com/models/84728/photon), slightly modified to make it more SFW for Github.

**Photon v1 (baseline model images)**
![photon_seeds](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/33fad4a4-651b-4806-92cc-3c1a01d58fc9)
![photon1](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/cedea29e-c2c5-4c53-9569-2929db095971)

**Photon v1 + CloneCleaner (default settings)**
![photon2](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/5da77a58-06f2-45c7-8920-95bf25390de9)

**Photon v1 + CloneCleaner (Africa + South Asia only, no blonde or "other" hair color)**
![photon3](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/cf8759c4-9ada-4c6e-be6e-4829e3c34eeb)

**Photon v1 + CloneCleaner (Europe only, reddish hair only)**
![photon4](https://github.com/artyfacialintelagent/CloneCleaner/assets/137619889/58d1755a-1d0c-4c20-bc98-f00a80816147)

# Some tips

I am very happy with how well this simple prompt modification scheme works. But the best part about CloneCleaner is how it made me completely re-evaluate my opinion of many models, mostly positively. So be sure to retest your models using CloneCleaner - they may yet surprise you!

I recommend using **simple prompts** (< 50 tokens), **simple negatives** (< 30 tokens) and **limited attention weighting** (never > :1.2, except as noted below). An effective minimal negative prompt appears below. Just start with this as a basis and add whatever your image seems to need.

**Negative prompt**: *ugly, asian, underage, 3d render, cartoon, doll, (bad low worst quality:1.3)*

The token "asian" is included to counter the heavy bias towards Korean and Chinese women in most popular models. Asian characters should still appear and look perfectly fine even with this token included. Usually the attention setting of the final quality prompt should stick to the range 1.1-1.4, but a small number of Asian-oriented models can benefit from high (~1.5) or extreme values (up to 2.0!). Note that this is the exception that proves the rule - in most models such extreme weights would heavily "overcook" your images and destroy both quality and variability.

I rarely use attention weights above 1.0 in my prompts, and **never** above 1.2 for any tokens other than this general quality negative. In my experience this greatly benefits image consistency and reduces mutations, bad hands and other monstrosities, without having to explicitly include these things in your negatives.
extensions/CloneCleaner/prompt_tree.yml ADDED
@@ -0,0 +1,286 @@
hair:
  length:
    short: [short, (short-cut:1.2)]
    medium: ["medium (shoulder-length:1.2)", "medium (chin-length:1.2)"]
    long: [long, "long (waist-length:1.2)"]
  style:
    short: [straight, bangs, combed, "[undercut|punk]", undercut, brushed-back, slicked-back, brush-cut, pixie-cut, semi-buzzcut, buzzcut, mohawk-style, curly, spiky hairstyle, tussled, bob-cut, pigtails, twintails, ponytail]
    medium: [straight, windblown, bangs, combed, brushed-back, slicked-back, brush-cut, grown-out pixie-cut, wavy, curly, mohawk-style, frizzy, wild, messy, bob-cut, updo, pigtails, twintails, ponytail]
    long: [straight, windblown, bangs, combed, brushed-back, slicked-back, brush-cut, wavy, curly, frizzy, wild, messy, updo, glorious, pigtails, twintails, ponytail]
  color:
    blonde: [platinum-blonde, ash-blonde, champagne-blonde, sandy-blonde, honey-blonde, dirty-blonde, "[blonde:strawberry-blonde:.1]",
      "[blonde:vanilla-blonde:.2]", "[blonde:golden-blonde:.2]", "[blonde:reddish-blonde:.2]", "[blonde:rose-gold:.2]"]
    red: [auburn, ginger, peach, "[auburn:coral:.3]", "[auburn:copper:.3]", "[auburn:reddish:.4]", "[auburn:crimson:.4]", "[auburn:orange:.4]"]
    brown: [mocca, chestnut, hazelnut, caramel, cinnamon, coffee, chocolate, charcoal, "[honey-brown:.3]", "[golden-brown:.3]",
      "[noisette:.3]", "[bronze:.3]", "[mahogany:.2]", "[maroon:.4]", "[burgundy:.4]"]
    black: ["[black:.3]", "[nearly black:.3]", "[blue-black:.3]", "[violet-black:.3]", "[reddish-black:.3]"]
    other: ["[blonde:pastel:.2]", "[blonde:gray-blonde:.2]", "[blonde:silver-blonde:.2]", "[blonde:neon:.3]", "[blonde:multicolored:.3]",
      "[blonde:rainbow:.3]", "[blonde:pink:.3]", "[blonde:emerald:.3]", "[blonde:mint:.3]", "[blonde:teal:.3]", "[blonde:indigo:.3]",
      "[blonde:lilac:.3]", "[blonde:lavender:.3]", "[blonde:violet:.3]"]
  defaultweight:
    Europe: {blonde: 20, red: 15, brown: 35, black: 25, other: 5}
    Anglosphere: {blonde: 15, red: 10, brown: 35, black: 35, other: 5}
    MENA: {blonde: 5, red: 5, brown: 10, black: 75, other: 5}
    Sub-Saharan Africa: {blonde: 5, red: 5, brown: 10, black: 75, other: 5}
    Latin America: {blonde: 10, red: 5, brown: 30, black: 50, other: 5}
    East Asia: {blonde: 10, red: 5, brown: 10, black: 70, other: 5}
    South Asia: {blonde: 5, red: 5, brown: 10, black: 75, other: 5}
country:
  Europe:
    Sweden:
      weight: 3
      hair: {blonde: 35, red: 15, brown: 30, black: 15, other: 5}
      names: [Astrid, Freja, Maja, Ebba, Linnea, Agnes, Amelia, Wilma, Elin, Lovisa, Liv, Alva, Märta, Sigrid, Lova, Emelie, Ida, Åsa, Hilma, Frida, Tyra, Siv, Linn]
    Norway:
      weight: 2
      hair: {blonde: 35, red: 15, brown: 30, black: 15, other: 5}
      names: [Hedda, Tiril, Tone, Agnes, Silje, Vilde, Martine, Stine, Maren, Mari, Synne, Hanne, Sigrid, Ingvild, Lene, Wenche, Maiken, Ragnhild]
    Denmark:
      weight: 2
      hair: {blonde: 35, red: 15, brown: 30, black: 15, other: 5}
      names: [Nanna, Signe, Amalie, Asta, Nora, Mette, Kirsten, Hanne, Helle, Lene, Lærke, Gry, Laura, Esther, Mathilde, Vera]
    Finland:
      weight: 2
      hair: {blonde: 35, red: 15, brown: 30, black: 15, other: 5}
      names: [Aino, Helmi, Aada, Onni, Venla, Iida, Eevi, Lilja, Kaarina, Enni, Elli, Pihla, Hannele, Marjatta, Minea, Hilla, Kerttu, Siiri, Veera, Nelli]
    Scotland:
      weight: 2
      hair: {blonde: 20, red: 30, brown: 25, black: 20, other: 5}
      names: [Isla, Ava, Amelia, Jamie, Sarah, Lucy, Fiona, Evie, Iona, Amy, Eilidh, Bonnie, Sienna, Callie, Skye, Holly, Eilish, Caitlin, Mairi, Moira]
    Ireland:
      weight: 2
      hair: {blonde: 20, red: 30, brown: 25, black: 20, other: 5}
      names: [Aoife, Niamh, Caoimhe, Maeve, Cian, Fiadh, Orla, Cara, Molly, Shannon, Kelly, Eabha, Roisin, Riley, Kennedy, Sloane, Fiona, Aisling]
    Netherlands:
      weight: 3
      names: [Mila, Yara, Evi, Saar, Lieke, Noor, Lotte, Milou, Nova, Fenna, Isa, Roos, Loïs, Esmee, Fien, Lara, Jasmijn, Kiki, Veerle, Juul, Femke, Philou, Guusje]
    Belgium:
      weight: 3
      names: [Marie, Olivia, Camille, Mila, Alexis, Julie, Sacha, Martine, Nathalie, Monique, Rita, Nicole, Christine, Isabelle]
    Germany:
      weight: 4
      names: [Johanna, Heidi, Elke, Ilse, Ursula, Frieda, Helga, Anja, Eva, Leni, Berta, Eleonor, Frederike, Franka, Henriette, Irma, Luisa, Margarete]
    Czech Republic:
      weight: 3
      names: [Barbora, Agata, Adela, Marketa, Karolina, Aneta, Jana, Tereza, Andrea, Zuzana, Martina, Dominika, Ema]
    Poland:
      weight: 4
      names: [Antonina, Beata, Anastazja, Aniela, Genowefa, Adelajda, Brygida, Alina, Aldona, Estera, Ewelina, Adela, Augustyna, Aneta, Albina, Alfreda]
    England:
      weight: 4
      names: [Ivy, Amelia, Emily, Alice, Hannah, Eleanor, Daisy, Phoebe, Evie, Florence, Ellie, Poppy, Rosie, Anne, Harriet, Jane, Rebecca, Mary, Alice, Elizabeth, Abigail, Holly, Lucy, Nancy]
    France:
      weight: 4
      hair: {blonde: 15, red: 10, brown: 35, black: 35, other: 5}
      names: [Camille, Chloé, Amélie, Celine, Anais, Dominique, Elise, Genevieve, Colette, Delphine, Jeanne, Sylvie, Margot, Eloise, Cecile, Inès, Lucie, Léonie]
    Switzerland:
      weight: 3
      hair: {blonde: 15, red: 10, brown: 35, black: 35, other: 5}
      names: [Laura, Alina, Mila, Lena, Lina, Alessia, Chiara, Andrea, Jana, Malia, Elea, Liana, Lara, Lia, Livia, Melina]
    Austria:
      weight: 3
      names: [Johanna, Lena, Alina, Lea, Valentina, Amélie, Sophie, Magdalena, Marlene, Daniela, Hanna, Leni, Lara, Paulina, Frieda, Mathea, Liana]
    Hungary:
      weight: 3
      names: [Lili, Aliz, Bianka, Lilla, Dorina, Dora, Mira, Zita, Emese, Izabella, Liza, Regina, Vivien]
    Slovakia:
      weight: 3
      names: [Ema, Nina, Eliška, Viktória, Natália, Nela, Diana, Tamara, Júlia, Karolína, Michaela, Rebeka, Dominika, Lenka, Betka]
    Croatia:
      weight: 3
      names: [Elena, Nika, Marta, Marija, Andrea, Ines, Adrijana, Mirjana, Lucija, Petra, Rita, Iva, Mila, Leona, Tena, Franka, Katja, Maša, Nikol, Bruna]
    Serbia:
      weight: 3
      names: [Natalija, Ljubica, Milena, Marija, Adrijana, Mirjana, Jovana, Aleksa, Anja, Gordana, Tijana, Marina, Iskra, Petra]
    Portugal:
      weight: 3
      names: [Leonor, Matilde, Beatriz, Carolina, Mariana, Ana, Sofia, Francisca, Inês, Margarida, Benedita, Madalena, Joana, Camila]
    Spain:
      weight: 4
      hair: {blonde: 10, red: 5, brown: 30, black: 50, other: 5}
      names: [Ana, Sofia, Isabella, Lucia, Elena, Carmen, Lola, Mariana, Gabriela, Daniela, Adriana, Valeria, Esmeralda, Juana, Natalia]
    Italy:
      weight: 4
      hair: {blonde: 10, red: 5, brown: 30, black: 50, other: 5}
      names: [Francesca, Chiara, Giulia, Aurora, Bianca, Alessia, Andrea, Caterina, Lucia, Gaia, Isabella, Giovanna, Elisa, Gabriella, Valentina, Viola]
    Greece:
      weight: 3
      hair: {blonde: 10, red: 5, brown: 30, black: 50, other: 5}
      names: [Eleni, Katerina, Vasiliki, Sophia, Angeliki, Georgia, Dimitra, Konstandina, Paraskevi, Anastasia, Joanna, Danae, Athina, Melina, Ioanna]
    Romania:
      weight: 3
      names: [Antonia, Andreea, Ioana, Alexandra, Luminita, Alina, Bianca, Iolanda, Lavinia, Daniela, Stefana, Doina, Simona, Iulian, Claudia, Angelika]
    Turkey:
      weight: 4
      names: [Elif, Ayşe, Asli, Azra, Aysun, Emel, Dilara, Derya, Afet, Alara, Beyza, Damla, Meryem, Ceren, Nuray, Miray, Ayten, Esra, Eymen, Caria, Esma]
    Ukraine:
      weight: 4
      names: [Aleksandra, Daryna, Olena, Lavra, Ivanna, Anastasia, Bohdana, Maryska, Ionna, Alisa, Nyura, Alina, Aneta, Myroslava, Marynia, Galyna, Nastunye, Nastasiya]
    Belarus:
      weight: 3
      names: [Alina, Elena, Ekaterina, Anya, Faina, Darya, Svetlana, Aleksandra, Natasha, Arina, Alisa, Natalia]
    Russia:
      weight: 5
      names: [Anastasia, Irina, Galina, Anya, Alina, Svetlana, Mila, Ekaterina, Angelina, Oksana, Olga, Nikita, Natasha, Arina, Kira, Sasha, Alisa, Nadia, Alla, Darya]
  Anglosphere:
    England:
      weight: 4
      names: [Ivy, Amelia, Emily, Alice, Hannah, Eleanor, Daisy, Phoebe, Evie, Florence, Ellie, Poppy, Rosie, Anne, Harriet, Jane, Rebecca, Mary, Alice, Elizabeth, Abigail, Holly, Lucy, Nancy]
    USA:
      weight: 5
      names: [Charlotte, Amelia, Emma, Abigail, Alice, Mia, Eleanor, Sarah, Evelyn, Olivia, Grace, Sophia, Emily, Zuri, Destiny, Amari, Nia, Michelle, Brianna, Ebony,
        Eshe, Candice, Tiana, Toni, Jasmine, Simone, Tiffany, Nala, Camila, Sofia, Elena, Valentina, Natalia, Lucia, Alaia, Ximena, Valeria, Catalina]
    Canada:
      weight: 3
      names: [Emily, Amelia, Sarah, Mila, Ellie, Mackenzie, Claire, Lily, Zoey, Evelyn, Avery, Abigail, Madison, Madeline, Everly, Ivy, Hailey, Addison]
    Australia:
      weight: 3
      names: [Charlotte, Amelia, Ava, Sienna, Matilda, Evie, Zara, Mackenzie, Aria, Violet, Florence, Ivy, Elsie, Layla, Frankie, Daisy, Sadie, Stella, Millie]
    New Zealand:
      weight: 1
      names: [Lilly, Lainie, Heide, Emme, Opal, Lacey, Livvie, Adalie, Sidonie, Dunja, Gitta, Kiani, Kenia, Rubia, Ophélie, Zäzilie, Zina, Marli, Meya]
  MENA:
    Israel:
      weight: 2
      hair: {blonde: 15, red: 10, brown: 35, black: 35, other: 5}
      names: [Ariel, Leah, Rachel, Esther, Naomi, Talia, Shira, Yael, Lia, Aviva, Moriah, Delilah, Romi]
    Lebanon:
      weight: 2
      names: [Nour, Aya, Alaa, Souad, Reewa, Ilham, Salwa, Sawsane, Fatme, Dina, Elza, Ghofran, Farah]
    Iran:
      weight: 4
      names: [Bahar, Fatemeh, Anahita, Fariba, Parisa, Shirin, Darya, Zahra, Maryam, Yasmin, Rana, Mahsa, Nasrin, Cyra, Haleh]
    Syria:
      weight: 2
      names: [Amena, Reem, Qamar, Haya, Ousa, Rosarita, Aischa, Hala, Uri, Rojda, Layal, Ranim, Souzan]
    Saudi Arabia:
      weight: 3
      names: [Fatima, Jamila, Amira, Hanifa, Farida, Halima, Farah, Malika, Nabila, Habiba, Maha, Karima, Khalida, Maryam, Tahira, Ghada, Nadia, Naila]
    Egypt:
      weight: 4
      names: [Jana, Amunet, Mariam, Salma, Jamila, Aya, Anippe, Halima, Jomana, Isis, Renenutet, Farida, Dalia, Dendera, Eshe, Amal, Yasmin]
    Algeria:
      weight: 3
      names: [Yasmine, Amina, Inès, Amel, Meriem, Rania, Asma, Nesrine, Mina, Feriel, Anais, Ikram, Camelia, Chiraz, Dounia, Samira]
    Morocco:
      weight: 3
      names: [Amina, Fatima, Salma, Aziza, Faiza, Hajar, Ikram, Farah, Naima, Layla, Adil, Amal, Amira, Jamila, Marwa, Inès, Rania, Zineb]
  Sub-Saharan Africa:
    Nigeria:
      weight: 5
      names: [Tami, Remi, Sade, Ima, Toya, Tobi, Tomi, Tari, Ajah, Tona, Nneka, Dola, Chioma, Chimere, Zelie, Tonna, Timi, Amarachi, Chiamaka, Adanna, Chinyere]
    Ethiopia:
      weight: 4
      names: [Kayla, Aisha, Amara, Ayana, Zena, Sheba, Amira, Gabra, Jahzara, Makda, Nyala]
    Congo:
      weight: 4
      names: [Belvie, Mado, Rosine, Ruth, Malicia, Exaucée, Keicha, Grasnie, Benie, Chinelvie, Fiavina, Mireille, Aminata, Dayana, Peniel, Taime]
    Tanzania:
      weight: 3
      names: [Mila, Nia, Lulu, Malia, Imani, Asha, Zahra, Aida, Asma, Kali, Lela, Habiba, Rabia, Tisha, Shani, Khadija, Naima]
    South Africa:
      weight: 3
      hair: {blonde: 10, red: 5, brown: 30, black: 50, other: 5}
      names: [Melokuhle, Omphile, Iminathi, Lisakhanya, Lethabo, Amahle, Lesedi, Rethabile, Christal, Danelle, Alida, Marli, Gerrie, Mariette, Aletta, Leane, Lizanne, Lindie, Elize]
    Kenya:
      weight: 3
      names: [Mila, Imani, Cora, Nia, Kamaria, Nadia, Mumbi, Lulu, Zahra, Lela, Habiba, Shani, Khadija, Naima, Aluna, Kamaria, Zakiya]
    Uganda:
      weight: 3
      names: [Florence, Joan, Sarah, Alice, Tracy, Evelyn, Hanifah, Mariam, Charity, Martha, Angel, Catherine, Patricia, Blessing, Patience, Diana, Ritah]
    Sudan:
      weight: 3
      names: [Roaa, Ruba, Abrar, Felicia, Akong, Doaa, Esraa, Rayan, Nancy, Anila, Kate, Naelia]
    Angola:
      weight: 2
      names: [Ooli, Samara, Hannah, Jazmine, Leila, Hiki, Miranda, Rosine, Cynthia, Nattat, Lara, Christina, Abigail]
    Mozambique:
      weight: 2
      names: [Gaia, Gabrielle, Chelsea, Leila, Hedda, Hyacinta, Xiluva]
    Ghana:
      weight: 2
      names: [Beatrice, Sandra, Priscilla, Abigail, Angela, Esther, Barbara, Alberta, Doris, Linda, Cecilia, Blessing, Maame, Ruth, Patricia, Nancy, Eunice, Janet]
    Madagascar:
      weight: 2
      names: [Miora, Stephanie, Aina, Rotsy, Domoina, Cynthia, Raissa, Erica, Aliciah, Lili, Miantsa, Christelle, Linah, Tsiky, Hanitra]
    Cameroon:
      weight: 2
      names: [Joaddan, Armelle, Sandra, Esther, Daniella, Tracy, Rosy, Mylléna, Jade, Carine, Maeva, Tatiana]
  Latin America:
    Brazil:
      weight: 5
      names: [Maria, Fernanda, Ana, Gabriela, Aline, Francisca, Beatriz, Julia, Bruna, Antonia, Clara, Helena, Marcia, Flavia, Juliana, Patricia, Vitoria, Bianca]
    Mexico:
      weight: 4
      names: [Juana, Gabriela, Isabella, Elena, Alejandra, Lola, Ana, Lucia, Rosa, Veronica, Daniela, Fernanda, Valeria, Valentina, Carmen, Catalina, Leticia, Francisca, Mariana, Luna]
    Colombia:
      weight: 3
      names: [Isabella, Ana, Daniela, Mariana, Gabriela, Valentina, Luciana, Carmen, Adriana, Guadalupe, Antonia, Natalia, Lucia, Bianca, Clara, Lina, Rosa, Lola]
    Argentina:
      weight: 3
      names: [Camila, Agustina, Paula, Valentina, Ana, Julieta, Micaela, Martina, Daiana, Natalia, Carolina, Noelia, Lucia, Juana]
    Peru:
      weight: 3
      names: [Agata, Alejandra, Antonella, Arsenia, Beatriz, Claudia, Daniela, Dominga, Evita, Guadalupe, Irene, Karmina, Manuela, Maritza, Ofelia, Pilar, Roberta, Teresa, Valentina, Xaviera, Ximena]
    Venezuela:
      weight: 3
      names: [Gioia, Aymar, Consuelo, Mariángela, Gabriela, Vanessa, Selene, Susana, Paola, Federica, Mariam, Claudia, Carolina, Ligia]
    Chile:
      weight: 2
      names: [Isabella, Valentina, Laura, Catalina, Fernanda, Mila, Florencia, Julieta, Gabriela, Isidora, Martina, Josefa, Daniela, Antonia, Constanza, Agustina, Lucia]
    Ecuador:
      weight: 2
      names: [Ana, Gabriela, Maria, Camila, Carla, Elena, Graciela, Lucia, Giselle, Andrea, Daniela, Viviana, Juliana]
    Guatemala:
      weight: 2
      names: [Michelle, Andrea, Esmeralda, Rosa, Luz, Belen, Camila, Isabela, Angelita]
    Cuba:
      weight: 1
      names: [Adelgonda, Agata, Alegria, Alejandra, Angela, Arsenia, Beatriz, Bertalina, Daniela, Dominga, Evita, Georgina, Guadalupe, Havana, Ivelisse, Karmina, Manuela, Maritza, Pabla, Pilar, Roberta, Teresa, Ximena, Yamile]
  East Asia:
    China:
      weight: 5
      names: [Huan, Ling, Feng, Bao, Jie, Ping, Mei, Liu, Xia, Lian, Chun, Chang, Qing, Xiang, Xue, Qiao, Lei, Caihong, Ying, Jiayi, Lanying]
    Mongolia:
      weight: 1
      names: [Sunjidmaa, Minjinsor, Darimaa, Solongo, Yanjmaa, Ariunzaya, Khongorzul, Gankhuyagiin, Davaademberel, Batkhuyagiin, Batchimeg, Yanjaa, Ankhmaa, Namgar, Nominjin]
    South-Korea:
      weight: 2
      names: [Seo-yeon, Min-seo, Seo-hyeon, Su-bin, Yoo-jin, Min-ji, Seo-yeong, Ji-won, Soo-min, Ye-won, Mi-sun, Hyo-Sonn, Sun-Hee]
    Taiwan:
      weight: 1
      names: [Shu-fen, Shu-hui, Mei-ling, Ya-ting, Mei-hua, Li-hua, Shu-chen, Yi-chun, Tzu-hui, Xian-cha, Ching-min, Chun-yi, Jiau-hua, Ming-zhu, Pei-chien, Pei-yu, Shu-Fang, Xiu-xi]
    Hong Kong:
      weight: 1
      names: [Ellie, Avery, Charlotte, Renee, Cheryl, Evelyn, Hailey, Cynthia, Sylvia, Ally, Hannah, Chloe, Daphne, Aria, Riley, Emily, Mandy, Amber, Catherine, Natalie, Lily, Michelle]
    Japan:
      weight: 2
      names: [Aoi, Akari, Sakura, Ayaka, Akira, Aki, Hinata, Akane, Hikari, Keiko, Kei, Chiyo, Kaede, Ayame, Hanako, Ayako, Akiko, Chika, Ayumi, Yuki]
    Indonesia:
      weight: 2
      names: [Aulia, Budiwati, Bulan, Citra, Gendis, Indri, Kadek, Kemala, Komang, Lestari, Maharani, Melati, Mawar, Mayang, Ningrum, Ningsih, Permata, Pertiwi, Puspita, Putri, Ratih, Sinta, Siti, Wati, Yanti]
    Philippines:
      weight: 2
      names: [Joyce, Cindy, Ruby, Michelle, Serena, Jewel, Nancy, Alodia, Bianca, Chynna, Coleen, Diane, Isabel, Janine, Mariel, Nina, Rachel, Valerie]
    Vietnam:
      weight: 2
      names: [Anh, Bao, Chau, Chinh, Chi, Dao, Duong, Giang, Hoa, Huong, Khanh, Khuyen, Linh, Loan, Minh, Mai, Ngan, Nhung, Nguyet, Phuong, Quyen, Suong, Thanh, Thuy, Trinh, Vinh, Xinh, Xuyen]
    Thailand:
      weight: 2
      names: [Dara, Kamala, Mali, Han, Chariya, Arich, Anchali, Chanthira, Mani, Rune, Kanya, Khajee, Naiyana, Pakpao, Ying, Chalermwan, Chantana, Hansa, Maliwan]
  South Asia:
    India:
      weight: 5
      names: [Saanvi, Anika, Aditi, Prisha, Aarna, Divya, Parvati, Jiya, Riya, Aaradhya, Myra, Isha, Siya, Anaya, Anaisha, Jaya, Aarya, Ahana, Zara, Gayatri, Aarushi, Aarvi, Adya, Damini]
    Pakistan:
      weight: 2
      names: [Fatima, Inaya, Haniya, Maira, Zainab, Aafreen, Zoya, Neha, Aabish, Aaeedah, Eman, Aaminah, Aalia, Aamaal, Aamira, Aadila, Anabia, Anam, Sana]
    Bangladesh:
      weight: 2
      names: [Barsha, Farhan, Megh, Chandni, Rifah, Hridi, Labiba, Rafia, Disha, Muhaiminul, Abida, Oporajita, Sharmin, Fariha, Nawrin, Samreen, Anamika, Rafsan, Meghbalika, Sanjana, Ranya]
    Nepal:
      weight: 1
      names: [Dhanvi, Ditya, Gamya, Ehani, Chirasmi, Fatehjit, Binsa, Chaha, Bhavisana, Bilhana, Shanoli, Chirashree, Bhavaroopa, Kasmitha, Kopisha, Feba, Feshikha, Chaarumathi, Eenakshi, Chantin, Chimini, Baijanthi]
    Sri Lanka:
      weight: 1
      names: [Hiruni, Tharushi, Sachini, Bhagya, Nethmi, Janani, Sanduni, Thilini, Rashmi, Samadhi, Harshani, Upeksha, Nipuni, Dinithi, Chathu, Senuri, Hansani, Chalani, Shehara, Gayani]
extensions/CloneCleaner/scripts/__pycache__/clonecleaner.cpython-310.pyc ADDED
Binary file (8.68 kB).
extensions/CloneCleaner/scripts/clonecleaner.py ADDED
@@ -0,0 +1,223 @@
import gradio as gr
import os
import random
import sys
import yaml

from modules import scripts, script_callbacks, shared, paths
from modules.processing import Processed
from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton
from modules.ui import random_symbol, reuse_symbol, gr_show
from modules.generation_parameters_copypaste import parse_generation_parameters
from pprint import pprint

def read_yaml():
    promptfile = os.path.join(scripts.basedir(), "prompt_tree.yml")
    with open(promptfile, "r", encoding="utf8") as stream:
        prompt_tree = yaml.safe_load(stream)
    return prompt_tree

def get_last_params(declone_seed, gallery_index):
    filename = os.path.join(paths.data_path, "params.txt")
    if os.path.exists(filename):
        with open(filename, "r", encoding="utf8") as file:
            prompt = file.read()

    if gallery_index > 0:
        gallery_index -= 1
    params = parse_generation_parameters(prompt)
    if params.get("CC_use_main_seed", "") == "True":
        return [int(float(params.get("Seed", "-0.0"))) + gallery_index, gr_show(False)]
    else:
        return [int(float(params.get("CC_declone_seed", "-0.0"))) + gallery_index, gr_show(False)]

def sorted_difference(a, b):
    newlist = list(set(a).difference(b))
    newlist.sort()
    return newlist

class CloneCleanerScript(scripts.Script):
    prompt_tree = read_yaml()  # maybe make this an instance property later

    def title(self):
        return "CloneCleaner"

    # show menu in either txt2img or img2img
    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Accordion("CloneCleaner (beta!)", open=True):
            dummy_component = gr.Label(visible=False)
            regions = self.prompt_tree["country"].keys()
            hairlength = self.prompt_tree["hair"]["length"].keys()
            haircolor = self.prompt_tree["hair"]["color"].keys()
            with FormRow():
                with FormColumn(min_width=160):
                    is_enabled = gr.Checkbox(value=True, label="Enable CloneCleaner")
                with FormColumn(elem_id="CloneCleaner_gender"):
                    gender = gr.Radio(["female", "male", "generic"], value="female", label="Male & generic not yet implemented.", elem_classes="ghosted")
                    gender.style(container=False, item_container=False)
            with FormRow(elem_id="CloneCleaner_components"):
                components = ["name", "country", "hair length", "hair style", "hair color"]
                use_components = gr.CheckboxGroup(components, label="Use declone components", value=components)
            with FormRow(elem_id="CloneCleaner_midsection"):
                with FormGroup():
                    insert_start = gr.Checkbox(value=True, label="Put declone tokens at beginning of prompt")
                    declone_weight = gr.Slider(minimum=0.0, maximum=2.0, step=0.05, value=1.0, label="Weight of declone tokens", elem_id="CloneCleaner_slider")
                with FormGroup():
                    use_main_seed = gr.Checkbox(value=True, label="Use main image seed for decloning")
                    with FormRow(variant="compact", elem_id="CloneCleaner_seed_row", elem_classes="ghosted"):
                        declone_seed = gr.Number(label='Declone seed', value=-1, elem_id="CloneCleaner_seed")
                        declone_seed.style(container=False)
                        random_seed = ToolButton(random_symbol, elem_id="CloneCleaner_random_seed", label='Random seed')
                        reuse_seed = ToolButton(reuse_symbol, elem_id="CloneCleaner_reuse_seed", label='Reuse seed')
            with FormRow(elem_id="CloneCleaner_exclude_row") as exclude_row:
                exclude_regions = gr.Dropdown(choices=regions, label="Exclude regions", multiselect=True)
                exclude_hairlength = gr.Dropdown(choices=hairlength, label="Exclude hair lengths", multiselect=True)
                exclude_haircolor = gr.Dropdown(choices=haircolor, label="Exclude hair colors", multiselect=True)

        jstoggle = "() => {document.getElementById('CloneCleaner_seed_row').classList.toggle('ghosted')}"
        jsclickseed = "() => {setRandomSeed('CloneCleaner_seed')}"
        jsgetgalleryindex = "(x, y) => [x, selected_gallery_index()]"
        other_jstoggles = "() => {" + \
            "const labels = document.getElementById('CloneCleaner_components').getElementsByTagName('label');" + \
            "const excludelabels = document.getElementById('CloneCleaner_exclude_row').getElementsByTagName('label');" + \
            "excludelabels[1].classList.toggle('ghosted', !labels[2].firstChild.checked);" + \
            "excludelabels[2].classList.toggle('ghosted', !labels[4].firstChild.checked);" + \
            "}"
        use_main_seed.change(fn=None, _js=jstoggle)
        random_seed.click(fn=None, _js=jsclickseed, show_progress=False, inputs=[], outputs=[])
        reuse_seed.click(fn=get_last_params, _js=jsgetgalleryindex, show_progress=False, inputs=[declone_seed, dummy_component], outputs=[declone_seed, dummy_component])
        use_components.change(fn=None, _js=other_jstoggles)

        def list_from_params_key(key, params):
            regionstring = params.get(key, "")
            regions = regionstring.split(",") if regionstring else []
            return gr.update(value=regions)

        self.infotext_fields = [
            (is_enabled, "CloneCleaner enabled"),
            (gender, "CC_gender"),
            (insert_start, "CC_insert_start"),
            (declone_weight, "CC_declone_weight"),
            (use_main_seed, "CC_use_main_seed"),
            (declone_seed, "CC_declone_seed"),
            (exclude_regions, lambda params: list_from_params_key("CC_exclude_regions", params)),
            (exclude_hairlength, lambda params: list_from_params_key("CC_exclude_hairlength", params)),
            (exclude_haircolor, lambda params: list_from_params_key("CC_exclude_haircolor", params))
        ]
        return [is_enabled, gender, insert_start, declone_weight, use_main_seed, declone_seed, use_components, exclude_regions, exclude_hairlength, exclude_haircolor]

    def process(self, p, is_enabled, gender, insert_start, declone_weight, use_main_seed, declone_seed, use_components, exclude_regions, exclude_hairlength, exclude_haircolor):
        if not is_enabled:
            return

        if use_main_seed:
            declone_seed = p.all_seeds[0]
        elif declone_seed == -1:
            declone_seed = int(random.randrange(4294967294))
        else:
            declone_seed = int(declone_seed)

        # original_prompt = p.all_prompts[0]
        # settings = f"gender={gender}, beginning={insert_start}, declone_weight={declone_weight}, main_seed={use_main_seed}, " + \
        #     f"declone_seed={declone_seed}, exclude_regions={exclude_regions}"
        p.extra_generation_params["CloneCleaner enabled"] = True
        p.extra_generation_params["CC_gender"] = gender
        p.extra_generation_params["CC_insert_start"] = insert_start
        p.extra_generation_params["CC_declone_weight"] = declone_weight
        p.extra_generation_params["CC_use_main_seed"] = use_main_seed
        p.extra_generation_params["CC_declone_seed"] = declone_seed
        if exclude_regions:
            p.extra_generation_params["CC_exclude_regions"] = ",".join(exclude_regions)
        if exclude_hairlength:
            p.extra_generation_params["CC_exclude_hairlength"] = ",".join(exclude_hairlength)
        if exclude_haircolor:
            p.extra_generation_params["CC_exclude_haircolor"] = ",".join(exclude_haircolor)

        countrytree = self.prompt_tree["country"]
        hairtree = self.prompt_tree["hair"]

        regions = sorted_difference(countrytree.keys(), exclude_regions)
        hairlengths = sorted_difference(hairtree["length"].keys(), exclude_hairlength)
        haircolors = sorted_difference(hairtree["color"].keys(), exclude_haircolor)

        use_name = "name" in use_components
        use_country = "country" in use_components
        use_length = "hair length" in use_components
        use_style = "hair style" in use_components
        use_color = "hair color" in use_components

        for i, prompt in enumerate(p.all_prompts):  # for each image in batch
            rng = random.Random()
            seed = p.all_seeds[i] if use_main_seed else declone_seed + i
            rng.seed(seed)

            region = rng.choice(regions)
            countries = list(countrytree[region].keys())
            countryweights = [countrytree[region][cty]["weight"] for cty in countries]
            country = rng.choices(countries, weights=countryweights)[0]

            countrydata = countrytree[region][country]
            hairdata = countrydata.get("hair", hairtree["defaultweight"][region])
            maincolor = rng.choices(haircolors, weights=[hairdata[col] for col in haircolors])[0]
            color = rng.choice(hairtree["color"][maincolor])
            mainlength = rng.choice(hairlengths)
            length = rng.choice(hairtree["length"][mainlength])
            style = rng.choice(hairtree["style"][mainlength])
            name = rng.choice(countrydata["names"])

            inserted_prompt = ""

            if use_name or use_country:
                inserted_prompt += name if use_name else "person"
                inserted_prompt += " from " + country if use_country else ""

            if use_length or use_style or use_color:
                if inserted_prompt:
                    inserted_prompt += ", "
                if use_length:
                    inserted_prompt += length + " "
                if use_style:
                    inserted_prompt += style + " "
                if use_color:
                    inserted_prompt += color + " "
                inserted_prompt += "hair"

            if inserted_prompt:
                if declone_weight != 1:
                    inserted_prompt = f"({inserted_prompt}:{declone_weight})"

                if insert_start:
                    p.all_prompts[i] = inserted_prompt + ", " + prompt
                else:
                    p.all_prompts[i] = prompt + ", " + inserted_prompt

    # def postprocess_batch(self, p, *args, **kwargs):
    #     p.all_prompts[0] = p.prompt  # gets saved in file metadata AND in batch file metadata

    # def process_batch(self, p, *args, **kwargs):
    #     p.extra_generation_params["CC_TEST"] = "whatever"
    #     p.all_prompts[0] = p.prompt + " SUFFIX"

    def postprocess(self, p, processed, *args):
        with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
            p.all_prompts[0] = p.prompt
            processed = Processed(p, [], p.seed, "")
            file.write(processed.infotext(p, 0))

# read with shared.opts.prompt_database_path
def on_ui_settings():
    info = shared.OptionInfo("prompt_tree.yml", "CloneCleaner prompt database path", section=("clonecleaner", "CloneCleaner"))
    shared.opts.add_option("prompt_database_path", info)
    # shared.opts.add_option("option1", shared.OptionInfo(
    #     False,
    #     "option1 description",
    #     gr.Checkbox,
    #     {"interactive": True},
    #     section=('template', "Template"))
    # )


script_callbacks.on_ui_settings(on_ui_settings)
extensions/CloneCleaner/style.css ADDED
@@ -0,0 +1,33 @@
.ghosted {
    opacity: 0.5 !important;
    pointer-events: none !important;
}

/* #CloneCleaner_gender {
    padding: 0 0 10px 0 !important;
}

#CloneCleaner_gender > div > label {
    padding: var(--checkbox-label-padding) 0 !important;
} */

#CloneCleaner_components {
    margin-top: -10px;
}

#CloneCleaner_slider {
    width: 280px;
    margin-top: 15px;
}

#CloneCleaner_seed {
    min-width: min(80px, 100%);
}

#CloneCleaner_midsection {
    margin-top: 10px;
}

#CloneCleaner_exclude_row {
    margin-top: 10px;
}
extensions/ComfyUI-AutomaticCFG/.github/workflows/publish.yml ADDED
@@ -0,0 +1,21 @@
name: Publish to Comfy registry
on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - "pyproject.toml"

jobs:
  publish-node:
    name: Publish Custom Node to registry
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Publish Custom Node
        uses: Comfy-Org/publish-node-action@main
        with:
          ## Add your own personal access token to your Github Repository secrets and reference it here.
          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
extensions/ComfyUI-AutomaticCFG/README.md ADDED
@@ -0,0 +1,175 @@
# Abandon this boat and jump on [this one!](https://github.com/Extraltodeus/Skimmed_CFG)

If you liked the other functionality, I've re-created most of it in [this](https://github.com/Extraltodeus/pre_cfg_comfy_nodes_for_ComfyUI) repository.

# In short:

My own version "from scratch" of a self-rescaling CFG / anti-burn. It ain't much but it's honest work.

No more burns, and 160% faster gens with the warp drive node.

Now includes custom attention modifiers and interesting presets, as well as temperature scaling.

Also just tested: it works with PixArt Sigma.

Works with SD3 as long as you don't use any boost feature / cut the uncond (they are the same thing). 20 steps works nicely.

# Note:

The presets are interpreted with eval(). Make sure that you trust whoever sent a preset to you, as it may be used to execute malicious code.

# Update:

- Removed and perfected the "Uncond Zero" node and moved it to its [own repository](https://github.com/Extraltodeus/Uncond-Zero-for-ComfyUI/tree/main)
- Removed the temperature nodes and set up a [repository](https://github.com/Extraltodeus/Stable-Diffusion-temperature-settings) for these

# Usage:

![77889aa6-a2f6-48bf-8cde-17c9cbfda5fa](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/c725a06c-8966-43de-ab1c-569e2ff5b151)

### That's it!

- The "boost" toggle turns off the negative guidance when the sigmas are near 1. This doubles the inference speed.
- The negative strength lerps the cond and uncond (see the sketch below). Now in normal times the way I do this would burn things to the ground. But since it is initially an anti-burn, it just works. This idea is inspired by the [negative prompt weight](https://github.com/muerrilla/stable-diffusion-NPW) repository.
- I leave the advanced node for those who are interested. It will not be beneficial to those who do not feel like experimenting.
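A minimal sketch of what these two options amount to inside a CFG function - my own paraphrase, not the node's actual code, and `sigma` plus the tensor names are illustrative:

```python
import torch

def cfg_with_boost(cond, uncond, cond_scale, sigma, boost=True, neg_strength=1.0):
    # "negative strength": lerp the uncond toward the cond before applying CFG
    uncond = torch.lerp(uncond, cond, 1.0 - neg_strength)
    # "boost": once sigma is near 1, drop the negative entirely, which skips
    # one of the two model evaluations per step (hence roughly double speed)
    if boost and sigma <= 1.0:
        return cond
    return uncond + (cond - uncond) * cond_scale
```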
For 100 steps, this is where the sigmas reach 1:

![image](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/525199f1-2857-4027-a96e-105bc4b01860)

Note: the warp drive node improves the speed a lot more. The average speed is 160% of the normal one if used with the AYS scheduler (check the workflow images).

There seems to be a slight improvement in quality when using the boost with my other node [CLIP Vector Sculptor text encode](https://github.com/Extraltodeus/Vector_Sculptor_ComfyUI) using the "mean" normalization option.

# Just a note:

Your CFG won't be your CFG anymore. It is turned into a way to guide the CFG/final intensity/brightness/saturation. So don't hesitate to change your habits while trying!

# The rest of the explanation:

While this node is connected, it will turn your sampler's CFG scale into something else.
This method works by rescaling the CFG at each step by evaluating the potential average min/max values, aiming at a desired output intensity (by intensity I mean overall brightness/saturation/sharpness).
The base intensity has been arbitrarily chosen by me, and your sampler's CFG scale will make this target vary.
I have set the "central" CFG at 8, meaning that at 4 you will aim at half of the desired range while at 16 it will be doubled. This makes it feel slightly like the usual when you're around the normal values.

The CFG behavior during the sampling being automatically set for each channel makes it behave differently and therefore gives different outputs than the usual.
From my observations by printing the results while testing, it seems to go from around 16 at the beginning, to something like 4 near the middle, and it ends up near ~7.
These values might have changed since then - I've done a thousand tests in different ways - but that's to give you an idea; it's just me eyeballing the CLI's output.

I use the upper and lower 25% topk mean values as a reference to have some margin for manoeuvre.
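To make that concrete, here is a rough sketch of the kind of per-channel rescaling described above. This is entirely my paraphrase of the idea, not the node's actual code; `target_intensity` is a made-up stand-in for the arbitrarily chosen base intensity:

```python
import torch

def auto_rescale_scale(cond, uncond, user_cfg, target_intensity=1.0):
    # the user's CFG acts as a proportion around a "central" value of 8
    target = target_intensity * (user_cfg / 8.0)
    diff = cond - uncond
    b, c = diff.shape[0], diff.shape[1]
    flat = diff.reshape(b, c, -1)
    k = max(1, flat.shape[-1] // 4)  # upper/lower 25%
    top_mean = flat.topk(k, dim=-1).values.mean(dim=-1)
    bottom_mean = flat.topk(k, dim=-1, largest=False).values.mean(dim=-1)
    # pick a per-channel scale so the expected min/max span hits the target
    span = (top_mean - bottom_mean).clamp(min=1e-6)
    return (2.0 * target / span).reshape(b, c, 1, 1)
```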
62
+ It makes the sampling generate overall better quality images. I get much less if not any artifacts anymore and my more creative prompts also tends to give more random, in a good way, different results.
63
+
64
+ I attribute this more random yet positive behavior to the fact that it seems to be starting high and then since it becomes lower, it self-corrects and improvise, taking advantage of the sampling process a lot more.
65
+
66
+ It is dead simple to use and makes sampling more fun, from my perspective :)
67
+
68
+ You will find it in the model_patches category.
69
+
70
+ TLDR: set your CFG at 8 to try it. No more burned images and artifacts. The CFG is also a bit more sensitive because it's a proportion around 8.
71
+
72
+ A low scale like 4 also gives really nice results, since your CFG is not the CFG anymore.
73
+
74
+ # Updates:
75
+
76
+ Updated:
77
+ - Up to 28.5% faster generation speed than normal
78
+ - Negative weighting
79
+
80
+ 05.04.24:
81
+
82
+ - Updated to latest ComfyUI version. If you get an error: update your ComfyUI
83
+
84
+ 15.04.24
85
+
86
+ - ~~Added a "no uncond" node which completely disables the negative and doubles the speed while rescaling the latent space in the post-cfg function up until the sigmas are at 1 (or really, 6.86%). By itself it is not perfect and I'm searching for solutions to improve the final result. It seems to work better with dpmpp3m_sde/exponential if you're not using anything else. If you are using the PAG node then you don't need to care about the sampler but will generate at normal speed. Results will simply be different (I personally like them).~~ Use the warp drive instead.
87
+ - To use the [PAG node](https://github.com/pamparamm/sd-perturbed-attention/tree/master) ~~without the complete slow-down (if using the no-uncond node) or at least take advantage of the boost feature:~~
88
+ ~~- in the "pag_nodes.py" file look for "disable_cfg1_optimization=True"~~
89
+ ~~- set it to "disable_cfg1_optimization=False".~~ This is not necessary anymore because the dev modified it already :)
90
+ - For the negative lerp function in the other nodes the scale has been divided by two. So if you were using it at 10, set it to 5.
91
+
92
+ 16.04.24
93
+
94
+ - Added "uncond_start_percentage" as an experimental feature. This allows to start the guidance later as a way to try [Applying Guidance in a Limited Interval Improves
95
+ Sample and Distribution Quality in Diffusion Models](https://arxiv.org/pdf/2404.07724.pdf). A more accurate implementation [can be found here](https://github.com/ericbeyer/guidance_interval) :)
96
+
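+ A minimal sketch of that idea, mirroring this repository's pre-CFG hook (the sigma thresholds are illustrative, and the hook only takes effect once the repo's patched sampling function is installed):
+
+ ```python
+ def make_limited_interval_pre_cfg(sigma_high=10.0, sigma_low=0.3):
+     def pre_cfg(sigma, uncond, cond, cond_scale, **kwargs):
+         # outside the window the uncond is dropped, so only the cond
+         # prediction is computed and the CFG is effectively 1
+         if sigma[0] > sigma_high or sigma[0] < sigma_low:
+             uncond = None
+         return uncond, cond, cond_scale
+     return pre_cfg
+
+ # model.model_options["sampler_pre_cfg_function"] = make_limited_interval_pre_cfg()
+ ```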
97
+ 17.04.24
98
+
99
+ - reworked the advanced node and cleaned it up
100
+ - added timing on every option
101
+ - added a post-rescale node which helps fight deep-fried images a bit more in some special cases
102
+ - added a tweaked version of the Comfy SAG node with start/end sliders
103
+ - changed the start/end sliders: they now relate directly to the sigma values and are no longer percentages. ⚠
104
+
105
+ 01.05.24
106
+
107
+ - Actually working disabled uncond
108
+ - Added "warp drive" preset to test it out simply.
109
+
110
+ 03.05.24
111
+
112
+ - Allows unpatching `turn off the negative` by removing or disconnecting the node.
113
+ - added the "Warp drive" node. It uses a new method of my own cooking which uses the previous step to determin a negative. Cutting the generation time by half for approx 3/4 of the steps.
114
+ - added example workflows with 10-12 steps, but of course you can do more steps if needed. The goal is not to use fewer steps in general but to show that it is compatible.
115
+
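+ The warp drive's exact implementation lives in the node code, but the idea described above boils down to recycling the previous step's positive prediction as a stand-in negative instead of running a second model pass. A rough, illustrative sketch:
+
+ ```python
+ class WarpDriveSketch:
+     def __init__(self):
+         self.prev_cond_pred = None
+
+     def cfg(self, input_x, cond_pred, cond_scale):
+         # first step: no history yet, so fall back to the cond itself (CFG 1)
+         fake_uncond_pred = self.prev_cond_pred if self.prev_cond_pred is not None else cond_pred
+         self.prev_cond_pred = cond_pred.detach().clone()
+         cond = input_x - cond_pred
+         uncond = input_x - fake_uncond_pred
+         return uncond + cond_scale * (cond - uncond)
+ ```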
116
+ 14.05.24:
117
+ - fixed the little memory leak 😀
118
+ - temporarily disabled the timed SAG node because an update broke it.
119
+ - added node: **preset loader**. It can do what the others can and much more, like modifying the attention mechanisms! Mostly tested on SDXL 😀!
120
+ - Some presets are slower than others, just like the perturbed attention guidance for example. Most are just as fast.
121
+ - About some of the presets:
122
+ - For SD 1.5 "crossed conds customized 3" seems amazing!
123
+ - "Enhanced_details_and_tweaked_attention" works better on creative generations and less on characters.
124
+ - "Reinforced_style" does not regulates the CFG, gives MUCH MORE importance to your negative prompt, works with 12 steps and is slightly slower.
125
+ - "The red riding latent" only works with SDXL. It is an almost nonsensical mix of attention tweaks. Best with 12 steps and really nice with creative prompts. Has the tendency to give more red clothings to the characters. Hence the name.
126
+ - "Excellent_attention" is the default settings for the node described below. Don't delete it or the node won't work. 🙃
127
+ - "Potato Attention Guidance" is really nice for portraits of happy people...
128
+ - There are a bunch of others. I've generated examples which you can find in the example grids folder.
129
+ - Most of these have been tested on SDXL. I have very little idea of the effect on SD 1.5
130
+ - The presets are .json files and can contain a string which will go through eval(). ⚠
131
+ - Always check what is inside before running a preset that comes from someone else! I hesitated to share one named "actually shut down the computer in one minute", which would schedule a shutdown in 60 seconds, just to raise awareness, but that would bother more than it would help.
132
+ - added node: "**Excellent attention**" developped by myself and based on this [astonishingly easy to understand research paper!](https://github.com/Extraltodeus/temp/blob/main/very_science.jpg) But in short:
133
+ - Just try it. [Do it](https://www.youtube.com/watch?v=ZXsQAXx_ao0).
134
+ - This node allows disabling input layer 8 on self and cross attention.
135
+ - It also allows applying a custom modification on cross attention middle layer 0. The "patch_cond" and "patch_uncond" toggles relate to this modification.
136
+ - While the modification is definitely not very resource-costly, the light patch uses less VRAM.
137
+ - The multiplier influences the cross attention and reinforces prompt-following. But like, for real. It works better with the "light patch" toggle ON.
138
+ - I have ~~only~~ mostly tested it with SDXL.
139
+ - You can find a grid example of this node's settings in the "grids_example" folder.
140
+ - For some reason the Juggernaut model does not work with it and I have no idea why.
141
+ - Customizable attention modifiers:
142
+ - Check the ["attention_modifiers_explainations"](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/blob/main/workflows/attention_modifiers_explainations.png) in the workflows. 👀 It is basically a tutorial.
143
+ - Experiment with what each layer really does by using what is basically a bruteforcing node (the Attention modifiers tester node)!
144
+ - This is how you do a [Perturbed Attention Guidance](https://github.com/Extraltodeus/temp/blob/main/PAG.png) for example (a minimal patching sketch follows this list)
145
+
146
+
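+ For the curious: patching a single attention layer uses the same API as the temperature patcher in this repository. A minimal sketch (the value-passthrough below is the classic PAG-style self-attention perturbation; the layer choice is illustrative):
+
+ ```python
+ def identity_attention(q, k, v, extra_options, mask=None):
+     # replacing the attention map with identity simply returns the values
+     return v
+
+ m = model.clone()
+ m.set_model_attn1_replace(identity_attention, "input", 8)  # self-attention, input block 8
+ ```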
147
+
148
+ # Examples
149
+
150
+ ### 10 steps with only 2 having the negative enabled. So ~170% faster: 2.5 seconds on an RTX 4070
151
+
152
+ ![03640UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/673cb47a-095f-4ebb-a186-2f6a49ffd2e1)
153
+
154
+ ### cherry-picked 24 steps uncond fully disabled (these images are also workflows):
155
+
156
+
157
+ ![03619UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/19ee6edc-b039-4472-9ec2-c08ea15dd908)
158
+
159
+ ![03621UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/52695e1c-d28e-427f-9109-7ee4e4b3a5f6)
160
+
161
+ ![03604UI_00001_](https://github.com/Extraltodeus/ComfyUI-AutomaticCFG/assets/15731540/ca391b46-f587-43da-98da-a87e4982e4ed)
162
+
163
+
164
+
165
+ # Pro tip:
166
+
167
+ Did you know that my first activity is to write creative model merging functions?
168
+
169
+ While the code is too much of a mess to be shared, I do expose and share my models. You can find them in this [gallery](https://github.com/Extraltodeus/shared_models_galleries)! 😁
170
+
171
+
172
+ -----
173
+
174
+ Thanks to ComfyUI for existing and making such things so simple!
175
+
extensions/ComfyUI-AutomaticCFG/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ from .nodes import *
2
+ # from .experimental_temperature import ExperimentalTemperaturePatchSDXL,ExperimentalTemperaturePatchSD15,CLIPTemperaturePatch,CLIPTemperaturePatchDual
3
+ # from .nodes_sag_custom import *
4
+
5
+ NODE_CLASS_MAPPINGS = {
6
+ "Automatic CFG": simpleDynamicCFG,
7
+ "Automatic CFG - Negative": simpleDynamicCFGlerpUncond,
8
+ "Automatic CFG - Warp Drive": simpleDynamicCFGwarpDrive,
9
+ "Automatic CFG - Preset Loader": presetLoader,
10
+ "Automatic CFG - Excellent attention": simpleDynamicCFGExcellentattentionPatch,
11
+ "Automatic CFG - Advanced": advancedDynamicCFG,
12
+ "Automatic CFG - Post rescale only": postCFGrescaleOnly,
13
+ "Automatic CFG - Custom attentions": simpleDynamicCFGCustomAttentionPatch,
14
+ "Automatic CFG - Attention modifiers": attentionModifierParametersNode,
15
+ "Automatic CFG - Attention modifiers tester": attentionModifierBruteforceParametersNode,
16
+ "Automatic CFG - Unpatch function": simpleDynamicCFGunpatch,
17
+ # "Zero Uncond CFG - standalone patch (incompatible with the others)":uncondZeroNode,
18
+ # "Temperature settings SDXL": ExperimentalTemperaturePatchSDXL,
19
+ # "Temperature settings SD 1.5": ExperimentalTemperaturePatchSD15,
20
+ # "Temperature settings CLIP": CLIPTemperaturePatch,
21
+ # "Temperature separate settings CLIP SDXL": CLIPTemperaturePatchDual,
22
+ # "SAG delayed activation": SelfAttentionGuidanceCustom,
23
+ }
24
+
25
+ NODE_DISPLAY_NAME_MAPPINGS = {
26
+ "Automatic CFG - Unpatch function": "Automatic CFG - Unpatch function(Deprecated)",
27
+ }
extensions/ComfyUI-AutomaticCFG/experimental_temperature.py ADDED
@@ -0,0 +1,208 @@
1
+ import torch
2
+ from torch import nn, einsum
3
+ from einops import rearrange, repeat
4
+ import torch.nn.functional as F
5
+ import math
6
+ from comfy import model_management
7
+ import types
8
+ import os
9
+
10
+ def exists(val):
11
+ return val is not None
12
+
13
+ # better than a division by 0 hey
14
+ abs_mean = lambda x: torch.where(torch.isnan(x) | torch.isinf(x), torch.zeros_like(x), x).abs().mean()
15
+
16
+ class temperature_patcher():
17
+ def __init__(self, temperature, layer_name="None"):
18
+ self.temperature = temperature
19
+ self.layer_name = layer_name
20
+
21
+ # taken from comfy.ldm.modules
22
+ def attention_basic_with_temperature(self, q, k, v, extra_options, mask=None, attn_precision=None):
23
+ if isinstance(extra_options, int):
24
+ heads = extra_options
25
+ else:
26
+ heads = extra_options['n_heads']
27
+
28
+ b, _, dim_head = q.shape
29
+ dim_head //= heads
30
+ scale = dim_head ** -0.5
31
+
32
+ h = heads
33
+ q, k, v = map(
34
+ lambda t: t.unsqueeze(3)
35
+ .reshape(b, -1, heads, dim_head)
36
+ .permute(0, 2, 1, 3)
37
+ .reshape(b * heads, -1, dim_head)
38
+ .contiguous(),
39
+ (q, k, v),
40
+ )
41
+
42
+ # force cast to fp32 to avoid overflowing
43
+ if attn_precision == torch.float32:
44
+ sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
45
+ else:
46
+ sim = einsum('b i d, b j d -> b i j', q, k) * scale
47
+
48
+ del q, k
49
+
50
+ if exists(mask):
51
+ if mask.dtype == torch.bool:
52
+ mask = rearrange(mask, 'b ... -> b (...)')
53
+ max_neg_value = -torch.finfo(sim.dtype).max
54
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
55
+ sim.masked_fill_(~mask, max_neg_value)
56
+ else:
57
+ if len(mask.shape) == 2:
58
+ bs = 1
59
+ else:
60
+ bs = mask.shape[0]
61
+ mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
62
+ sim.add_(mask)
63
+
64
+ # attention, what we cannot get enough of
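+ # the only change vs stock attention: divide the logits by the temperature before
+ # the softmax; a temperature of 0 (or below) falls back to the logits' absolute mean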
65
+ sim = sim.div(self.temperature if self.temperature > 0 else abs_mean(sim)).softmax(dim=-1)
66
+
67
+ out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
68
+ out = (
69
+ out.unsqueeze(0)
70
+ .reshape(b, heads, -1, dim_head)
71
+ .permute(0, 2, 1, 3)
72
+ .reshape(b, -1, heads * dim_head)
73
+ )
74
+ return out
75
+
76
+ layers_SD15 = {
77
+ "input":[1,2,4,5,7,8],
78
+ "middle":[0],
79
+ "output":[3,4,5,6,7,8,9,10,11],
80
+ }
81
+
82
+ layers_SDXL = {
83
+ "input":[4,5,7,8],
84
+ "middle":[0],
85
+ "output":[0,1,2,3,4,5],
86
+ }
87
+
88
+ class ExperimentalTemperaturePatch:
89
+ @classmethod
90
+ def INPUT_TYPES(s):
91
+ required_inputs = {f"{key}_{layer}": ("BOOLEAN", {"default": False}) for key, layers in s.TOGGLES.items() for layer in layers}
92
+ required_inputs["model"] = ("MODEL",)
93
+ required_inputs["Temperature"] = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": 0.01})
94
+ required_inputs["Attention"] = (["both","self","cross"],)
95
+ return {"required": required_inputs}
96
+
97
+ TOGGLES = {}
98
+ RETURN_TYPES = ("MODEL","STRING",)
99
+ RETURN_NAMES = ("Model","String",)
100
+ FUNCTION = "patch"
101
+
102
+ CATEGORY = "model_patches/Automatic_CFG/Standalone_temperature_patches"
103
+
104
+ def patch(self, model, Temperature, Attention, **kwargs):
105
+ m = model.clone()
106
+ levels = ["input","middle","output"]
107
+ parameters_output = {level:[] for level in levels}
108
+ for key, toggle_enabled in kwargs.items():
109
+ current_level = key.split("_")[0]
110
+ if current_level in levels and toggle_enabled:
111
+ b_number = int(key.split("_")[1])
112
+ parameters_output[current_level].append(b_number)
113
+ patcher = temperature_patcher(Temperature,key)
114
+
115
+ if Attention in ["both","self"]:
116
+ m.set_model_attn1_replace(patcher.attention_basic_with_temperature, current_level, b_number)
117
+ if Attention in ["both","cross"]:
118
+ m.set_model_attn2_replace(patcher.attention_basic_with_temperature, current_level, b_number)
119
+
120
+ parameters_as_string = "\n".join(f"{k}: {','.join(map(str, v))}" for k, v in parameters_output.items())
121
+ parameters_as_string = f"Temperature: {Temperature}\n{parameters_as_string}\nAttention: {Attention}"
122
+ return (m, parameters_as_string,)
123
+
124
+ ExperimentalTemperaturePatchSDXL = type("ExperimentalTemperaturePatch_SDXL", (ExperimentalTemperaturePatch,), {"TOGGLES": layers_SDXL})
125
+ ExperimentalTemperaturePatchSD15 = type("ExperimentalTemperaturePatch_SD15", (ExperimentalTemperaturePatch,), {"TOGGLES": layers_SD15})
126
+
127
+ class CLIPTemperaturePatch:
128
+ @classmethod
129
+ def INPUT_TYPES(cls):
130
+ return {"required": { "clip": ("CLIP",),
131
+ "Temperature": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
132
+ }}
133
+
134
+ RETURN_TYPES = ("CLIP",)
135
+ FUNCTION = "patch"
136
+ CATEGORY = "model_patches/Automatic_CFG/Standalone_temperature_patches"
137
+
138
+ def patch(self, clip, Temperature):
139
+ def custom_optimized_attention(device, mask=None, small_input=True):
140
+ return temperature_patcher(Temperature).attention_basic_with_temperature
141
+
142
+ def new_forward(self, x, mask=None, intermediate_output=None):
143
+ optimized_attention = custom_optimized_attention(x.device, mask=mask is not None, small_input=True)
144
+
145
+ if intermediate_output is not None:
146
+ if intermediate_output < 0:
147
+ intermediate_output = len(self.layers) + intermediate_output
148
+
149
+ intermediate = None
150
+ for i, l in enumerate(self.layers):
151
+ x = l(x, mask, optimized_attention)
152
+ if i == intermediate_output:
153
+ intermediate = x.clone()
154
+ return x, intermediate
155
+
156
+ m = clip.clone()
157
+
158
+ clip_encoder_instance = m.cond_stage_model.clip_l.transformer.text_model.encoder
159
+ clip_encoder_instance.forward = types.MethodType(new_forward, clip_encoder_instance)
160
+
161
+ if getattr(m.cond_stage_model, "clip_g", None) is not None:
162
+ clip_encoder_instance_g = m.cond_stage_model.clip_g.transformer.text_model.encoder
163
+ clip_encoder_instance_g.forward = types.MethodType(new_forward, clip_encoder_instance_g)
164
+
165
+ return (m,)
166
+
167
+ class CLIPTemperaturePatchDual:
168
+ @classmethod
169
+ def INPUT_TYPES(cls):
170
+ return {"required": { "clip": ("CLIP",),
171
+ "Temperature": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
172
+ "CLIP_Model": (["clip_g","clip_l","both"],),
173
+ }}
174
+
175
+ RETURN_TYPES = ("CLIP",)
176
+ FUNCTION = "patch"
177
+ CATEGORY = "model_patches/Automatic_CFG/Standalone_temperature_patches"
178
+
179
+ def patch(self, clip, Temperature, CLIP_Model):
180
+ def custom_optimized_attention(device, mask=None, small_input=True):
181
+ return temperature_patcher(Temperature, "CLIP").attention_basic_with_temperature
182
+
183
+ def new_forward(self, x, mask=None, intermediate_output=None):
184
+ optimized_attention = custom_optimized_attention(x.device, mask=mask is not None, small_input=True)
185
+
186
+ if intermediate_output is not None:
187
+ if intermediate_output < 0:
188
+ intermediate_output = len(self.layers) + intermediate_output
189
+
190
+ intermediate = None
191
+ for i, l in enumerate(self.layers):
192
+ x = l(x, mask, optimized_attention)
193
+ if i == intermediate_output:
194
+ intermediate = x.clone()
195
+ return x, intermediate
196
+
197
+ m = clip.clone()
198
+
199
+ if CLIP_Model in ["clip_l","both"]:
200
+ clip_encoder_instance = m.cond_stage_model.clip_l.transformer.text_model.encoder
201
+ clip_encoder_instance.forward = types.MethodType(new_forward, clip_encoder_instance)
202
+
203
+ if CLIP_Model in ["clip_g","both"]:
204
+ if getattr(m.cond_stage_model, "clip_g", None) is not None:
205
+ clip_encoder_instance_g = m.cond_stage_model.clip_g.transformer.text_model.encoder
206
+ clip_encoder_instance_g.forward = types.MethodType(new_forward, clip_encoder_instance_g)
207
+
208
+ return (m,)
extensions/ComfyUI-AutomaticCFG/grids_example/Enhanced_details_and_tweaked_attention.png ADDED

Git LFS Details

  • SHA256: 23dbd409ff9526382892b0395c8889bce4afb4f16b2df8a4918abfcb375b2f3a
  • Pointer size: 132 Bytes
  • Size of remote file: 1.32 MB
extensions/ComfyUI-AutomaticCFG/grids_example/Iris_Lux_v1051_base_image_vanilla_sampling.png ADDED

Git LFS Details

  • SHA256: f36c78b64e07f85975e9dcb57e6a17787a3c95ffddc89b393c01c70864ed95e4
  • Pointer size: 132 Bytes
  • Size of remote file: 1.16 MB
extensions/ComfyUI-AutomaticCFG/grids_example/excellent_patch_a.jpg ADDED

Git LFS Details

  • SHA256: c54fad2902563d00fc5f8529a22bff7e85f82f787578cfb787b729e4f64040e3
  • Pointer size: 132 Bytes
  • Size of remote file: 1.66 MB
extensions/ComfyUI-AutomaticCFG/grids_example/excellent_patch_b.jpg ADDED

Git LFS Details

  • SHA256: 317817674e6b2a8754c953a1adc403cc43793098e8316367b7e9db66653034ee
  • Pointer size: 132 Bytes
  • Size of remote file: 1.61 MB
extensions/ComfyUI-AutomaticCFG/grids_example/presets.jpg ADDED

Git LFS Details

  • SHA256: 9afb639bf225151a4c4c9dc5a349b83c56d21fc88a43303e37b611579680f125
  • Pointer size: 132 Bytes
  • Size of remote file: 1.42 MB
extensions/ComfyUI-AutomaticCFG/nodes.py ADDED
@@ -0,0 +1,1286 @@
1
+ import math
2
+ from copy import deepcopy
3
+ from torch.nn import Upsample
4
+ import comfy.model_management as model_management
5
+ from comfy.model_patcher import set_model_options_patch_replace
6
+ from comfy.ldm.modules.attention import attention_basic, attention_xformers, attention_pytorch, attention_split, attention_sub_quad, optimized_attention_for_device
7
+ from .experimental_temperature import temperature_patcher
8
+ import comfy.samplers
9
+ import comfy.utils
10
+ import numpy as np
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from colorama import Fore, Style
14
+ import json
15
+ import os
16
+ import random
17
+ import base64
18
+
19
+ original_sampling_function = None
20
+ current_dir = os.path.dirname(os.path.realpath(__file__))
21
+ json_preset_path = os.path.join(current_dir, 'presets')
22
+ attnfunc = optimized_attention_for_device(model_management.get_torch_device())
23
+ check_string = "UEFUUkVPTi50eHQ="
24
+ support_string = b'CgoKClRoYW5rIHlvdSBmb3IgdXNpbmcgbXkgbm9kZXMhCgpJZiB5b3UgZW5qb3kgaXQsIHBsZWFzZSBjb25zaWRlciBzdXBwb3J0aW5nIG1lIG9uIFBhdHJlb24gdG8ga2VlcCB0aGUgbWFnaWMgZ29pbmchCgpWaXNpdDoKCmh0dHBzOi8vd3d3LnBhdHJlb24uY29tL2V4dHJhbHRvZGV1cwoKCgo='
25
+
26
+ def support_function():
27
+ if base64.b64decode(check_string).decode('utf8') not in os.listdir(current_dir):
28
+ print(base64.b64decode(check_string).decode('utf8'))
29
+ print(base64.b64decode(support_string).decode('utf8'))
30
+
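+ # Patched copy of comfy.samplers.sampling_function: same flow as stock, plus the
+ # pre-CFG hooks below and extra entries (cond_pos/cond_neg) passed to the CFG args.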
31
+ def sampling_function_patched(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None, **kwargs):
32
+
33
+ cond_copy = cond
34
+ uncond_copy = uncond
35
+
36
+ for fn in model_options.get("sampler_patch_model_pre_cfg_function", []):
37
+ args = {"model": model, "sigma": timestep, "model_options": model_options}
38
+ model, model_options = fn(args)
39
+
40
+ if "sampler_pre_cfg_function" in model_options:
41
+ uncond, cond, cond_scale = model_options["sampler_pre_cfg_function"](
42
+ sigma=timestep, uncond=uncond, cond=cond, cond_scale=cond_scale
43
+ )
44
+
45
+ if math.isclose(cond_scale, 1.0) and model_options.get("disable_cfg1_optimization", False) == False:
46
+ uncond_ = None
47
+ else:
48
+ uncond_ = uncond
49
+
50
+ conds = [cond, uncond_]
51
+
52
+ out = comfy.samplers.calc_cond_batch(model, conds, x, timestep, model_options)
53
+ cond_pred = out[0]
54
+ uncond_pred = out[1]
55
+
56
+ if "sampler_cfg_function" in model_options:
57
+ args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep,
58
+ "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options, "cond_pos": cond_copy, "cond_neg": uncond_copy}
59
+ cfg_result = x - model_options["sampler_cfg_function"](args)
60
+ else:
61
+ cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale
62
+
63
+ for fn in model_options.get("sampler_post_cfg_function", []):
64
+ args = {"denoised": cfg_result, "cond": cond_copy, "uncond": uncond_copy, "model": model, "uncond_denoised": uncond_pred, "cond_denoised": cond_pred,
65
+ "sigma": timestep, "model_options": model_options, "input": x}
66
+ cfg_result = fn(args)
67
+
68
+ return cfg_result
69
+
70
+ def monkey_patching_comfy_sampling_function():
71
+ global original_sampling_function
72
+
73
+ if original_sampling_function is None:
74
+ original_sampling_function = comfy.samplers.sampling_function
75
+ # Make sure to only patch once
76
+ if hasattr(comfy.samplers.sampling_function, '_automatic_cfg_decorated'):
77
+ return
78
+ comfy.samplers.sampling_function = sampling_function_patched
79
+ comfy.samplers.sampling_function._automatic_cfg_decorated = True # flag to check monkey patch
80
+
81
+ def make_sampler_pre_cfg_function(minimum_sigma_to_disable_uncond=0, maximum_sigma_to_enable_uncond=1000000, disabled_cond_start=10000,disabled_cond_end=10000):
82
+ def sampler_pre_cfg_function(sigma, uncond, cond, cond_scale, **kwargs):
83
+ if sigma[0] < minimum_sigma_to_disable_uncond or sigma[0] > maximum_sigma_to_enable_uncond:
84
+ uncond = None
85
+ if sigma[0] <= disabled_cond_start and sigma[0] > disabled_cond_end:
86
+ cond = None
87
+ return uncond, cond, cond_scale
88
+ return sampler_pre_cfg_function
89
+
90
+ def get_entropy(tensor):
91
+ hist = np.histogram(tensor.cpu(), bins=100)[0]
92
+ hist = hist / hist.sum()
93
+ hist = hist[hist > 0]
94
+ return -np.sum(hist * np.log2(hist))
95
+
96
+ def map_sigma(sigma, sigmax, sigmin):
97
+ return 1 + ((sigma - sigmax) * (0 - 1)) / (sigmin - sigmax)
98
+
99
+ def center_latent_mean_values(latent, per_channel, mult):
100
+ for b in range(len(latent)):
101
+ if per_channel:
102
+ for c in range(len(latent[b])):
103
+ latent[b][c] -= latent[b][c].mean() * mult
104
+ else:
105
+ latent[b] -= latent[b].mean() * mult
106
+ return latent
107
+
108
+ def get_denoised_ranges(latent, measure="hard", top_k=0.25):
109
+ chans = []
110
+ for x in range(len(latent)):
111
+ max_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=True).values
112
+ min_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=False).values
113
+ max_val = torch.mean(max_values).item()
114
+ min_val = abs(torch.mean(min_values).item()) if measure == "soft" else torch.mean(torch.abs(min_values)).item()
115
+ denoised_range = (max_val + min_val) / 2
116
+ chans.append(denoised_range**2 if measure == "hard_squared" else denoised_range)
117
+ return chans
118
+
119
+ def get_sigmin_sigmax(model):
120
+ model_sampling = model.model.model_sampling
121
+ sigmin = model_sampling.sigma(model_sampling.timestep(model_sampling.sigma_min))
122
+ sigmax = model_sampling.sigma(model_sampling.timestep(model_sampling.sigma_max))
123
+ return sigmin, sigmax
124
+
125
+ def gaussian_similarity(x, y, sigma=1.0):
126
+ diff = (x - y) ** 2
127
+ return torch.exp(-diff / (2 * sigma ** 2))
128
+
129
+ def check_skip(sigma, high_sigma_threshold, low_sigma_threshold):
130
+ return sigma > high_sigma_threshold or sigma < low_sigma_threshold
131
+
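+ # element-wise across the first dimension: keep the value with the largest magnitude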
132
+ def max_abs(tensors):
133
+ shape = tensors.shape
134
+ tensors = tensors.reshape(shape[0], -1)
135
+ tensors_abs = torch.abs(tensors)
136
+ max_abs_idx = torch.argmax(tensors_abs, dim=0)
137
+ result = tensors[max_abs_idx, torch.arange(tensors.shape[1])]
138
+ return result.reshape(shape[1:])
139
+
140
+ def gaussian_kernel(size: int, sigma: float):
141
+ x = torch.arange(size) - size // 2
142
+ gauss = torch.exp(-x**2 / (2 * sigma**2))
143
+ kernel = gauss / gauss.sum()
144
+ return kernel.view(1, size) * kernel.view(size, 1)
145
+
146
+ def blur_tensor(tensor, kernel_size = 9, sigma = 2.0):
147
+ tensor = tensor.unsqueeze(0)
148
+ C = tensor.size(1)
149
+ kernel = gaussian_kernel(kernel_size, sigma)
150
+ kernel = kernel.expand(C, 1, kernel_size, kernel_size).to(tensor.device).to(dtype=tensor.dtype, device=tensor.device)
151
+ padding = kernel_size // 2
152
+ tensor = F.pad(tensor, (padding, padding, padding, padding), mode='reflect')
153
+ blurred_tensor = F.conv2d(tensor, kernel, groups=C)
154
+ return blurred_tensor.squeeze(0)
155
+
156
+ def smallest_distances(tensors):
157
+ if all(torch.equal(tensors[0], tensor) for tensor in tensors[1:]):
158
+ return tensors[0]
159
+ set_device = tensors.device
160
+ min_val = torch.full(tensors[0].shape, float("inf")).to(set_device)
161
+ result = torch.zeros_like(tensors[0])
162
+ for idx1, t1 in enumerate(tensors):
163
+ temp_diffs = torch.zeros_like(tensors[0])
164
+ for idx2, t2 in enumerate(tensors):
165
+ if idx1 != idx2:
166
+ temp_diffs += torch.abs(torch.sub(t1, t2))
167
+ min_val = torch.minimum(min_val, temp_diffs)
168
+ mask = torch.eq(min_val,temp_diffs)
169
+ result[mask] = t1[mask]
170
+ return result
171
+
172
+ def rescale(tensor, multiplier=2):
173
+ batch, seq_length, features = tensor.shape
174
+ H = W = int(seq_length**0.5)
175
+ tensor_reshaped = tensor.view(batch, features, H, W)
176
+ new_H = new_W = int(H * multiplier)
177
+ resized_tensor = F.interpolate(tensor_reshaped, size=(new_H, new_W), mode='bilinear', align_corners=False)
178
+ return resized_tensor.view(batch, new_H * new_W, features)
179
+
180
+ # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475
181
+ def slerp(high, low, val):
182
+ dims = low.shape
183
+
184
+ #flatten to batches
185
+ low = low.reshape(dims[0], -1)
186
+ high = high.reshape(dims[0], -1)
187
+
188
+ low_norm = low/torch.norm(low, dim=1, keepdim=True)
189
+ high_norm = high/torch.norm(high, dim=1, keepdim=True)
190
+
191
+ # in case we divide by zero
192
+ low_norm[low_norm != low_norm] = 0.0
193
+ high_norm[high_norm != high_norm] = 0.0
194
+
195
+ omega = torch.acos((low_norm*high_norm).sum(1))
196
+ so = torch.sin(omega)
197
+ res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
198
+ return res.reshape(dims)
199
+
200
+ normalize_tensor = lambda x: x / x.norm()
201
+
202
+ def random_swap(tensors, proportion=1):
203
+ num_tensors = tensors.shape[0]
204
+ if num_tensors < 2: return tensors[0]  # keep the return type consistent with the final return
205
+ tensor_size = tensors[0].numel()
206
+ if tensor_size < 100: return tensors[0]
207
+
208
+ true_count = int(tensor_size * proportion)
209
+ mask = torch.cat((torch.ones(true_count, dtype=torch.bool, device=tensors[0].device),
210
+ torch.zeros(tensor_size - true_count, dtype=torch.bool, device=tensors[0].device)))
211
+ mask = mask[torch.randperm(tensor_size)].reshape(tensors[0].shape)
212
+ if num_tensors == 2 and proportion < 1:
213
+ index_tensor = torch.ones_like(tensors[0], dtype=torch.int64, device=tensors[0].device)
214
+ else:
215
+ index_tensor = torch.randint(1 if proportion < 1 else 0, num_tensors, tensors[0].shape, device=tensors[0].device)
216
+ for i, t in enumerate(tensors):
217
+ if i == 0: continue
218
+ merge_mask = (index_tensor == i) & mask  # parentheses matter: & binds tighter than ==
219
+ tensors[0][merge_mask] = t[merge_mask]
220
+ return tensors[0]
221
+
222
+ def multi_tensor_check_mix(tensors):
223
+ if tensors[0].numel() < 2 or len(tensors) < 2:
224
+ return tensors[0]
225
+ ref_tensor_shape = tensors[0].shape
226
+ sequence_tensor = torch.arange(tensors[0].numel(), device=tensors[0].device) % len(tensors)
227
+ reshaped_sequence = sequence_tensor.view(ref_tensor_shape)
228
+ for i in range(len(tensors)):
229
+ if i == 0: continue
230
+ mask = reshaped_sequence == i
231
+ tensors[0][mask] = tensors[i][mask]
232
+ return tensors[0]
233
+
234
+ def sspow(input_tensor, p=2):
235
+ return input_tensor.abs().pow(p) * input_tensor.sign()
236
+
237
+ def sspown(input_tensor, p=2):
238
+ abs_t = input_tensor.abs()
239
+ abs_t = (abs_t - abs_t.min()) / (abs_t.max() - abs_t.min())
240
+ return abs_t.pow(p) * input_tensor.sign()
241
+
242
+ def gradient_merge(tensor1, tensor2, start_value=0, dim=0):
243
+ if torch.numel(tensor1) <= 1: return tensor1
244
+ if dim >= tensor1.dim(): dim = 0
245
+ size = tensor1.size(dim)
246
+ alpha = torch.linspace(start_value, 1-start_value, steps=size, device=tensor1.device).view([-1 if i == dim else 1 for i in range(tensor1.dim())])
247
+ return tensor1 * alpha + tensor2 * (1 - alpha)
248
+
249
+ def save_tensor(input_tensor,name):
250
+ if "rndnum" in name:
251
+ rndnum = str(random.randint(100000,999999))
252
+ name = name.replace("rndnum", rndnum)
253
+ output_directory = os.path.join(current_dir, 'saved_tensors')
254
+ os.makedirs(output_directory, exist_ok=True)
255
+ output_file_path = os.path.join(output_directory, f"{name}.pt")
256
+ torch.save(input_tensor, output_file_path)
257
+ return input_tensor
258
+
259
+ def print_and_return(input_tensor, *args):
260
+ for what_to_print in args:
261
+ print(" ",what_to_print)
262
+ return input_tensor
263
+
264
+ # Experimental testings
265
+ def normal_attention(q, k, v, mask=None):
266
+ attention_scores = torch.matmul(q, k.transpose(-2, -1))
267
+ d_k = k.size(-1)
268
+ attention_scores = attention_scores / torch.sqrt(torch.tensor(d_k, dtype=torch.float32))
269
+ if mask is not None:
270
+ attention_scores = attention_scores.masked_fill(mask == 0, float('-inf'))
271
+ attention_weights = F.softmax(attention_scores, dim=-1)
272
+ output = torch.matmul(attention_weights, v)
273
+ return output
274
+
275
+ def split_heads(x, n_heads):
276
+ batch_size, seq_length, hidden_dim = x.size()
277
+ head_dim = hidden_dim // n_heads
278
+ x = x.view(batch_size, seq_length, n_heads, head_dim)
279
+ return x.permute(0, 2, 1, 3)
280
+
281
+ def combine_heads(x, n_heads):
282
+ batch_size, n_heads, seq_length, head_dim = x.size()
283
+ hidden_dim = n_heads * head_dim
284
+ x = x.permute(0, 2, 1, 3).contiguous()
285
+ return x.view(batch_size, seq_length, hidden_dim)
286
+
287
+ def sparsemax(logits):
288
+ logits_sorted, _ = torch.sort(logits, descending=True, dim=-1)
289
+ cumulative_sum = torch.cumsum(logits_sorted, dim=-1) - 1
290
+ rho = (logits_sorted > cumulative_sum / (torch.arange(logits.size(-1)) + 1).to(logits.device)).float()
291
+ tau = (cumulative_sum / rho.sum(dim=-1, keepdim=True)).gather(dim=-1, index=rho.sum(dim=-1, keepdim=True).long() - 1)
292
+ return torch.max(torch.zeros_like(logits), logits - tau)
293
+
294
+ def attnfunc_custom(q, k, v, n_heads, eval_string = ""):
295
+ q = split_heads(q, n_heads)
296
+ k = split_heads(k, n_heads)
297
+ v = split_heads(v, n_heads)
298
+
299
+ d_k = q.size(-1)
300
+
301
+ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
302
+
303
+ if eval_string == "":
304
+ attn_weights = F.softmax(scores, dim=-1)
305
+ else:
306
+ attn_weights = eval(eval_string)
307
+
308
+ output = torch.matmul(attn_weights, v)
309
+ output = combine_heads(output, n_heads)
310
+ return output
311
+
312
+ def min_max_norm(t):
313
+ return (t - t.min()) / (t.max() - t.min())
314
+
315
+ class attention_modifier():
316
+ def __init__(self, self_attn_mod_eval, conds = None):
317
+ self.self_attn_mod_eval = self_attn_mod_eval
318
+ self.conds = conds
319
+
320
+ def modified_attention(self, q, k, v, extra_options, mask=None):
321
+
322
+ """extra_options contains: {'cond_or_uncond': [1, 0], 'sigmas': tensor([14.6146], device='cuda:0'),
323
+ 'original_shape': [2, 4, 128, 128], 'transformer_index': 4, 'block': ('middle', 0),
324
+ 'block_index': 3, 'n_heads': 20, 'dim_head': 64, 'attn_precision': None}"""
325
+
326
+ if "attnbc" in self.self_attn_mod_eval:
327
+ attnbc = attention_basic(q, k, v, extra_options['n_heads'], mask)
328
+ if "normattn" in self.self_attn_mod_eval:
329
+ normattn = normal_attention(q, k, v, mask)
330
+ if "attnxf" in self.self_attn_mod_eval:
331
+ attnxf = attention_xformers(q, k, v, extra_options['n_heads'], mask)
332
+ if "attnpy" in self.self_attn_mod_eval:
333
+ attnpy = attention_pytorch(q, k, v, extra_options['n_heads'], mask)
334
+ if "attnsp" in self.self_attn_mod_eval:
335
+ attnsp = attention_split(q, k, v, extra_options['n_heads'], mask)
336
+ if "attnsq" in self.self_attn_mod_eval:
337
+ attnsq = attention_sub_quad(q, k, v, extra_options['n_heads'], mask)
338
+ if "attnopt" in self.self_attn_mod_eval:
339
+ attnopt = attnfunc(q, k, v, extra_options['n_heads'], mask)
340
+ n_heads = extra_options['n_heads']
341
+ if self.conds is not None:
342
+ cond_pos_l = self.conds[0][..., :768].cuda()
343
+ cond_neg_l = self.conds[1][..., :768].cuda()
344
+ if self.conds[0].shape[-1] > 768:
345
+ cond_pos_g = self.conds[0][..., 768:2048].cuda()
346
+ cond_neg_g = self.conds[1][..., 768:2048].cuda()
347
+ return eval(self.self_attn_mod_eval)
348
+
349
+ def experimental_functions(cond_input, method, exp_value, exp_normalize, pcp, psi, sigma, sigmax, attention_modifiers_input, args, model_options_copy, eval_string = ""):
350
+ """
351
+ There may or may not be an actual reasoning behind each of these methods.
352
+ Some, like the sine value, have interesting properties. Enabled for both cond and uncond preds it somehow makes them stronger.
353
+ Note that there is a "normalize" toggle and it may greatly change the end result since some operations will totally butcher the values.
354
+ "theDaRkNeSs" for example without normalizing seems to darken if used for cond/uncond (not with the cond as the uncond or something).
355
+ Maybe just with the positive. I don't remember. I leave it for now if you want to play around.
356
+
357
+ The eval_string can be used to create the uncond replacement.
358
+ I made it so it's split by semicolons and only the last split is the value in use.
359
+ What is before is added in an array named "v".
360
+ pcp is previous cond_pred
361
+ psi is previous sigma
362
+ args is the CFG function input arguments with the added cond/unconds (like the actual activation conditionings) named respectively "cond_pos" and "cond_neg"
363
+
364
+ So if you write:
365
+
366
+ pcp if sigma < 7 else -pcp;
367
+ print("it works too just don't use the output I guess");
368
+ v[0] if sigma < 14 else torch.zeros_like(cond);
369
+ v[-1]*2
370
+
371
+ Well the first line becomes v[0], second v[1] etc.
372
+ The last one becomes the result.
373
+ Note that it's just an example, I don't see much interest in that one.
374
+
375
+ Using comfy.samplers.calc_cond_batch(args["model"], [args["cond_pos"], None], args["input"], args["timestep"], args["model_options"])[0]
376
+ can work too.
377
+
378
+ This whole mess has for initial goal to attempt to find the best way (or have some bruteforcing fun) to replace the uncond pred for as much as possible.
379
+ Or simply to try things around :)
380
+ """
381
+ if method == "cond_pred":
382
+ return cond_input
383
+ default_device = cond_input.device
384
+ # print()
385
+ # print(get_entropy(cond))
386
+ cond = cond_input.clone()
387
+ cond_norm = cond.norm()
388
+ if method == "amplify":
389
+ mask = torch.abs(cond) >= 1
390
+ cond_copy = cond.clone()
391
+ cond = torch.pow(torch.abs(cond), ( 1 / exp_value)) * cond.sign()
392
+ cond[mask] = torch.pow(torch.abs(cond_copy[mask]), exp_value) * cond[mask].sign()
393
+ elif method == "root":
394
+ cond = torch.pow(torch.abs(cond), ( 1 / exp_value)) * cond.sign()
395
+ elif method == "power":
396
+ cond = torch.pow(torch.abs(cond), exp_value) * cond.sign()
397
+ elif method == "erf":
398
+ cond = torch.erf(cond)
399
+ elif method == "exp_erf":
400
+ cond = torch.pow(torch.erf(cond), exp_value)
401
+ elif method == "root_erf":
402
+ cond = torch.erf(cond)
403
+ cond = torch.pow(torch.abs(cond), 1 / exp_value ) * cond.sign()
404
+ elif method == "erf_amplify":
405
+ cond = torch.erf(cond)
406
+ mask = torch.abs(cond) >= 1
407
+ cond_copy = cond.clone()
408
+ cond = torch.pow(torch.abs(cond), 1 / exp_value ) * cond.sign()
409
+ cond[mask] = torch.pow(torch.abs(cond_copy[mask]), exp_value) * cond[mask].sign()
410
+ elif method == "sine":
411
+ cond = torch.sin(torch.abs(cond)) * cond.sign()
412
+ elif method == "sine_exp":
413
+ cond = torch.sin(torch.abs(cond)) * cond.sign()
414
+ cond = torch.pow(torch.abs(cond), exp_value) * cond.sign()
415
+ elif method == "sine_exp_diff":
416
+ cond = torch.sin(torch.abs(cond)) * cond.sign()
417
+ cond = torch.pow(torch.abs(cond_input), exp_value) * cond.sign() - cond
418
+ elif method == "sine_exp_diff_to_sine":
419
+ cond = torch.sin(torch.abs(cond)) * cond.sign()
420
+ cond = torch.pow(torch.abs(cond), exp_value) * cond.sign() - cond
421
+ elif method == "sine_root":
422
+ cond = torch.sin(torch.abs(cond)) * cond.sign()
423
+ cond = torch.pow(torch.abs(cond), ( 1 / exp_value)) * cond.sign()
424
+ elif method == "sine_root_diff":
425
+ cond = torch.sin(torch.abs(cond)) * cond.sign()
426
+ cond = torch.pow(torch.abs(cond_input), 1 / exp_value) * cond.sign() - cond
427
+ elif method == "sine_root_diff_to_sine":
428
+ cond = torch.sin(torch.abs(cond)) * cond.sign()
429
+ cond = torch.pow(torch.abs(cond), 1 / exp_value) * cond.sign() - cond
430
+ elif method == "theDaRkNeSs":
431
+ cond = torch.sin(cond)
432
+ cond = torch.pow(torch.abs(cond), 1 / exp_value) * cond.sign() - cond
433
+ elif method == "cosine":
434
+ cond = torch.cos(torch.abs(cond)) * cond.sign()
435
+ elif method == "sign":
436
+ cond = cond.sign()
437
+ elif method == "zero":
438
+ cond = torch.zeros_like(cond)
439
+ elif method in ["attention_modifiers_input_using_cond","attention_modifiers_input_using_uncond","subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"]:
440
+ cond_to_use = args["cond_pos"] if method in ["attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_cond"] else args["cond_neg"]
441
+ tmp_model_options = deepcopy(model_options_copy)
442
+ for atm in attention_modifiers_input:
443
+ if sigma <= atm['sigma_start'] and sigma > atm['sigma_end']:
444
+ block_layers = {"input": atm['unet_block_id_input'], "middle": atm['unet_block_id_middle'], "output": atm['unet_block_id_output']}
445
+ for unet_block in block_layers:
446
+ for unet_block_id in block_layers[unet_block].split(","):
447
+ if unet_block_id != "":
448
+ unet_block_id = int(unet_block_id)
449
+ tmp_model_options = set_model_options_patch_replace(tmp_model_options, attention_modifier(atm['self_attn_mod_eval'], [args["cond_pos"][0]["cross_attn"], args["cond_neg"][0]["cross_attn"]]if "cond" in atm['self_attn_mod_eval'] else None).modified_attention, atm['unet_attn'], unet_block, unet_block_id)
450
+
451
+ cond = comfy.samplers.calc_cond_batch(args["model"], [cond_to_use], args["input"], args["timestep"], tmp_model_options)[0]
452
+ if method in ["subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"]:
453
+ cond = cond_input + (cond_input - cond) * exp_value
454
+
455
+ elif method == "previous_average":
456
+ if sigma > (sigmax - 1):
457
+ cond = torch.zeros_like(cond)
458
+ else:
459
+ cond = (pcp / psi * sigma + cond) / 2
460
+ elif method == "eval":
461
+ if "condmix" in eval_string:
462
+ def condmix(args, mult=2):
463
+ cond_pos_tmp = deepcopy(args["cond_pos"])
464
+ cond_pos_tmp[0]["cross_attn"] += (args["cond_pos"][0]["cross_attn"] - args["cond_neg"][0]["cross_attn"]*-1) * mult
465
+ return cond_pos_tmp
466
+ v = []
467
+ evals_strings = eval_string.split(";")
468
+ if len(evals_strings) > 1:
469
+ for i in range(len(evals_strings[:-1])):
470
+ v.append(eval(evals_strings[i]))
471
+ cond = eval(evals_strings[-1])
472
+ if exp_normalize and torch.all(cond != 0):
473
+ cond = cond * cond_norm / cond.norm()
474
+ # print(get_entropy(cond))
475
+ return cond.to(device=default_device)
476
+
477
+ class advancedDynamicCFG:
478
+ def __init__(self):
479
+ self.last_cfg_ht_one = 8
480
+ self.previous_cond_pred = None
481
+
482
+ @classmethod
483
+ def INPUT_TYPES(s):
484
+ return {"required": {
485
+ "model": ("MODEL",),
486
+
487
+ "automatic_cfg" : (["None", "soft", "hard", "hard_squared", "range"], {"default": "hard"},),
488
+
489
+ "skip_uncond" : ("BOOLEAN", {"default": True}),
490
+ "fake_uncond_start" : ("BOOLEAN", {"default": False}),
491
+ "uncond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
492
+ "uncond_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
493
+
494
+ "lerp_uncond" : ("BOOLEAN", {"default": False}),
495
+ "lerp_uncond_strength": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.1, "round": 0.1}),
496
+ "lerp_uncond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
497
+ "lerp_uncond_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
498
+
499
+ "subtract_latent_mean" : ("BOOLEAN", {"default": False}),
500
+ "subtract_latent_mean_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
501
+ "subtract_latent_mean_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
502
+
503
+ "latent_intensity_rescale" : ("BOOLEAN", {"default": False}),
504
+ "latent_intensity_rescale_method" : (["soft","hard","range"], {"default": "hard"},),
505
+ "latent_intensity_rescale_cfg": ("FLOAT", {"default": 8, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.1}),
506
+ "latent_intensity_rescale_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
507
+ "latent_intensity_rescale_sigma_end": ("FLOAT", {"default": 3, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
508
+
509
+ "cond_exp": ("BOOLEAN", {"default": False}),
510
+ "cond_exp_normalize": ("BOOLEAN", {"default": False}),
511
+ "cond_exp_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
512
+ "cond_exp_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
513
+ "cond_exp_method": (["amplify", "root", "power", "erf", "erf_amplify", "exp_erf", "root_erf", "sine", "sine_exp", "sine_exp_diff", "sine_exp_diff_to_sine", "sine_root", "sine_root_diff", "sine_root_diff_to_sine", "theDaRkNeSs", "cosine", "sign", "zero", "previous_average", "eval",
514
+ "attention_modifiers_input_using_cond","attention_modifiers_input_using_uncond",
515
+ "subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"],),
516
+ "cond_exp_value": ("FLOAT", {"default": 2, "min": 0, "max": 100, "step": 0.1, "round": 0.01}),
517
+
518
+ "uncond_exp": ("BOOLEAN", {"default": False}),
519
+ "uncond_exp_normalize": ("BOOLEAN", {"default": False}),
520
+ "uncond_exp_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
521
+ "uncond_exp_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
522
+ "uncond_exp_method": (["amplify", "root", "power", "erf", "erf_amplify", "exp_erf", "root_erf", "sine", "sine_exp", "sine_exp_diff", "sine_exp_diff_to_sine", "sine_root", "sine_root_diff", "sine_root_diff_to_sine", "theDaRkNeSs", "cosine", "sign", "zero", "previous_average", "eval",
523
+ "subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond"],),
524
+ "uncond_exp_value": ("FLOAT", {"default": 2, "min": 0, "max": 100, "step": 0.1, "round": 0.01}),
525
+
526
+ "fake_uncond_exp": ("BOOLEAN", {"default": False}),
527
+ "fake_uncond_exp_normalize": ("BOOLEAN", {"default": False}),
528
+ "fake_uncond_exp_method" : (["cond_pred", "previous_average",
529
+ "amplify", "root", "power", "erf", "erf_amplify", "exp_erf", "root_erf", "sine", "sine_exp", "sine_exp_diff", "sine_exp_diff_to_sine", "sine_root", "sine_root_diff",
530
+ "sine_root_diff_to_sine", "theDaRkNeSs", "cosine", "sign", "zero", "eval",
531
+ "subtract_attention_modifiers_input_using_cond","subtract_attention_modifiers_input_using_uncond",
532
+ "attention_modifiers_input_using_cond","attention_modifiers_input_using_uncond"],),
533
+ "fake_uncond_exp_value": ("FLOAT", {"default": 2, "min": 0, "max": 1000, "step": 0.1, "round": 0.01}),
534
+ "fake_uncond_multiplier": ("INT", {"default": 1, "min": -1, "max": 1, "step": 1}),
535
+ "fake_uncond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
536
+ "fake_uncond_sigma_end": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
537
+ "auto_cfg_topk": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 0.5, "step": 0.05, "round": 0.01}),
538
+ "auto_cfg_ref": ("FLOAT", {"default": 8, "min": 0.0, "max": 100, "step": 0.5, "round": 0.01}),
539
+ "attention_modifiers_global_enabled": ("BOOLEAN", {"default": False}),
540
+ "disable_cond": ("BOOLEAN", {"default": False}),
541
+ "disable_cond_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
542
+ "disable_cond_sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
543
+ "save_as_preset": ("BOOLEAN", {"default": False}),
544
+ "preset_name": ("STRING", {"multiline": False}),
545
+ },
546
+ "optional":{
547
+ "eval_string_cond": ("STRING", {"multiline": True}),
548
+ "eval_string_uncond": ("STRING", {"multiline": True}),
549
+ "eval_string_fake": ("STRING", {"multiline": True}),
550
+ "args_filter": ("STRING", {"multiline": True, "forceInput": True}),
551
+ "attention_modifiers_positive": ("ATTNMOD", {"forceInput": True}),
552
+ "attention_modifiers_negative": ("ATTNMOD", {"forceInput": True}),
553
+ "attention_modifiers_fake_negative": ("ATTNMOD", {"forceInput": True}),
554
+ "attention_modifiers_global": ("ATTNMOD", {"forceInput": True}),
555
+ }
556
+ }
557
+ RETURN_TYPES = ("MODEL","STRING",)
558
+ FUNCTION = "patch"
559
+
560
+ CATEGORY = "model_patches/Automatic_CFG"
561
+
562
+ def patch(self, model, automatic_cfg = "None",
563
+ skip_uncond = False, fake_uncond_start = False, uncond_sigma_start = 1000, uncond_sigma_end = 0,
564
+ lerp_uncond = False, lerp_uncond_strength = 1, lerp_uncond_sigma_start = 1000, lerp_uncond_sigma_end = 1,
565
+ subtract_latent_mean = False, subtract_latent_mean_sigma_start = 1000, subtract_latent_mean_sigma_end = 1,
566
+ latent_intensity_rescale = False, latent_intensity_rescale_sigma_start = 1000, latent_intensity_rescale_sigma_end = 1,
567
+ cond_exp = False, cond_exp_sigma_start = 1000, cond_exp_sigma_end = 1000, cond_exp_method = "amplify", cond_exp_value = 2, cond_exp_normalize = False,
568
+ uncond_exp = False, uncond_exp_sigma_start = 1000, uncond_exp_sigma_end = 1000, uncond_exp_method = "amplify", uncond_exp_value = 2, uncond_exp_normalize = False,
569
+ fake_uncond_exp = False, fake_uncond_exp_method = "amplify", fake_uncond_exp_value = 2, fake_uncond_exp_normalize = False, fake_uncond_multiplier = 1, fake_uncond_sigma_start = 1000, fake_uncond_sigma_end = 1,
570
+ latent_intensity_rescale_cfg = 8, latent_intensity_rescale_method = "hard",
571
+ ignore_pre_cfg_func = False, args_filter = "", auto_cfg_topk = 0.25, auto_cfg_ref = 8,
572
+ eval_string_cond = "", eval_string_uncond = "", eval_string_fake = "",
573
+ attention_modifiers_global_enabled = False,
574
+ attention_modifiers_positive = [], attention_modifiers_negative = [], attention_modifiers_fake_negative = [], attention_modifiers_global = [],
575
+ disable_cond=False, disable_cond_sigma_start=1000,disable_cond_sigma_end=1000, save_as_preset = False, preset_name = "", **kwargs
576
+ ):
577
+
578
+ # support_function()
579
+ model_options_copy = deepcopy(model.model_options)
580
+ monkey_patching_comfy_sampling_function()
581
+ if args_filter != "":
582
+ args_filter = args_filter.split(",")
583
+ else:
584
+ args_filter = [k for k, v in locals().items()]
585
+ not_in_filter = ['self','model','args','args_filter','save_as_preset','preset_name','model_options_copy']
586
+ if fake_uncond_exp_method != "eval":
587
+ not_in_filter.append("eval_string")
588
+
589
+ if save_as_preset and preset_name != "":
590
+ preset_parameters = {key: value for key, value in locals().items() if key not in not_in_filter}
591
+ with open(os.path.join(json_preset_path, preset_name+".json"), 'w', encoding='utf-8') as f:
592
+ json.dump(preset_parameters, f)
593
+ print(f"Preset saved with the name: {Fore.GREEN}{preset_name}{Fore.RESET}")
594
+ print(f"{Fore.RED}Don't forget to turn the save toggle OFF to not overwrite!{Fore.RESET}")
595
+
596
+ args_str = '\n'.join(f'{k}: {v}' for k, v in locals().items() if k not in not_in_filter and k in args_filter)
597
+
598
+ sigmin, sigmax = get_sigmin_sigmax(model)
599
+
600
+ lerp_start, lerp_end = lerp_uncond_sigma_start, lerp_uncond_sigma_end
601
+ subtract_start, subtract_end = subtract_latent_mean_sigma_start, subtract_latent_mean_sigma_end
602
+ rescale_start, rescale_end = latent_intensity_rescale_sigma_start, latent_intensity_rescale_sigma_end
603
+ print(f"Model maximum sigma: {sigmax} / Model minimum sigma: {sigmin}")
604
+ m = model.clone()
605
+
606
+ if skip_uncond or disable_cond:
607
+ # set model_options sampler_pre_cfg_function
608
+ m.model_options["sampler_pre_cfg_function"] = make_sampler_pre_cfg_function(uncond_sigma_end if skip_uncond else 0, uncond_sigma_start if skip_uncond else 100000,\
609
+ disable_cond_sigma_start if disable_cond else 100000, disable_cond_sigma_end if disable_cond else 100000)
610
+ print(f"Sampling function patched. Uncond enabled from {round(uncond_sigma_start,2)} to {round(uncond_sigma_end,2)}")
611
+ elif not ignore_pre_cfg_func:
612
+ m.model_options.pop("sampler_pre_cfg_function", None)
613
+ uncond_sigma_start, uncond_sigma_end = 1000000, 0
614
+
615
+ top_k = auto_cfg_topk
616
+ previous_cond_pred = None
617
+ previous_sigma = None
618
+ def automatic_cfg_function(args):
619
+ nonlocal previous_sigma
620
+ cond_scale = args["cond_scale"]
621
+ input_x = args["input"]
622
+ cond_pred = args["cond_denoised"]
623
+ uncond_pred = args["uncond_denoised"]
624
+ sigma = args["sigma"][0]
625
+ model_options = args["model_options"]
626
+ if self.previous_cond_pred is None:
627
+ self.previous_cond_pred = cond_pred.clone().detach().to(device=cond_pred.device)
628
+ if previous_sigma is None:
629
+ previous_sigma = sigma.item()
630
+ reference_cfg = auto_cfg_ref if auto_cfg_ref > 0 else cond_scale
631
+
632
+ def fake_uncond_step():
633
+ return fake_uncond_start and skip_uncond and (sigma > uncond_sigma_start or sigma < uncond_sigma_end) and sigma <= fake_uncond_sigma_start and sigma >= fake_uncond_sigma_end
634
+
635
+ if fake_uncond_step():
636
+ uncond_pred = cond_pred.clone().detach().to(device=cond_pred.device) * fake_uncond_multiplier
637
+
638
+ if cond_exp and sigma <= cond_exp_sigma_start and sigma >= cond_exp_sigma_end:
639
+ cond_pred = experimental_functions(cond_pred, cond_exp_method, cond_exp_value, cond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_positive, args, model_options_copy, eval_string_cond)
640
+ if uncond_exp and sigma <= uncond_exp_sigma_start and sigma >= uncond_exp_sigma_end and not fake_uncond_step():
641
+ uncond_pred = experimental_functions(uncond_pred, uncond_exp_method, uncond_exp_value, uncond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_negative, args, model_options_copy, eval_string_uncond)
642
+ if fake_uncond_step() and fake_uncond_exp:
643
+ uncond_pred = experimental_functions(uncond_pred, fake_uncond_exp_method, fake_uncond_exp_value, fake_uncond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_fake_negative, args, model_options_copy, eval_string_fake)
644
+ self.previous_cond_pred = cond_pred.clone().detach().to(device=cond_pred.device)
645
+
646
+ if sigma >= sigmax or cond_scale > 1:
647
+ self.last_cfg_ht_one = cond_scale
648
+ target_intensity = self.last_cfg_ht_one / 10
649
+
650
+ if ((check_skip(sigma, uncond_sigma_start, uncond_sigma_end) and skip_uncond) and not fake_uncond_step()) or cond_scale == 1:
651
+ return input_x - cond_pred
652
+
653
+ if lerp_uncond and not check_skip(sigma, lerp_start, lerp_end) and lerp_uncond_strength != 1:
654
+ uncond_pred_norm = uncond_pred.norm()
655
+ uncond_pred = torch.lerp(cond_pred, uncond_pred, lerp_uncond_strength)
656
+ uncond_pred = uncond_pred * uncond_pred_norm / uncond_pred.norm()
657
+ cond = input_x - cond_pred
658
+ uncond = input_x - uncond_pred
659
+
660
+ if automatic_cfg == "None":
661
+ return uncond + cond_scale * (cond - uncond)
662
+
663
+ denoised_tmp = input_x - (uncond + reference_cfg * (cond - uncond))
664
+
665
+ for b in range(len(denoised_tmp)):
666
+ denoised_ranges = get_denoised_ranges(denoised_tmp[b], automatic_cfg, top_k)
667
+ for c in range(len(denoised_tmp[b])):
668
+ fixed_scale = reference_cfg * target_intensity / denoised_ranges[c]
669
+ denoised_tmp[b][c] = uncond[b][c] + fixed_scale * (cond[b][c] - uncond[b][c])
670
+
671
+ return denoised_tmp
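This block is the heart of the automatic CFG: the latent is denoised once at a reference scale, each channel's intensity range is measured with get_denoised_ranges (mean of the top-k highest values plus mean magnitude of the top-k lowest, halved), and the guidance scale is then corrected per channel so the result lands on target_intensity = cfg / 10. A schematic standalone sketch of that correction, with a simplified stand-in for get_denoised_ranges and hypothetical tensor shapes:

    import torch

    def channel_range(chan, top_k=0.25):
        # Simplified stand-in for get_denoised_ranges: mean of the top-k
        # largest values plus mean magnitude of the top-k smallest, halved.
        flat = chan.flatten()
        k = max(1, int(flat.numel() * top_k))
        hi = torch.topk(flat, k, largest=True).values.mean()
        lo = torch.topk(flat, k, largest=False).values.abs().mean()
        return ((hi + lo) / 2).item()

    reference_cfg = 8.0
    target_intensity = reference_cfg / 10            # last_cfg_ht_one / 10
    cond = torch.randn(4, 64, 64)                    # hypothetical per-channel preds
    uncond = torch.randn(4, 64, 64)
    denoised = uncond + reference_cfg * (cond - uncond)   # reference-scale pass
    for c in range(denoised.shape[0]):
        fixed_scale = reference_cfg * target_intensity / channel_range(denoised[c])
        denoised[c] = uncond[c] + fixed_scale * (cond[c] - uncond[c])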
672
+
673
+ def center_mean_latent_post_cfg(args):
674
+ denoised = args["denoised"]
675
+ sigma = args["sigma"][0]
676
+ if check_skip(sigma, subtract_start, subtract_end):
677
+ return denoised
678
+ denoised = center_latent_mean_values(denoised, False, 1)
679
+ return denoised
680
+
681
+ def rescale_post_cfg(args):
682
+ denoised = args["denoised"]
683
+ sigma = args["sigma"][0]
684
+
685
+ if check_skip(sigma, rescale_start, rescale_end):
686
+ return denoised
687
+ target_intensity = latent_intensity_rescale_cfg / 10
688
+ for b in range(len(denoised)):
689
+ denoised_ranges = get_denoised_ranges(denoised[b], latent_intensity_rescale_method)
690
+ for c in range(len(denoised[b])):
691
+ scale_correction = target_intensity / denoised_ranges[c]
692
+ denoised[b][c] = denoised[b][c] * scale_correction
693
+ return denoised
694
+
695
+ tmp_model_options = deepcopy(m.model_options)
696
+ if attention_modifiers_global_enabled:
697
+ # print(f"{Fore.GREEN}Sigma timings are ignored for global modifiers.{Fore.RESET}")
698
+ for atm in attention_modifiers_global:
699
+ block_layers = {"input": atm['unet_block_id_input'], "middle": atm['unet_block_id_middle'], "output": atm['unet_block_id_output']}
700
+ for unet_block in block_layers:
701
+ for unet_block_id in block_layers[unet_block].split(","):
702
+ if unet_block_id != "":
703
+ unet_block_id = int(unet_block_id)
704
+ tmp_model_options = set_model_options_patch_replace(tmp_model_options, attention_modifier(atm['self_attn_mod_eval']).modified_attention, atm['unet_attn'], unet_block, unet_block_id)
705
+ m.model_options = tmp_model_options
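Each entry in attention_modifiers_global carries comma-separated UNet block ids; the loop above expands every entry into individual (block_name, block_index) targets before installing its attention patch. A sketch of just that expansion, on a hypothetical ATTNMOD-style dict:

    atm = {"unet_block_id_input": "4,5,7,8",
           "unet_block_id_middle": "0",
           "unet_block_id_output": ""}
    block_layers = {"input": atm["unet_block_id_input"],
                    "middle": atm["unet_block_id_middle"],
                    "output": atm["unet_block_id_output"]}
    targets = []
    for unet_block, ids in block_layers.items():
        for unet_block_id in ids.split(","):
            if unet_block_id != "":
                targets.append((unet_block, int(unet_block_id)))
    # targets == [('input', 4), ('input', 5), ('input', 7), ('input', 8), ('middle', 0)]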
706
+
707
+ if not ignore_pre_cfg_func:
708
+ m.set_model_sampler_cfg_function(automatic_cfg_function, disable_cfg1_optimization = False)
709
+ if subtract_latent_mean:
710
+ m.set_model_sampler_post_cfg_function(center_mean_latent_post_cfg)
711
+ if latent_intensity_rescale:
712
+ m.set_model_sampler_post_cfg_function(rescale_post_cfg)
713
+ return (m, args_str, )
714
+
715
+ class attentionModifierParametersNode:
716
+ @classmethod
717
+ def INPUT_TYPES(s):
718
+ return {"required": {
719
+ "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
720
+ "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
721
+ "self_attn_mod_eval": ("STRING", {"multiline": True, "default": ""}),
722
+ "unet_block_id_input": ("STRING", {"multiline": False, "default": ""}),
723
+ "unet_block_id_middle": ("STRING", {"multiline": False, "default": ""}),
724
+ "unet_block_id_output": ("STRING", {"multiline": False, "default": ""}),
725
+ "unet_attn": (["attn1","attn2","both"],),
726
+ },
727
+ "optional":{
728
+ "join_parameters": ("ATTNMOD", {"forceInput": True}),
729
+ }}
730
+
731
+ RETURN_TYPES = ("ATTNMOD","STRING",)
732
+ RETURN_NAMES = ("Attention modifier", "Parameters as string")
733
+ FUNCTION = "exec"
734
+ CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers"
735
+ def exec(self, join_parameters=None, **kwargs):
736
+ info_string = "\n".join([f"{k}: {v}" for k,v in kwargs.items() if v != ""])
737
+ if kwargs['unet_attn'] == "both":
738
+ copy_kwargs = kwargs.copy()
739
+ kwargs['unet_attn'] = "attn1"
740
+ copy_kwargs['unet_attn'] = "attn2"
741
+ out_modifiers = [kwargs, copy_kwargs]
742
+ else:
743
+ out_modifiers = [kwargs]
744
+ return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, )
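An ATTNMOD value is just a list of these kwargs dicts: choosing "both" emits the same entry once for attn1 and once for attn2, and join_parameters chains nodes through plain list concatenation (the concat node further down does the same). A small sketch with hypothetical values:

    kwargs = {"sigma_start": 1000.0, "sigma_end": 0.0, "self_attn_mod_eval": "q",
              "unet_block_id_input": "8", "unet_block_id_middle": "",
              "unet_block_id_output": "", "unet_attn": "both"}
    if kwargs["unet_attn"] == "both":
        copy_kwargs = kwargs.copy()          # shallow copy is enough for a flat dict
        kwargs["unet_attn"], copy_kwargs["unet_attn"] = "attn1", "attn2"
        out_modifiers = [kwargs, copy_kwargs]
    else:
        out_modifiers = [kwargs]
    upstream = []                            # ATTNMOD arriving via join_parameters
    combined = upstream + out_modifiers      # chaining is list concatenation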
745
+
746
+ class attentionModifierBruteforceParametersNode:
747
+ @classmethod
748
+ def INPUT_TYPES(s):
749
+ return {"required": {
750
+ "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
751
+ "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
752
+ "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
753
+ "self_attn_mod_eval": ("STRING", {"multiline": True , "default": ""}),
754
+ "unet_block_id_input": ("STRING", {"multiline": False, "default": "4,5,7,8"}),
755
+ "unet_block_id_middle": ("STRING", {"multiline": False, "default": "0"}),
756
+ "unet_block_id_output": ("STRING", {"multiline": False, "default": "0,1,2,3,4,5"}),
757
+ "unet_attn": (["attn1","attn2","both"],),
758
+ },
759
+ "optional":{
760
+ "join_parameters": ("ATTNMOD", {"forceInput": True}),
761
+ }}
762
+
763
+ RETURN_TYPES = ("ATTNMOD","STRING",)
764
+ RETURN_NAMES = ("Attention modifier", "Parameters as string")
765
+ FUNCTION = "exec"
766
+ CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers"
767
+
768
+ def create_sequence_parameters(self, input_str, middle_str, output_str):
769
+ input_values = input_str.split(",") if input_str else []
770
+ middle_values = middle_str.split(",") if middle_str else []
771
+ output_values = output_str.split(",") if output_str else []
772
+ result = []
773
+ result.extend([{"unet_block_id_input": val, "unet_block_id_middle": "", "unet_block_id_output": ""} for val in input_values])
774
+ result.extend([{"unet_block_id_input": "", "unet_block_id_middle": val, "unet_block_id_output": ""} for val in middle_values])
775
+ result.extend([{"unet_block_id_input": "", "unet_block_id_middle": "", "unet_block_id_output": val} for val in output_values])
776
+ return result
777
+
778
+ def exec(self, seed, join_parameters=None, **kwargs):
779
+ sequence_parameters = self.create_sequence_parameters(kwargs['unet_block_id_input'], kwargs['unet_block_id_middle'], kwargs['unet_block_id_output'])
780
+ lenseq = len(sequence_parameters)
781
+ current_index = seed % lenseq
782
+ current_sequence = sequence_parameters[current_index]
783
+ kwargs["unet_block_id_input"] = current_sequence["unet_block_id_input"]
784
+ kwargs["unet_block_id_middle"] = current_sequence["unet_block_id_middle"]
785
+ kwargs["unet_block_id_output"] = current_sequence["unet_block_id_output"]
786
+ if current_sequence["unet_block_id_input"] != "":
787
+ current_block_string = f"unet_block_id_input: {current_sequence['unet_block_id_input']}"
788
+ elif current_sequence["unet_block_id_middle"] != "":
789
+ current_block_string = f"unet_block_id_middle: {current_sequence['unet_block_id_middle']}"
790
+ elif current_sequence["unet_block_id_output"] != "":
791
+ current_block_string = f"unet_block_id_output: {current_sequence['unet_block_id_output']}"
792
+ info_string = f"Progress: {current_index+1}/{lenseq}\n{kwargs['self_attn_mod_eval']}\n{kwargs['unet_attn']} {current_block_string}"
793
+ if kwargs['unet_attn'] == "both":
794
+ copy_kwargs = kwargs.copy()
795
+ kwargs['unet_attn'] = "attn1"
796
+ copy_kwargs['unet_attn'] = "attn2"
797
+ out_modifiers = [kwargs, copy_kwargs]
798
+ else:
799
+ out_modifiers = [kwargs]
800
+ return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, )
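Because current_index = seed % len(sequence), feeding this node an incrementing seed tests exactly one block per run and wraps around once every listed block has been tried. With the default block lists that is an 11-step sweep:

    def sequence(input_str, middle_str, output_str):
        seq = []
        for name, ids in (("input", input_str), ("middle", middle_str), ("output", output_str)):
            seq += [(name, v) for v in ids.split(",") if v != ""]
        return seq

    seq = sequence("4,5,7,8", "0", "0,1,2,3,4,5")
    assert len(seq) == 11
    for seed in (0, 1, 10, 11):
        idx = seed % len(seq)
        print(f"seed {seed}: progress {idx + 1}/{len(seq)} -> {seq[idx]}")
    # seed 0 -> ('input', 4), seed 1 -> ('input', 5), seed 10 -> ('output', 5),
    # and seed 11 wraps back to ('input', 4)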
801
+
802
+ class attentionModifierConcatNode:
803
+ @classmethod
804
+ def INPUT_TYPES(s):
805
+ return {"required": {
806
+ "parameters_1": ("ATTNMOD", {"forceInput": True}),
807
+ "parameters_2": ("ATTNMOD", {"forceInput": True}),
808
+ }}
809
+
810
+ RETURN_TYPES = ("ATTNMOD",)
811
+ FUNCTION = "exec"
812
+ CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers"
813
+ def exec(self, parameters_1, parameters_2):
814
+ output_parms = parameters_1 + parameters_2
815
+ return (output_parms, )
816
+
817
+ class simpleDynamicCFG:
818
+ @classmethod
819
+ def INPUT_TYPES(s):
820
+ return {"required": {
821
+ "model": ("MODEL",),
822
+ "hard_mode" : ("BOOLEAN", {"default": True}),
823
+ "boost" : ("BOOLEAN", {"default": True}),
824
+ }}
825
+ RETURN_TYPES = ("MODEL",)
826
+ FUNCTION = "patch"
827
+
828
+ CATEGORY = "model_patches/Automatic_CFG/presets"
829
+
830
+ def patch(self, model, hard_mode, boost):
831
+ advcfg = advancedDynamicCFG()
832
+ m = advcfg.patch(model,
833
+ skip_uncond = boost,
834
+ uncond_sigma_start = 1000, uncond_sigma_end = 1,
835
+ automatic_cfg = "hard" if hard_mode else "soft"
836
+ )[0]
837
+ return (m, )
838
+
839
+ class presetLoader:
840
+ @classmethod
841
+ def INPUT_TYPES(s):
842
+ presets_files = [pj.replace(".json","") for pj in os.listdir(json_preset_path) if ".json" in pj and pj not in ["Experimental_temperature.json","do_not_delete.json"]]
843
+ presets_files = sorted(presets_files, key=str.lower)
844
+ return {"required": {
845
+ "model": ("MODEL",),
846
+ "preset" : (presets_files, {"default": "Excellent_attention"}),
847
+ "uncond_sigma_end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
848
+ "use_uncond_sigma_end_from_preset" : ("BOOLEAN", {"default": True}),
849
+ "automatic_cfg" : (["From preset","None", "soft", "hard", "hard_squared", "range"],),
850
+ },
851
+ "optional":{
852
+ "join_global_parameters": ("ATTNMOD", {"forceInput": True}),
853
+ }}
854
+ RETURN_TYPES = ("MODEL", "STRING", "STRING",)
855
+ RETURN_NAMES = ("Model", "Preset name", "Parameters as string",)
856
+ FUNCTION = "patch"
857
+
858
+ CATEGORY = "model_patches/Automatic_CFG"
859
+
860
+ def patch(self, model, preset, uncond_sigma_end, use_uncond_sigma_end_from_preset, automatic_cfg, join_global_parameters=None):
861
+ with open(os.path.join(json_preset_path, preset+".json"), 'r', encoding='utf-8') as f:
862
+ preset_args = json.load(f)
863
+ if not use_uncond_sigma_end_from_preset:
864
+ preset_args["uncond_sigma_end"] = uncond_sigma_end
865
+ preset_args["fake_uncond_sigma_end"] = uncond_sigma_end
866
+ preset_args["fake_uncond_exp_sigma_end"] = uncond_sigma_end
867
+ preset_args["uncond_exp_sigma_end"] = uncond_sigma_end
868
+
869
+ if join_global_parameters is not None:
870
+ preset_args["attention_modifiers_global"] = preset_args["attention_modifiers_global"] + join_global_parameters
871
+ preset_args["attention_modifiers_global_enabled"] = True
872
+
873
+ if automatic_cfg != "From preset":
874
+ preset_args["automatic_cfg"] = automatic_cfg
875
+
876
+ advcfg = advancedDynamicCFG()
877
+ m = advcfg.patch(model, **preset_args)[0]
878
+ info_string = ",\n".join([f"\"{k}\": {v}" for k,v in preset_args.items() if v != ""])
879
+ print(f"Preset {Fore.GREEN}{preset}{Fore.RESET} loaded successfully!")
880
+ return (m, preset, info_string,)
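A preset file is simply the keyword arguments of advancedDynamicCFG.patch serialized as one JSON object (see the preset files at the end of this diff), so the loader only needs to deserialize, optionally override the four *_sigma_end keys, and forward everything. The override logic on an in-memory dict, file I/O elided:

    import json

    preset_args = json.loads('{"uncond_sigma_end": 1.0, "fake_uncond_sigma_end": 1.0, '
                             '"uncond_exp_sigma_end": 0.0, "fake_uncond_exp_sigma_end": 0.0, '
                             '"automatic_cfg": "hard"}')
    uncond_sigma_end, use_preset_value, automatic_cfg = 0.4, False, "From preset"
    if not use_preset_value:
        for key in ("uncond_sigma_end", "fake_uncond_sigma_end",
                    "fake_uncond_exp_sigma_end", "uncond_exp_sigma_end"):
            preset_args[key] = uncond_sigma_end
    if automatic_cfg != "From preset":
        preset_args["automatic_cfg"] = automatic_cfg
    # preset_args then feeds advancedDynamicCFG().patch(model, **preset_args)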
881
+
882
+ class simpleDynamicCFGlerpUncond:
883
+ @classmethod
884
+ def INPUT_TYPES(s):
885
+ return {"required": {
886
+ "model": ("MODEL",),
887
+ "boost" : ("BOOLEAN", {"default": True}),
888
+ "negative_strength": ("FLOAT", {"default": 1, "min": 0.0, "max": 5.0, "step": 0.1, "round": 0.1}),
889
+ }}
890
+ RETURN_TYPES = ("MODEL",)
891
+ FUNCTION = "patch"
892
+
893
+ CATEGORY = "model_patches/Automatic_CFG/presets"
894
+
895
+ def patch(self, model, boost, negative_strength):
896
+ advcfg = advancedDynamicCFG()
897
+ m = advcfg.patch(model=model,
898
+ automatic_cfg="hard", skip_uncond=boost,
899
+ uncond_sigma_start = 15, uncond_sigma_end = 1,
900
+ lerp_uncond=negative_strength != 1, lerp_uncond_strength=negative_strength,
901
+ lerp_uncond_sigma_start = 15, lerp_uncond_sigma_end = 1
902
+ )[0]
903
+ return (m, )
904
+
905
+ class postCFGrescaleOnly:
906
+ @classmethod
907
+ def INPUT_TYPES(s):
908
+ return {"required": {
909
+ "model": ("MODEL",),
910
+ "subtract_latent_mean" : ("BOOLEAN", {"default": True}),
911
+ "subtract_latent_mean_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}),
912
+ "subtract_latent_mean_sigma_end": ("FLOAT", {"default": 7.5, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}),
913
+ "latent_intensity_rescale" : ("BOOLEAN", {"default": True}),
914
+ "latent_intensity_rescale_method" : (["soft","hard","range"], {"default": "hard"},),
915
+ "latent_intensity_rescale_cfg" : ("FLOAT", {"default": 8, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.1}),
916
+ "latent_intensity_rescale_sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}),
917
+ "latent_intensity_rescale_sigma_end": ("FLOAT", {"default": 5, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.1}),
918
+ }}
919
+ RETURN_TYPES = ("MODEL",)
920
+ FUNCTION = "patch"
921
+
922
+ CATEGORY = "model_patches/Automatic_CFG/utils"
923
+
924
+ def patch(self, model,
925
+ subtract_latent_mean, subtract_latent_mean_sigma_start, subtract_latent_mean_sigma_end,
926
+ latent_intensity_rescale, latent_intensity_rescale_method, latent_intensity_rescale_cfg, latent_intensity_rescale_sigma_start, latent_intensity_rescale_sigma_end
927
+ ):
928
+ advcfg = advancedDynamicCFG()
929
+ m = advcfg.patch(model=model,
930
+ subtract_latent_mean = subtract_latent_mean,
931
+ subtract_latent_mean_sigma_start = subtract_latent_mean_sigma_start, subtract_latent_mean_sigma_end = subtract_latent_mean_sigma_end,
932
+ latent_intensity_rescale = latent_intensity_rescale, latent_intensity_rescale_cfg = latent_intensity_rescale_cfg, latent_intensity_rescale_method = latent_intensity_rescale_method,
933
+ latent_intensity_rescale_sigma_start = latent_intensity_rescale_sigma_start, latent_intensity_rescale_sigma_end = latent_intensity_rescale_sigma_end,
934
+ ignore_pre_cfg_func = True
935
+ )[0]
936
+ return (m, )
937
+
938
+ class simpleDynamicCFGHighSpeed:
939
+ @classmethod
940
+ def INPUT_TYPES(s):
941
+ return {"required": {
942
+ "model": ("MODEL",),
943
+ }}
944
+ RETURN_TYPES = ("MODEL",)
945
+ FUNCTION = "patch"
946
+
947
+ CATEGORY = "model_patches/Automatic_CFG/presets"
948
+
949
+ def patch(self, model):
950
+ advcfg = advancedDynamicCFG()
951
+ m = advcfg.patch(model=model, automatic_cfg = "hard",
952
+ skip_uncond = True, uncond_sigma_start = 7.5, uncond_sigma_end = 1)[0]
953
+ return (m, )
954
+
955
+ class simpleDynamicCFGwarpDrive:
956
+ @classmethod
957
+ def INPUT_TYPES(s):
958
+ return {"required": {
959
+ "model": ("MODEL",),
960
+ "uncond_sigma_start": ("FLOAT", {"default": 5.5, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
961
+ "uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
962
+ "fake_uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
963
+ }}
964
+ RETURN_TYPES = ("MODEL",)
965
+ FUNCTION = "patch"
966
+
967
+ CATEGORY = "model_patches/Automatic_CFG/presets"
968
+
969
+ def patch(self, model, uncond_sigma_start, uncond_sigma_end, fake_uncond_sigma_end):
970
+ advcfg = advancedDynamicCFG()
971
+ print(f" {Fore.CYAN}WARP DRIVE MODE ENGAGED!{Style.RESET_ALL}\n Settings suggestions:\n"
972
+ f" {Fore.GREEN}1/1/1: {Fore.YELLOW}Maaaxxxiiimum speeeeeed.{Style.RESET_ALL} {Fore.RED}Uncond disabled.{Style.RESET_ALL} {Fore.MAGENTA}Fasten your seatbelt!{Style.RESET_ALL}\n"
973
+ f" {Fore.GREEN}3/1/1: {Fore.YELLOW}Risky space-time continuum distortion.{Style.RESET_ALL} {Fore.MAGENTA}Awesome for prompts with a clear subject!{Style.RESET_ALL}\n"
974
+ f" {Fore.GREEN}5.5/1/1: {Fore.YELLOW}Frameshift Drive Autopilot: {Fore.GREEN}Engaged.{Style.RESET_ALL} {Fore.MAGENTA}Should work with anything but do it better and faster!{Style.RESET_ALL}")
975
+
976
+ m = advcfg.patch(model=model, automatic_cfg = "hard",
977
+ skip_uncond = True, uncond_sigma_start = uncond_sigma_start, uncond_sigma_end = uncond_sigma_end,
978
+ fake_uncond_sigma_end = fake_uncond_sigma_end, fake_uncond_sigma_start = 1000, fake_uncond_start=True,
979
+ fake_uncond_exp=True,fake_uncond_exp_normalize=True,fake_uncond_exp_method="previous_average",
980
+ cond_exp = False, cond_exp_sigma_start = 9, cond_exp_sigma_end = uncond_sigma_start, cond_exp_method = "erf", cond_exp_normalize = True,
981
+ )[0]
982
+ return (m, )
983
+
984
+ class simpleDynamicCFGunpatch:
985
+ @classmethod
986
+ def INPUT_TYPES(s):
987
+ return {"required": {
988
+ "model": ("MODEL",),
989
+ }}
990
+ RETURN_TYPES = ("MODEL",)
991
+ FUNCTION = "unpatch"
992
+
993
+ CATEGORY = "model_patches/Automatic_CFG/utils"
994
+
995
+ def unpatch(self, model):
996
+ m = model.clone()
997
+ m.model_options.pop("sampler_pre_cfg_function", None)
998
+ return (m, )
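Note that this node only removes the pre-CFG hook; a sampler CFG function or post-CFG hooks installed by the nodes above stay in place. A hedged sketch of a fuller reset on a plain dict standing in for m.model_options, assuming ComfyUI stores those hooks under the keys shown (verify against your ComfyUI version):

    model_options = {"sampler_pre_cfg_function": "<hook>",
                     "sampler_cfg_function": "<hook>",
                     "sampler_post_cfg_function": ["<hook>"]}
    model_options.pop("sampler_pre_cfg_function", None)   # what unpatch removes
    model_options.pop("sampler_cfg_function", None)       # assumed key set by set_model_sampler_cfg_function
    model_options["sampler_post_cfg_function"] = []       # assumed key holding the post-CFG hook list
    print(model_options)                                  # {'sampler_post_cfg_function': []}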
999
+
1000
+ class simpleDynamicCFGExcellentattentionPatch:
1001
+ @classmethod
1002
+ def INPUT_TYPES(s):
1003
+ inputs = {"required": {
1004
+ "model": ("MODEL",),
1005
+ "Auto_CFG": ("BOOLEAN", {"default": True}),
1006
+ "patch_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 1.0, "round": 0.01}),
1007
+ "patch_cond": ("BOOLEAN", {"default": True}),
1008
+ "patch_uncond": ("BOOLEAN", {"default": True}),
1009
+ "light_patch": ("BOOLEAN", {"default": False}),
1010
+ "mute_self_input_layer_8_cond": ("BOOLEAN", {"default": False}),
1011
+ "mute_cross_input_layer_8_cond": ("BOOLEAN", {"default": False}),
1012
+ "mute_self_input_layer_8_uncond": ("BOOLEAN", {"default": True}),
1013
+ "mute_cross_input_layer_8_uncond": ("BOOLEAN", {"default": False}),
1014
+ "uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
1015
+ "bypass_layer_8_instead_of_mute": ("BOOLEAN", {"default": False}),
1016
+ "save_as_preset": ("BOOLEAN", {"default": False}),
1017
+ "preset_name": ("STRING", {"multiline": False}),
1018
+ },
1019
+ "optional":{
1020
+ "attn_mod_for_positive_operation": ("ATTNMOD", {"forceInput": True}),
1021
+ "attn_mod_for_negative_operation": ("ATTNMOD", {"forceInput": True}),
1022
+ },
1023
+ }
1024
+ if "dev_env.txt" in os.listdir(current_dir):
1025
+ inputs['optional'].update({"attn_mod_for_global_operation": ("ATTNMOD", {"forceInput": True})})
1026
+ return inputs
1027
+
1028
+ RETURN_TYPES = ("MODEL","STRING",)
1029
+ RETURN_NAMES = ("Model", "Parameters as string",)
1030
+ FUNCTION = "patch"
1031
+
1032
+ CATEGORY = "model_patches/Automatic_CFG"
1033
+
1034
+ def patch(self, model, Auto_CFG, patch_multiplier, patch_cond, patch_uncond, light_patch,
1035
+ mute_self_input_layer_8_cond, mute_cross_input_layer_8_cond,
1036
+ mute_self_input_layer_8_uncond, mute_cross_input_layer_8_uncond,
1037
+ uncond_sigma_end,bypass_layer_8_instead_of_mute, save_as_preset, preset_name,
1038
+ attn_mod_for_positive_operation = None, attn_mod_for_negative_operation = None, attn_mod_for_global_operation = None):
1039
+
1040
+ parameters_as_string = "Excellent attention:\n" + "\n".join([f"{k}: {v}" for k, v in locals().items() if k not in ["self", "model"]])
1041
+
1042
+ with open(os.path.join(json_preset_path, "Excellent_attention.json"), 'r', encoding='utf-8') as f:
1043
+ patch_parameters = json.load(f)
1044
+
1045
+ attn_patch = {"sigma_start": 1000, "sigma_end": 0,
1046
+ "self_attn_mod_eval": f"normalize_tensor(q+(q-attention_basic(attnbc, k, v, extra_options['n_heads'])))*attnbc.norm()*{patch_multiplier}",
1047
+ "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}
1048
+ attn_patch_light = {"sigma_start": 1000, "sigma_end": 0,
1049
+ "self_attn_mod_eval": f"q*{patch_multiplier}",
1050
+ "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}
1051
+
1052
+ kill_self_input_8 = {
1053
+ "sigma_start": 1000,
1054
+ "sigma_end": 0,
1055
+ "self_attn_mod_eval": "q" if bypass_layer_8_instead_of_mute else "torch.zeros_like(q)",
1056
+ "unet_block_id_input": "8",
1057
+ "unet_block_id_middle": "",
1058
+ "unet_block_id_output": "",
1059
+ "unet_attn": "attn1"}
1060
+
1061
+ kill_cross_input_8 = kill_self_input_8.copy()
1062
+ kill_cross_input_8['unet_attn'] = "attn2"
1063
+
1064
+ attention_modifiers_positive = []
1065
+ attention_modifiers_fake_negative = []
1066
+
1067
+ if patch_cond: attention_modifiers_positive.append(attn_patch_light if light_patch else attn_patch)
1068
+ if mute_self_input_layer_8_cond: attention_modifiers_positive.append(kill_self_input_8)
1069
+ if mute_cross_input_layer_8_cond: attention_modifiers_positive.append(kill_cross_input_8)
1070
+
1071
+ if patch_uncond: attention_modifiers_fake_negative.append(attn_patch_light if light_patch else attn_patch)
1072
+ if mute_self_input_layer_8_uncond: attention_modifiers_fake_negative.append(kill_self_input_8)
1073
+ if mute_cross_input_layer_8_uncond: attention_modifiers_fake_negative.append(kill_cross_input_8)
1074
+
1075
+ patch_parameters['attention_modifiers_positive'] = attention_modifiers_positive
1076
+ patch_parameters['attention_modifiers_fake_negative'] = attention_modifiers_fake_negative
1077
+
1078
+ if attn_mod_for_positive_operation is not None:
1079
+ patch_parameters['attention_modifiers_positive'] = patch_parameters['attention_modifiers_positive'] + attn_mod_for_positive_operation
1080
+ if attn_mod_for_negative_operation is not None:
1081
+ patch_parameters['attention_modifiers_fake_negative'] = patch_parameters['attention_modifiers_fake_negative'] + attn_mod_for_negative_operation
1082
+ if attn_mod_for_global_operation is not None:
1083
+ patch_parameters["attention_modifiers_global_enabled"] = True
1084
+ patch_parameters['attention_modifiers_global'] = attn_mod_for_global_operation
1085
+
1086
+ patch_parameters["uncond_sigma_end"] = uncond_sigma_end
1087
+ patch_parameters["fake_uncond_sigma_end"] = uncond_sigma_end
1088
+ patch_parameters["automatic_cfg"] = "hard" if Auto_CFG else "None"
1089
+
1090
+ if save_as_preset:
1091
+ patch_parameters["save_as_preset"] = save_as_preset
1092
+ patch_parameters["preset_name"] = preset_name
1093
+
1094
+ advcfg = advancedDynamicCFG()
1095
+ m = advcfg.patch(model, **patch_parameters)[0]
1096
+
1097
+ return (m, parameters_as_string, )
1098
+
1099
+ class simpleDynamicCFGCustomAttentionPatch:
1100
+ @classmethod
1101
+ def INPUT_TYPES(s):
1102
+ return {"required": {
1103
+ "model": ("MODEL",),
1104
+ "Auto_CFG": ("BOOLEAN", {"default": True}),
1105
+ "cond_mode" : (["replace_by_custom","normal+(normal-custom_cond)*multiplier","normal+(normal-custom_uncond)*multiplier"],),
1106
+ "uncond_mode" : (["replace_by_custom","normal+(normal-custom_cond)*multiplier","normal+(normal-custom_uncond)*multiplier"],),
1107
+ "cond_diff_multiplier": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.1, "round": 0.01}),
1108
+ "uncond_diff_multiplier": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.1, "round": 0.01}),
1109
+ "uncond_sigma_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10000, "step": 0.1, "round": 0.01}),
1110
+ "save_as_preset": ("BOOLEAN", {"default": False}),
1111
+ "preset_name": ("STRING", {"multiline": False}),
1112
+ },
1113
+ "optional":{
1114
+ "attn_mod_for_positive_operation": ("ATTNMOD", {"forceInput": True}),
1115
+ "attn_mod_for_negative_operation": ("ATTNMOD", {"forceInput": True}),
1116
+ }}
1117
+ RETURN_TYPES = ("MODEL",)
1118
+ RETURN_NAMES = ("Model",)
1119
+ FUNCTION = "patch"
1120
+
1121
+ CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers"
1122
+
1123
+ def patch(self, model, Auto_CFG, cond_mode, uncond_mode, cond_diff_multiplier, uncond_diff_multiplier, uncond_sigma_end, save_as_preset, preset_name,
1124
+ attn_mod_for_positive_operation = [], attn_mod_for_negative_operation = []):
1125
+
1126
+ with open(os.path.join(json_preset_path, "do_not_delete.json"), 'r', encoding='utf-8') as f:
1127
+ patch_parameters = json.load(f)
1128
+
1129
+ patch_parameters["cond_exp_value"] = cond_diff_multiplier
1130
+ patch_parameters["uncond_exp_value"] = uncond_diff_multiplier
1131
+
1132
+ if cond_mode != "replace_by_custom":
1133
+ patch_parameters["disable_cond"] = False
1134
+ if cond_mode == "normal+(normal-custom_cond)*multiplier":
1135
+ patch_parameters["cond_exp_method"] = "subtract_attention_modifiers_input_using_cond"
1136
+ elif cond_mode == "normal+(normal-custom_uncond)*multiplier":
1137
+ patch_parameters["cond_exp_method"] = "subtract_attention_modifiers_input_using_uncond"
1138
+
1139
+ if uncond_mode != "replace_by_custom":
1140
+ patch_parameters["uncond_sigma_start"] = 1000.0
1141
+ patch_parameters["fake_uncond_exp"] = False
1142
+ patch_parameters["uncond_exp"] = True
1143
+
1144
+ if uncond_mode == "normal+(normal-custom_cond)*multiplier":
1145
+ patch_parameters["uncond_exp_method"] = "subtract_attention_modifiers_input_using_cond"
1146
+ elif uncond_mode == "normal+(normal-custom_uncond)*multiplier":
1147
+ patch_parameters["uncond_exp_method"] = "subtract_attention_modifiers_input_using_uncond"
1148
+
1149
+ if cond_mode != "replace_by_custom" and attn_mod_for_positive_operation != []:
1150
+ smallest_sigma = min([float(x['sigma_end']) for x in attn_mod_for_positive_operation])
1151
+ patch_parameters["disable_cond_sigma_end"] = smallest_sigma
1152
+ patch_parameters["cond_exp_sigma_end"] = smallest_sigma
1153
+
1154
+ if uncond_mode != "replace_by_custom" and attn_mod_for_negative_operation != []:
1155
+ smallest_sigma = min([float(x['sigma_end']) for x in attn_mod_for_negative_operation])
1156
+ patch_parameters["uncond_exp_sigma_end"] = smallest_sigma
1157
+ patch_parameters["fake_uncond_start"] = False
1158
+ # else:
1159
+ # biggest_sigma = max([float(x['sigma_start']) for x in attn_mod_for_negative_operation])
1160
+ # patch_parameters["fake_uncond_sigma_start"] = biggest_sigma
1161
+
1162
+ patch_parameters["automatic_cfg"] = "hard" if Auto_CFG else "None"
1163
+ patch_parameters['attention_modifiers_positive'] = attn_mod_for_positive_operation
1164
+ patch_parameters['attention_modifiers_negative'] = attn_mod_for_negative_operation
1165
+ patch_parameters['attention_modifiers_fake_negative'] = attn_mod_for_negative_operation
1166
+ patch_parameters["uncond_sigma_end"] = uncond_sigma_end
1167
+ patch_parameters["fake_uncond_sigma_end"] = uncond_sigma_end
1168
+ patch_parameters["save_as_preset"] = save_as_preset
1169
+ patch_parameters["preset_name"] = preset_name
1170
+
1171
+ advcfg = advancedDynamicCFG()
1172
+ m = advcfg.patch(model, **patch_parameters)[0]
1173
+
1174
+ return (m, )
1175
+
1176
+
1177
+
1178
+
1179
+ class attentionModifierSingleLayerBypassNode:
1180
+ @classmethod
1181
+ def INPUT_TYPES(s):
1182
+ return {"required": {
1183
+ "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
1184
+ "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
1185
+ "block_name": (["input","middle","output"],),
1186
+ "block_number": ("INT", {"default": 0, "min": 0, "max": 12, "step": 1}),
1187
+ "unet_attn": (["attn1","attn2","both"],),
1188
+ },
1189
+ "optional":{
1190
+ "join_parameters": ("ATTNMOD", {"forceInput": True}),
1191
+ }}
1192
+
1193
+ RETURN_TYPES = ("ATTNMOD","STRING",)
1194
+ RETURN_NAMES = ("Attention modifier", "Parameters as string")
1195
+ FUNCTION = "exec"
1196
+ CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers"
1197
+
1198
+ def exec(self, sigma_start, sigma_end, block_name, block_number, unet_attn, join_parameters=None):
1199
+ attn_modifier_dict = {
1200
+ "sigma_start": sigma_start, "sigma_end": sigma_end,
1201
+ "self_attn_mod_eval": "q",
1202
+ "unet_block_id_input": str(block_number) if block_name == "input" else "",
1203
+ "unet_block_id_middle": str(block_number) if block_name == "middle" else "",
1204
+ "unet_block_id_output": str(block_number) if block_name == "output" else "",
1205
+ "unet_attn": f"{unet_attn}"
1206
+ }
1207
+
1208
+ info_string = "\n".join([f"{k}: {v}" for k,v in attn_modifier_dict.items() if v != ""])
1209
+
1210
+ if unet_attn == "both":
1211
+ attn_modifier_dict['unet_attn'] = "attn1"
1212
+ copy_attn_modifier_dict = attn_modifier_dict.copy()
1213
+ copy_attn_modifier_dict['unet_attn'] = "attn2"
1214
+ out_modifiers = [attn_modifier_dict, copy_attn_modifier_dict]
1215
+ else:
1216
+ out_modifiers = [attn_modifier_dict]
1217
+
1218
+ return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, )
1219
+
1220
+ class attentionModifierSingleLayerTemperatureNode:
1221
+ @classmethod
1222
+ def INPUT_TYPES(s):
1223
+ return {"required": {
1224
+ "sigma_start": ("FLOAT", {"default": 1000, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
1225
+ "sigma_end": ("FLOAT", {"default": 0, "min": 0.0, "max": 10000.0, "step": 0.1, "round": 0.01}),
1226
+ "block_name": (["input","middle","output"],),
1227
+ "block_number": ("INT", {"default": 0, "min": 0, "max": 12, "step": 1}),
1228
+ "unet_attn": (["attn1","attn2","both"],),
1229
+ "temperature": ("FLOAT", {"default": 1, "min": 0.0, "max": 10000.0, "step": 0.01, "round": 0.01}),
1230
+ },
1231
+ "optional":{
1232
+ "join_parameters": ("ATTNMOD", {"forceInput": True}),
1233
+ }}
1234
+
1235
+ RETURN_TYPES = ("ATTNMOD","STRING",)
1236
+ RETURN_NAMES = ("Attention modifier", "Parameters as string")
1237
+ FUNCTION = "exec"
1238
+ CATEGORY = "model_patches/Automatic_CFG/experimental_attention_modifiers"
1239
+
1240
+ def exec(self, sigma_start, sigma_end, block_name, block_number, unet_attn, temperature, join_parameters=None):
1241
+ attn_modifier_dict = {
1242
+ "sigma_start": sigma_start, "sigma_end": sigma_end,
1243
+ "self_attn_mod_eval": f"temperature_patcher({temperature}).attention_basic_with_temperature(q, k, v, extra_options)",
1244
+ "unet_block_id_input": str(block_number) if block_name == "input" else "",
1245
+ "unet_block_id_middle": str(block_number) if block_name == "middle" else "",
1246
+ "unet_block_id_output": str(block_number) if block_name == "output" else "",
1247
+ "unet_attn": f"{unet_attn}"
1248
+ }
1249
+
1250
+ info_string = "\n".join([f"{k}: {v}" for k,v in attn_modifier_dict.items() if v != ""])
1251
+
1252
+ if unet_attn == "both":
1253
+ attn_modifier_dict['unet_attn'] = "attn1"
1254
+ copy_attn_modifier_dict = attn_modifier_dict.copy()
1255
+ copy_attn_modifier_dict['unet_attn'] = "attn2"
1256
+ out_modifiers = [attn_modifier_dict, copy_attn_modifier_dict]
1257
+ else:
1258
+ out_modifiers = [attn_modifier_dict]
1259
+
1260
+ return (out_modifiers if join_parameters is None else join_parameters + out_modifiers, info_string, )
1261
+
1262
+ class uncondZeroNode:
1263
+ @classmethod
1264
+ def INPUT_TYPES(s):
1265
+ return {"required": {
1266
+ "model": ("MODEL",),
1267
+ "scale": ("FLOAT", {"default": 1.2, "min": 0.0, "max": 10.0, "step": 0.01, "round": 0.01}),
1268
+ }}
1269
+ RETURN_TYPES = ("MODEL",)
1270
+ FUNCTION = "patch"
1271
+
1272
+ CATEGORY = "model_patches/Automatic_CFG"
1273
+
1274
+ def patch(self, model, scale):
1275
+ def custom_patch(args):
1276
+ cond_pred = args["cond_denoised"]
1277
+ input_x = args["input"]
1278
+ if args["sigma"][0] <= 1:
1279
+ return input_x - cond_pred
1280
+ cond = input_x - cond_pred
1281
+ uncond = input_x - torch.zeros_like(cond)
1282
+ return uncond + scale * (cond - uncond)
1283
+
1284
+ m = model.clone()
1285
+ m.set_model_sampler_cfg_function(custom_patch)
1286
+ return (m, )
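With the unconditional prediction forced to zero, the CFG formula collapses: uncond equals input_x, so custom_patch returns input_x - scale * cond_pred, meaning the denoised result is simply the conditional prediction multiplied by scale (and plain cond below sigma 1). A quick check of that algebra, reading the return value the way the early exit input_x - cond_pred suggests (denoised = input - returned):

    import torch

    scale = 1.2
    input_x, cond_pred = torch.randn(4, 8, 8), torch.randn(4, 8, 8)
    cond = input_x - cond_pred
    uncond = input_x - torch.zeros_like(cond)    # zeroed uncond prediction
    returned = uncond + scale * (cond - uncond)
    denoised = input_x - returned
    print(torch.allclose(denoised, scale * cond_pred, atol=1e-6))   # True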
extensions/ComfyUI-AutomaticCFG/nodes_sag_custom.py ADDED
@@ -0,0 +1,190 @@
1
+ import torch
2
+ from torch import einsum
3
+ import torch.nn.functional as F
4
+ import math
5
+
6
+ from einops import rearrange, repeat
7
+ import os
8
+ from comfy.ldm.modules.attention import optimized_attention, _ATTN_PRECISION
9
+ import comfy.samplers
10
+
11
+ # from comfy/ldm/modules/attention.py
12
+ # but modified to return attention scores as well as output
13
+ def attention_basic_with_sim(q, k, v, heads, mask=None):
14
+ b, _, dim_head = q.shape
15
+ dim_head //= heads
16
+ scale = dim_head ** -0.5
17
+
18
+ h = heads
19
+ q, k, v = map(
20
+ lambda t: t.unsqueeze(3)
21
+ .reshape(b, -1, heads, dim_head)
22
+ .permute(0, 2, 1, 3)
23
+ .reshape(b * heads, -1, dim_head)
24
+ .contiguous(),
25
+ (q, k, v),
26
+ )
27
+
28
+ # force cast to fp32 to avoid overflowing
29
+ if _ATTN_PRECISION == "fp32":
30
+ sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
31
+ else:
32
+ sim = einsum('b i d, b j d -> b i j', q, k) * scale
33
+
34
+ del q, k
35
+
36
+ if mask is not None:
37
+ mask = rearrange(mask, 'b ... -> b (...)')
38
+ max_neg_value = -torch.finfo(sim.dtype).max
39
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
40
+ sim.masked_fill_(~mask, max_neg_value)
41
+
42
+ # attention, what we cannot get enough of
43
+ sim = sim.softmax(dim=-1)
44
+
45
+ out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
46
+ out = (
47
+ out.unsqueeze(0)
48
+ .reshape(b, heads, -1, dim_head)
49
+ .permute(0, 2, 1, 3)
50
+ .reshape(b, -1, heads * dim_head)
51
+ )
52
+ return (out, sim)
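Unlike optimized_attention, this helper also returns the post-softmax score matrix, shaped (batch * heads, queries, keys), which the SAG node below mines for salient latent positions. A shape check on random tensors, with the einsum written as a matmul for brevity:

    import torch

    b, heads, n_q, n_k, dim_head = 2, 8, 64, 64, 40
    q = torch.randn(b * heads, n_q, dim_head)
    k = torch.randn(b * heads, n_k, dim_head)
    sim = (q @ k.transpose(-2, -1)) * dim_head ** -0.5
    sim = sim.softmax(dim=-1)                    # every row sums to one
    print(sim.shape)                             # torch.Size([16, 64, 64])
    print(torch.allclose(sim.sum(-1), torch.ones(b * heads, n_q)))  # True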
53
+
54
+ def create_blur_map(x0, attn, sigma=3.0, threshold=1.0):
55
+ # reshape and GAP the attention map
56
+ _, hw1, hw2 = attn.shape
57
+ b, _, lh, lw = x0.shape
58
+ attn = attn.reshape(b, -1, hw1, hw2)
59
+ # Global Average Pool
60
+ mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold
61
+ ratio = 2**(math.ceil(math.sqrt(lh * lw / hw1)) - 1).bit_length()
62
+ mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)]
63
+
64
+ # Reshape
65
+ mask = (
66
+ mask.reshape(b, *mid_shape)
67
+ .unsqueeze(1)
68
+ .type(attn.dtype)
69
+ )
70
+ # Upsample
71
+ mask = F.interpolate(mask, (lh, lw))
72
+
73
+ blurred = gaussian_blur_2d(x0, kernel_size=9, sigma=sigma)
74
+ blurred = blurred * mask + x0 * (1 - mask)
75
+ return blurred
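create_blur_map turns the saved attention into a saliency mask: scores are averaged over heads, summed per key position (total attention each latent location receives, which hovers around 1.0, hence the threshold), reshaped to a downscaled grid, upsampled to latent resolution, and used to blur only the salient regions. A shape walk-through with dummy sizes:

    import math
    import torch
    import torch.nn.functional as F

    b, heads, hw, lh, lw = 1, 8, 64, 32, 32      # 8x8 attention over a 32x32 latent
    attn = torch.rand(b * heads, hw, hw).softmax(-1)
    a = attn.reshape(b, -1, hw, hw).mean(1)      # average over heads -> (b, hw, hw)
    mask = a.sum(1) > 1.0                        # attention received per position
    ratio = 2 ** (math.ceil(math.sqrt(lh * lw / hw)) - 1).bit_length()
    mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)]    # [8, 8] here
    mask = mask.reshape(b, *mid_shape).unsqueeze(1).float()
    mask = F.interpolate(mask, (lh, lw))         # (1, 1, 32, 32), ready to blend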
76
+
77
+ def gaussian_blur_2d(img, kernel_size, sigma):
78
+ ksize_half = (kernel_size - 1) * 0.5
79
+
80
+ x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
81
+
82
+ pdf = torch.exp(-0.5 * (x / sigma).pow(2))
83
+
84
+ x_kernel = pdf / pdf.sum()
85
+ x_kernel = x_kernel.to(device=img.device, dtype=img.dtype)
86
+
87
+ kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :])
88
+ kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1])
89
+
90
+ padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2]
91
+
92
+ img = F.pad(img, padding, mode="reflect")
93
+ img = F.conv2d(img, kernel2d, groups=img.shape[-3])
94
+ return img
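The blur is a separable Gaussian: a normalized 1D kernel is outer-multiplied into 2D, then applied depthwise (groups = channel count) after reflect padding. Two sanity checks: the kernel sums to one, and a constant image passes through unchanged:

    import torch
    import torch.nn.functional as F

    kernel_size, sigma = 9, 2.0
    half = (kernel_size - 1) * 0.5
    x = torch.linspace(-half, half, steps=kernel_size)
    k1d = torch.exp(-0.5 * (x / sigma).pow(2))
    k1d = k1d / k1d.sum()
    k2d = torch.mm(k1d[:, None], k1d[None, :])   # outer product -> 2D kernel
    print(round(float(k2d.sum()), 6))            # 1.0

    img = torch.ones(1, 4, 16, 16)
    weight = k2d.expand(4, 1, kernel_size, kernel_size)
    pad = [kernel_size // 2] * 4
    out = F.conv2d(F.pad(img, pad, mode="reflect"), weight, groups=4)
    print(torch.allclose(out, img, atol=1e-5))   # True: constant input unchanged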
95
+
96
+ def get_denoised_ranges(latent, measure="hard", top_k=0.25):
97
+ chans = []
98
+ for x in range(len(latent)):
99
+ max_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=True).values
100
+ min_values = torch.topk(latent[x] - latent[x].mean() if measure == "range" else latent[x], k=int(len(latent[x])*top_k), largest=False).values
101
+ max_val = torch.mean(max_values).item()
102
+ min_val = torch.mean(torch.abs(min_values)).item() if (measure == "hard" or measure == "range") else abs(torch.mean(min_values).item())
103
+ denoised_range = (max_val + min_val) / 2
104
+ chans.append(denoised_range)
105
+ return chans
106
+
107
+ class SelfAttentionGuidanceCustom:
108
+ @classmethod
109
+ def INPUT_TYPES(s):
110
+ return {"required": { "model": ("MODEL",),
111
+ "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 100.0, "step": 0.1}),
112
+ "blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}),
113
+ "sigma_start": ("FLOAT", {"default": 15.0, "min": 0.0, "max": 1000.0, "step": 0.1, "round": 0.1}),
114
+ "sigma_end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.1, "round": 0.1}),
115
+ "auto_scale" : ("BOOLEAN", {"default": False}),
116
+ }}
117
+ RETURN_TYPES = ("MODEL",)
118
+ FUNCTION = "patch"
119
+
120
+ CATEGORY = "model_patches"
121
+
122
+ def patch(self, model, scale, blur_sigma, sigma_start, sigma_end, auto_scale):
123
+ m = model.clone()
124
+
125
+ attn_scores = None
126
+
127
+ # TODO: make this work properly with chunked batches
128
+ # currently, we can only save the attn from one UNet call
129
+ def attn_and_record(q, k, v, extra_options):
130
+ nonlocal attn_scores
131
+ # if uncond, save the attention scores
132
+ heads = extra_options["n_heads"]
133
+ cond_or_uncond = extra_options["cond_or_uncond"]
134
+ b = q.shape[0] // len(cond_or_uncond)
135
+ if 1 in cond_or_uncond:
136
+ uncond_index = cond_or_uncond.index(1)
137
+ # do the entire attention operation, but save the attention scores to attn_scores
138
+ (out, sim) = attention_basic_with_sim(q, k, v, heads=heads)
139
+ # when using a higher batch size, I BELIEVE the result batch dimension is [uc1, ... ucn, c1, ... cn]
140
+ n_slices = heads * b
141
+ attn_scores = sim[n_slices * uncond_index:n_slices * (uncond_index+1)]
142
+ return out
143
+ else:
144
+ return optimized_attention(q, k, v, heads=heads)
145
+
146
+ def post_cfg_function(args):
147
+ nonlocal attn_scores
148
+ uncond_attn = attn_scores
149
+
150
+ sag_scale = scale
151
+ sag_sigma = blur_sigma
152
+ sag_threshold = 1.0
153
+ model = args["model"]
154
+ uncond_pred = args["uncond_denoised"]
155
+ uncond = args["uncond"]
156
+ cfg_result = args["denoised"]
157
+ sigma = args["sigma"]
158
+ model_options = args["model_options"]
159
+ x = args["input"]
160
+ if uncond_pred is None or uncond is None or uncond_attn is None:
161
+ return cfg_result
162
+ if min(cfg_result.shape[2:]) <= 4: #skip when too small to add padding
163
+ return cfg_result
164
+ if sigma[0] > sigma_start or sigma[0] < sigma_end:
165
+ return cfg_result
166
+ # create the adversarially blurred image
167
+ degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold)
168
+ degraded_noised = degraded + x - uncond_pred
169
+ # call into the UNet
170
+ (sag, _) = comfy.samplers.calc_cond_batch(model, [uncond, None], degraded_noised, sigma, model_options)
171
+ # comfy.samplers.calc_cond_uncond_batch(model, uncond, None, degraded_noised, sigma, model_options)
172
+
173
+ if auto_scale:
174
+ denoised_tmp = cfg_result + (degraded - sag) * 8
175
+ for b in range(len(denoised_tmp)):
176
+ denoised_ranges = get_denoised_ranges(denoised_tmp[b])
177
+ for c in range(len(denoised_tmp[b])):
178
+ fixed_scale = (sag_scale / 10) / denoised_ranges[c]
179
+ denoised_tmp[b][c] = cfg_result[b][c] + (degraded[b][c] - sag[b][c]) * fixed_scale
180
+ return denoised_tmp
181
+
182
+ return cfg_result + (degraded - sag) * sag_scale
183
+
184
+ m.set_model_sampler_post_cfg_function(post_cfg_function, disable_cfg1_optimization=False)
185
+
186
+ # from diffusers:
187
+ # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch
188
+ m.set_model_attn1_replace(attn_and_record, "middle", 0, 0)
189
+
190
+ return (m, )
extensions/ComfyUI-AutomaticCFG/presets/A subtle touch.json ADDED
@@ -0,0 +1 @@
1
+ {"lerp_uncond_sigma_start": 1000.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 1000.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 1000.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [{"sigma_start": 15, "sigma_end": 0, "self_attn_mod_eval": "q.sin()", "unet_block_id_input": "", "unet_block_id_middle": "0", "unet_block_id_output": "", "unet_attn": "attn2"}], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "model_options_copy": {"transformer_options": {}}, "attention_modifiers_fake_negative": [], "attention_modifiers_negative": [], "attention_modifiers_positive": [], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": false, "cond_exp_method": "attention_modifiers_input_using_cond", "cond_exp_normalize": false, "cond_exp_sigma_end": 0.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 1.0, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "attention_modifiers_input_using_uncond", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 1.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": true, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "eval_string"], "skip_uncond": true, "uncond_exp": false, "uncond_exp_method": "subtract_attention_modifiers_input_using_uncond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 0.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 1.0, "uncond_sigma_end": 1.0, "uncond_sigma_start": 15.0}
extensions/ComfyUI-AutomaticCFG/presets/Crossed conds customized 1.json ADDED
@@ -0,0 +1 @@
1
+ {"lerp_uncond_sigma_start": 15.0, "lerp_uncond_sigma_end": 1.0, "subtract_latent_mean": false, "subtract_latent_mean_sigma_start": 15.0, "subtract_latent_mean_sigma_end": 1.0, "latent_intensity_rescale": false, "latent_intensity_rescale_sigma_start": 15.0, "latent_intensity_rescale_sigma_end": 3.0, "ignore_pre_cfg_func": false, "auto_cfg_topk": 0.25, "attention_modifiers_global_enabled": true, "attention_modifiers_global": [], "disable_cond": false, "disable_cond_sigma_start": 1000.0, "disable_cond_sigma_end": 0.0, "kwargs": {}, "attention_modifiers_fake_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_negative": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "attention_modifiers_positive": [{"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "k", "unet_block_id_input": "7,8", "unet_block_id_middle": "", "unet_block_id_output": "0", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "v", "unet_block_id_input": "4", "unet_block_id_middle": "0", "unet_block_id_output": "3", "unet_attn": "attn1"}, {"sigma_start": 1000, "sigma_end": 0, "self_attn_mod_eval": "torch.zeros_like(q)", "unet_block_id_input": "8", "unet_block_id_middle": "", "unet_block_id_output": "", "unet_attn": "attn2"}], "auto_cfg_ref": 8.0, "automatic_cfg": "hard", "cond_exp": true, "cond_exp_method": "subtract_attention_modifiers_input_using_uncond", "cond_exp_normalize": false, "cond_exp_sigma_end": 1.0, "cond_exp_sigma_start": 1000.0, "cond_exp_value": 0.3333333333333333, "eval_string_cond": "", "eval_string_fake": "", "eval_string_uncond": "", "fake_uncond_exp": false, "fake_uncond_exp_method": "cond_pred", "fake_uncond_exp_normalize": false, "fake_uncond_exp_value": 1.0, "fake_uncond_multiplier": 1, "fake_uncond_sigma_end": 0.0, "fake_uncond_sigma_start": 1000.0, "fake_uncond_start": false, "latent_intensity_rescale_cfg": 8.0, "latent_intensity_rescale_method": "hard", "lerp_uncond": false, "lerp_uncond_strength": 2.0, "not_in_filter": ["self", "model", "args", "args_filter", "save_as_preset", "preset_name", "model_options_copy", "eval_string"], "skip_uncond": true, "uncond_exp": true, "uncond_exp_method": "subtract_attention_modifiers_input_using_cond", "uncond_exp_normalize": false, "uncond_exp_sigma_end": 1.0, "uncond_exp_sigma_start": 1000.0, "uncond_exp_value": 0.3333333333333333, "uncond_sigma_end": 1.0, "uncond_sigma_start": 150.0}