pscotti committed
Commit ba1bf39
1 Parent(s): b614a95

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. generative_models/CODEOWNERS +1 -0
  2. generative_models/LICENSE-CODE +21 -0
  3. generative_models/README.md +304 -0
  4. generative_models/assets/000.jpg +3 -0
  5. generative_models/assets/001_with_eval.png +3 -0
  6. generative_models/assets/test_image.png +3 -0
  7. generative_models/assets/tile.gif +3 -0
  8. generative_models/assets/turbo_tile.png +3 -0
  9. generative_models/configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml +104 -0
  10. generative_models/configs/example_training/autoencoder/kl-f4/imagenet-kl_f8_8chn.yaml +105 -0
  11. generative_models/configs/example_training/imagenet-f8_cond.yaml +185 -0
  12. generative_models/configs/example_training/toy/cifar10_cond.yaml +98 -0
  13. generative_models/configs/example_training/toy/mnist.yaml +79 -0
  14. generative_models/configs/example_training/toy/mnist_cond.yaml +98 -0
  15. generative_models/configs/example_training/toy/mnist_cond_discrete_eps.yaml +103 -0
  16. generative_models/configs/example_training/toy/mnist_cond_l1_loss.yaml +99 -0
  17. generative_models/configs/example_training/toy/mnist_cond_with_ema.yaml +100 -0
  18. generative_models/configs/example_training/txt2img-clipl-legacy-ucg-training.yaml +182 -0
  19. generative_models/configs/example_training/txt2img-clipl.yaml +184 -0
  20. generative_models/configs/inference/.ipynb_checkpoints/sd_xl_base-checkpoint.yaml +93 -0
  21. generative_models/configs/inference/sd_2_1.yaml +60 -0
  22. generative_models/configs/inference/sd_2_1_768.yaml +60 -0
  23. generative_models/configs/inference/sd_xl_base.yaml +93 -0
  24. generative_models/configs/inference/sd_xl_refiner.yaml +86 -0
  25. generative_models/configs/inference/svd.yaml +131 -0
  26. generative_models/configs/inference/svd_image_decoder.yaml +114 -0
  27. generative_models/configs/unclip6.yaml +220 -0
  28. generative_models/data/DejaVuSans.ttf +0 -0
  29. generative_models/main.py +943 -0
  30. generative_models/model_licenses/LICENCE-SD-Turbo +58 -0
  31. generative_models/model_licenses/LICENSE-SDV +31 -0
  32. generative_models/model_licenses/LICENSE-SDXL-Turbo +58 -0
  33. generative_models/model_licenses/LICENSE-SDXL0.9 +75 -0
  34. generative_models/model_licenses/LICENSE-SDXL1.0 +175 -0
  35. generative_models/pyproject.toml +48 -0
  36. generative_models/pytest.ini +3 -0
  37. generative_models/requirements/pt2.txt +40 -0
  38. generative_models/scripts/__init__.py +0 -0
  39. generative_models/scripts/demo/__init__.py +0 -0
  40. generative_models/scripts/demo/detect.py +156 -0
  41. generative_models/scripts/demo/discretization.py +59 -0
  42. generative_models/scripts/demo/sampling.py +364 -0
  43. generative_models/scripts/demo/streamlit_helpers.py +887 -0
  44. generative_models/scripts/demo/turbo.py +223 -0
  45. generative_models/scripts/demo/video_sampling.py +200 -0
  46. generative_models/scripts/sampling/configs/svd.yaml +146 -0
  47. generative_models/scripts/sampling/configs/svd_image_decoder.yaml +129 -0
  48. generative_models/scripts/sampling/configs/svd_xt.yaml +146 -0
  49. generative_models/scripts/sampling/configs/svd_xt_image_decoder.yaml +129 -0
  50. generative_models/scripts/sampling/simple_video_sample.py +278 -0
generative_models/CODEOWNERS ADDED
@@ -0,0 +1 @@
.github @Stability-AI/infrastructure
generative_models/LICENSE-CODE ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Stability AI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
generative_models/README.md ADDED
@@ -0,0 +1,304 @@
# Generative Models by Stability AI

![sample1](assets/000.jpg)

## News

**November 30, 2023**
- Following the launch of SDXL-Turbo, we are releasing [SD-Turbo](https://huggingface.co/stabilityai/sd-turbo).

**November 28, 2023**
- We are releasing SDXL-Turbo, a lightning-fast text-to-image model.
  Alongside the model, we release a [technical report](https://stability.ai/research/adversarial-diffusion-distillation).
- Usage:
  - Follow the installation instructions or update the existing environment with `pip install streamlit-keyup`.
  - Download the [weights](https://huggingface.co/stabilityai/sdxl-turbo) and place them in the `checkpoints/` directory.
  - Run `streamlit run scripts/demo/turbo.py`.

![tile](assets/turbo_tile.png)

**November 21, 2023**
- We are releasing Stable Video Diffusion, an image-to-video model, for research purposes:
  - [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid): This model was trained to generate 14
    frames at resolution 576x1024 given a context frame of the same size.
    We use the standard image encoder from SD 2.1, but replace the decoder with a temporally-aware `deflickering decoder`.
  - [SVD-XT](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt): Same architecture as `SVD` but finetuned
    for 25-frame generation.
  - We provide a streamlit demo `scripts/demo/video_sampling.py` and a standalone python script `scripts/sampling/simple_video_sample.py` for inference of both models.
  - Alongside the models, we release a [technical report](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets).

![tile](assets/tile.gif)

**July 26, 2023**

- We are releasing two new open models with a
  permissive [`CreativeML Open RAIL++-M` license](model_licenses/LICENSE-SDXL1.0) (see [Inference](#inference) for file
  hashes):
  - [SDXL-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0): An improved version
    over `SDXL-base-0.9`.
  - [SDXL-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0): An improved version
    over `SDXL-refiner-0.9`.

![sample2](assets/001_with_eval.png)

**July 4, 2023**

- A technical report on SDXL is now available [here](https://arxiv.org/abs/2307.01952).

**June 22, 2023**

- We are releasing two new diffusion models for research purposes:
  - `SDXL-base-0.9`: The base model was trained on a variety of aspect ratios on images with resolution 1024^2. The
    base model uses [OpenCLIP-ViT/G](https://github.com/mlfoundations/open_clip)
    and [CLIP-ViT/L](https://github.com/openai/CLIP/tree/main) for text encoding, whereas the refiner model only uses
    the OpenCLIP model.
  - `SDXL-refiner-0.9`: The refiner has been trained to denoise small noise levels of high-quality data and as such is
    not expected to work as a text-to-image model; instead, it should only be used as an image-to-image model.

If you would like to access these models for your research, please apply using one of the following links:
[SDXL-0.9-Base model](https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9)
and [SDXL-0.9-Refiner](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9).
This means that you can apply via either of the two links, and if you are granted access, you can use both.
Please log in to your Hugging Face account with your organization email to request access.
**We plan to do a full release soon (July).**

## The codebase

### General Philosophy

Modularity is king. This repo implements a config-driven approach where we build and combine submodules by
calling `instantiate_from_config()` on objects defined in yaml configs. See `configs/` for many examples.

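The pattern is the same everywhere: a config node names a `target` class by its dotted import path and optionally passes `params` to its constructor. As a minimal sketch (using the `instantiate_from_config` helper from `sgm.util`, as in the old `ldm` codebase; the chosen class path is just one example taken from the configs in this repo):

```python
# Minimal sketch of the config-driven pattern. The `target`/`params` layout
# mirrors the yaml files under `configs/`; the chosen class is only an example.
from omegaconf import OmegaConf
from sgm.util import instantiate_from_config

config = OmegaConf.create({
    "target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization",
    "params": {},  # optional; omitted keys fall back to the class defaults
})

discretizer = instantiate_from_config(config)  # imports the dotted path and calls it with **params
```

Nested configs (e.g. a sampler that itself carries a `discretization_config`) are resolved the same way, which is what makes the submodules freely combinable.
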
### Changelog from the old `ldm` codebase

For training, we use [PyTorch Lightning](https://lightning.ai/docs/pytorch/stable/), but it should be easy to use other
training wrappers around the base modules. The core diffusion model class (formerly `LatentDiffusion`,
now `DiffusionEngine`) has been cleaned up:

- No more extensive subclassing! We now handle all types of conditioning inputs (vectors, sequences and spatial
  conditionings, and all combinations thereof) in a single class: `GeneralConditioner`,
  see `sgm/modules/encoders/modules.py`.
- We separate guiders (such as classifier-free guidance, see `sgm/modules/diffusionmodules/guiders.py`) from the
  samplers (`sgm/modules/diffusionmodules/sampling.py`), and the samplers are independent of the model.
- We adopt the ["denoiser framework"](https://arxiv.org/abs/2206.00364) for both training and inference (the most
  notable change is probably the option to train continuous-time models):
  * Discrete-time models (denoisers) are simply a special case of continuous-time models (denoisers);
    see `sgm/modules/diffusionmodules/denoiser.py`.
  * The following features are now independent: weighting of the diffusion loss
    function (`sgm/modules/diffusionmodules/denoiser_weighting.py`), preconditioning of the
    network (`sgm/modules/diffusionmodules/denoiser_scaling.py`), and sampling of noise levels during
    training (`sgm/modules/diffusionmodules/sigma_sampling.py`); see the sketch after this list.
- Autoencoding models have also been cleaned up.

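To make the independent pieces above concrete, here is how the example training configs in `configs/example_training` wire them together, written as Python dicts in the same `target`/`params` style (class paths and values copied from `toy/cifar10_cond.yaml`; treat this as a sketch of the config structure, not a complete training setup):

```python
# The network preconditioning lives inside the denoiser config ...
denoiser_config = {
    "target": "sgm.modules.diffusionmodules.denoiser.Denoiser",
    "params": {
        "scaling_config": {
            "target": "sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling",
            "params": {"sigma_data": 1.0},
        },
    },
}

# ... while the diffusion-loss weighting and the training-time noise-level
# sampling are configured independently in the loss function.
loss_fn_config = {
    "target": "sgm.modules.diffusionmodules.loss.StandardDiffusionLoss",
    "params": {
        "loss_weighting_config": {
            "target": "sgm.modules.diffusionmodules.loss_weighting.EDMWeighting",
            "params": {"sigma_data": 1.0},
        },
        "sigma_sampler_config": {
            "target": "sgm.modules.diffusionmodules.sigma_sampling.EDMSampling"
        },
    },
}
```
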
## Installation:

<a name="installation"></a>

#### 1. Clone the repo

```shell
git clone https://github.com/Stability-AI/generative-models.git
cd generative-models
```

#### 2. Setting up the virtualenv

This assumes you have navigated to the `generative-models` root after cloning it.

**NOTE:** This is tested under `python3.10`. For other python versions, you might encounter version conflicts.

**PyTorch 2.0**

```shell
# install required packages from pypi
python3 -m venv .pt2
source .pt2/bin/activate
pip3 install -r requirements/pt2.txt
```

#### 3. Install `sgm`

```shell
pip3 install .
```

#### 4. Install `sdata` for training

```shell
pip3 install -e git+https://github.com/Stability-AI/datapipelines.git@main#egg=sdata
```

## Packaging

This repository uses PEP 517 compliant packaging using [Hatch](https://hatch.pypa.io/latest/).

To build a distributable wheel, install `hatch` and run `hatch build`
(specifying `-t wheel` will skip building an sdist, which is not necessary).

```
pip install hatch
hatch build -t wheel
```

You will find the built package in `dist/`. You can install the wheel with `pip install dist/*.whl`.

Note that the package does **not** currently specify dependencies; you will need to install the required packages
manually, depending on your use case and PyTorch version.

## Inference

We provide a [streamlit](https://streamlit.io/) demo for text-to-image and image-to-image sampling
in `scripts/demo/sampling.py`.
We provide file hashes for the complete file as well as for only the saved tensors in the file
(see [Model Spec](https://github.com/Stability-AI/ModelSpec) for a script to evaluate that; a small
hash-check sketch also follows the model list below).
The following models are currently supported:

- [SDXL-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
  ```
  File Hash (sha256): 31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b
  Tensordata Hash (sha256): 0xd7a9105a900fd52748f20725fe52fe52b507fd36bee4fc107b1550a26e6ee1d7
  ```
- [SDXL-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0)
  ```
  File Hash (sha256): 7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f
  Tensordata Hash (sha256): 0x1a77d21bebc4b4de78c474a90cb74dc0d2217caf4061971dbfa75ad406b75d81
  ```
- [SDXL-base-0.9](https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9)
- [SDXL-refiner-0.9](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9)
- [SD-2.1-512](https://huggingface.co/stabilityai/stable-diffusion-2-1-base/blob/main/v2-1_512-ema-pruned.safetensors)
- [SD-2.1-768](https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors)

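As a small example, the full-file sha256 can be checked with the standard library alone; the checkpoint path below is illustrative, and the tensor-data-only hash requires the ModelSpec script linked above and is not covered here:

```python
# Verify the sha256 of a downloaded checkpoint against the published "File Hash".
# The path/filename is illustrative; point this at wherever you placed the weights.
import hashlib
from pathlib import Path

EXPECTED = "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b"  # SDXL-base-1.0
ckpt = Path("checkpoints/sd_xl_base_1.0.safetensors")

h = hashlib.sha256()
with ckpt.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "file hash does not match the published value"
print("file hash OK")
```
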
**Weights for SDXL**:

**SDXL-1.0:**
The weights of SDXL-1.0 are available (subject to
a [`CreativeML Open RAIL++-M` license](model_licenses/LICENSE-SDXL1.0)) here:

- base model: https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/
- refiner model: https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/

**SDXL-0.9:**
The weights of SDXL-0.9 are available and subject to a [research license](model_licenses/LICENSE-SDXL0.9).
If you would like to access these models for your research, please apply using one of the following links:
[SDXL-base-0.9 model](https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9)
and [SDXL-refiner-0.9](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9).
This means that you can apply via either of the two links, and if you are granted access, you can use both.
Please log in to your Hugging Face account with your organization email to request access.

After obtaining the weights, place them into `checkpoints/`.
Next, start the demo using

```
streamlit run scripts/demo/sampling.py --server.port <your_port>
```

### Invisible Watermark Detection

Images generated with our code use the
[invisible-watermark](https://github.com/ShieldMnt/invisible-watermark/)
library to embed an invisible watermark into the model output. We also provide
a script to easily detect that watermark. Please note that this watermark is
not the same as in previous Stable Diffusion 1.x/2.x versions.

To run the script you need either a working installation as above or
an _experimental_ import using only a minimal amount of packages:

```bash
python -m venv .detect
source .detect/bin/activate

pip install "numpy>=1.17" "PyWavelets>=1.1.1" "opencv-python>=4.1.0.25"
pip install --no-deps invisible-watermark
```

With either environment in place, the script is usable in the following ways (don't forget to activate your
virtual environment beforehand, e.g. `source .pt2/bin/activate`):

```bash
# test a single file
python scripts/demo/detect.py <your filename here>
# test multiple files at once
python scripts/demo/detect.py <filename 1> <filename 2> ... <filename n>
# test all files in a specific folder
python scripts/demo/detect.py <your folder name here>/*
```

## Training:

We provide example training configs in `configs/example_training`. To launch a training, run

```
python main.py --base configs/<config1.yaml> configs/<config2.yaml>
```

where configs are merged from left to right (later configs overwrite the same values).
This can be used to combine model, training and data configs. However, all of them can also be
defined in a single config. For example, to run a class-conditional pixel-based diffusion model training on MNIST,
run

```bash
python main.py --base configs/example_training/toy/mnist_cond.yaml
```

**NOTE 1:** Using the non-toy-dataset
configs `configs/example_training/imagenet-f8_cond.yaml`, `configs/example_training/txt2img-clipl.yaml`
and `configs/example_training/txt2img-clipl-legacy-ucg-training.yaml` for training will require edits depending on the
dataset used (which is expected to be stored in tar files in
the [webdataset format](https://github.com/webdataset/webdataset)). To find the parts which have to be adapted, search
for comments containing `USER:` in the respective config.

**NOTE 2:** This repository supports both `pytorch1.13` and `pytorch2` for training generative models. However, for
autoencoder training, as e.g. in `configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml`,
only `pytorch1.13` is supported.

**NOTE 3:** Training latent generative models (as e.g. in `configs/example_training/imagenet-f8_cond.yaml`) requires
retrieving the checkpoint from [Hugging Face](https://huggingface.co/stabilityai/sdxl-vae/tree/main) and replacing
the `CKPT_PATH` placeholder in [this line](configs/example_training/imagenet-f8_cond.yaml#81). The same is to be done
for the provided text-to-image configs.

### Building New Diffusion Models

#### Conditioner

The `GeneralConditioner` is configured through the `conditioner_config`. Its only attribute is `emb_models`, a list of
different embedders (all inheriting from `AbstractEmbModel`) that are used to condition the generative model.
All embedders should define whether or not they are trainable (`is_trainable`, default `False`), a classifier-free
guidance dropout rate (`ucg_rate`, default `0`), and an input key (`input_key`), for example, `txt` for
text-conditioning or `cls` for class-conditioning.
When computing conditionings, the embedder will get `batch[input_key]` as input.
We currently support two- to four-dimensional conditionings, and conditionings from different embedders are
concatenated appropriately.
Note that the order of the embedders in the `conditioner_config` is important; a sketch of a minimal conditioner
follows below.

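As an illustration, the class-conditional toy config `configs/example_training/toy/mnist_cond.yaml` defines a single trainable class embedder; written out as a Python dict in the same `target`/`params` layout, its conditioner looks like this (a sketch of the config structure only):

```python
# Conditioner sketch mirroring configs/example_training/toy/mnist_cond.yaml:
# one trainable class embedder, dropped 20% of the time for classifier-free guidance.
conditioner_config = {
    "target": "sgm.modules.GeneralConditioner",
    "params": {
        "emb_models": [
            {
                "is_trainable": True,   # optimized jointly with the diffusion model
                "input_key": "cls",     # the embedder receives batch["cls"]
                "ucg_rate": 0.2,        # classifier-free guidance dropout rate
                "target": "sgm.modules.encoders.modules.ClassEmbedder",
                "params": {"embed_dim": 128, "n_classes": 10},
            },
        ],
    },
}
```

Additional entries in `emb_models` (e.g. a text encoder with `input_key: txt`) are processed in order and their outputs concatenated as described above.
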
#### Network

The neural network is set through the `network_config`. This used to be called `unet_config`, which is not general
enough as we plan to experiment with transformer-based diffusion backbones.

#### Loss

The loss is configured through `loss_config`. For standard diffusion model training, you will have to
set `sigma_sampler_config`.

#### Sampler config

As discussed above, the sampler is independent of the model. In the `sampler_config`, we set the type of numerical
solver, number of steps, type of discretization, as well as, for example, guidance wrappers for classifier-free
guidance.

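For instance, the toy configs in this repository use an Euler EDM sampler with 50 steps, an EDM discretization, and vanilla classifier-free guidance; in the same `target`/`params` style this reads roughly as follows (values copied from `configs/example_training/toy/cifar10_cond.yaml`):

```python
# Sampler sketch: numerical solver + step count + discretization + CFG guidance wrapper.
sampler_config = {
    "target": "sgm.modules.diffusionmodules.sampling.EulerEDMSampler",
    "params": {
        "num_steps": 50,  # number of solver steps
        "discretization_config": {
            "target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization"
        },
        "guider_config": {
            "target": "sgm.modules.diffusionmodules.guiders.VanillaCFG",
            "params": {"scale": 3.0},  # classifier-free guidance scale
        },
    },
}
```
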
### Dataset Handling

For large-scale training we recommend using the data pipelines from
our [data pipelines](https://github.com/Stability-AI/datapipelines) project. The project is included in the
requirements and installed automatically when following the steps from the [Installation section](#installation).
Small map-style datasets should be defined here in the repository (e.g., MNIST, CIFAR-10, ...) and return a dict of
data keys/values, e.g.,

```python
example = {"jpg": x,  # this is a tensor -1...1 chw
           "txt": "a beautiful image"}
```

where we expect images in -1...1, channel-first format.
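
A minimal map-style dataset following this contract might look as follows; the class name and the use of MNIST with a `cls` key are purely illustrative (only the dict-of-keys return value and the -1...1 channel-first convention come from the description above):

```python
# Sketch of a small map-style dataset returning the expected dict format.
# Images are rescaled to [-1, 1] and kept channel-first (chw), as described above.
import torch
from torch.utils.data import Dataset
from torchvision import datasets, transforms


class ToyMNISTDict(Dataset):  # illustrative name, not part of this repo
    def __init__(self, root: str = "data", train: bool = True):
        self.base = datasets.MNIST(
            root, train=train, download=True, transform=transforms.ToTensor()
        )  # ToTensor yields chw tensors in [0, 1]

    def __len__(self) -> int:
        return len(self.base)

    def __getitem__(self, idx: int) -> dict:
        img, label = self.base[idx]
        return {
            "jpg": img * 2.0 - 1.0,      # rescale [0, 1] -> [-1, 1], shape (1, 28, 28)
            "cls": torch.tensor(label),  # class-conditioning key, cf. the toy configs
        }
```
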
generative_models/assets/000.jpg ADDED

Git LFS Details

  • SHA256: 8c0af5ea509517ca920024595832e77e8e852e21425f4a19f9a1bef0ebb9cc73
  • Pointer size: 131 Bytes
  • Size of remote file: 728 kB
generative_models/assets/001_with_eval.png ADDED

Git LFS Details

  • SHA256: 026fa14e30098729064a00fb7fcec41bb57dcddb33b36b548d553f601bc53634
  • Pointer size: 132 Bytes
  • Size of remote file: 4.19 MB
generative_models/assets/test_image.png ADDED

Git LFS Details

  • SHA256: 83e7729b516ba725cb2283eb397ec2c77fc4b120f52a83801f610eac353b63c4
  • Pointer size: 131 Bytes
  • Size of remote file: 494 kB
generative_models/assets/tile.gif ADDED

Git LFS Details

  • SHA256: 2340a9809e36fa9634633c7cc5fd256737c620ba47151726c85173512dc5c8ff
  • Pointer size: 133 Bytes
  • Size of remote file: 18.6 MB
generative_models/assets/turbo_tile.png ADDED

Git LFS Details

  • SHA256: ad02861815efc0aa3dd3f0cbffa944f2bccd8a504f5f0116fcc14cbba5ea817d
  • Pointer size: 132 Bytes
  • Size of remote file: 2.17 MB
generative_models/configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml ADDED
@@ -0,0 +1,104 @@
1
+ model:
2
+ base_learning_rate: 4.5e-6
3
+ target: sgm.models.autoencoder.AutoencodingEngine
4
+ params:
5
+ input_key: jpg
6
+ monitor: val/rec_loss
7
+
8
+ loss_config:
9
+ target: sgm.modules.autoencoding.losses.GeneralLPIPSWithDiscriminator
10
+ params:
11
+ perceptual_weight: 0.25
12
+ disc_start: 20001
13
+ disc_weight: 0.5
14
+ learn_logvar: True
15
+
16
+ regularization_weights:
17
+ kl_loss: 1.0
18
+
19
+ regularizer_config:
20
+ target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
21
+
22
+ encoder_config:
23
+ target: sgm.modules.diffusionmodules.model.Encoder
24
+ params:
25
+ attn_type: none
26
+ double_z: True
27
+ z_channels: 4
28
+ resolution: 256
29
+ in_channels: 3
30
+ out_ch: 3
31
+ ch: 128
32
+ ch_mult: [1, 2, 4]
33
+ num_res_blocks: 4
34
+ attn_resolutions: []
35
+ dropout: 0.0
36
+
37
+ decoder_config:
38
+ target: sgm.modules.diffusionmodules.model.Decoder
39
+ params: ${model.params.encoder_config.params}
40
+
41
+ data:
42
+ target: sgm.data.dataset.StableDataModuleFromConfig
43
+ params:
44
+ train:
45
+ datapipeline:
46
+ urls:
47
+ - DATA-PATH
48
+ pipeline_config:
49
+ shardshuffle: 10000
50
+ sample_shuffle: 10000
51
+
52
+ decoders:
53
+ - pil
54
+
55
+ postprocessors:
56
+ - target: sdata.mappers.TorchVisionImageTransforms
57
+ params:
58
+ key: jpg
59
+ transforms:
60
+ - target: torchvision.transforms.Resize
61
+ params:
62
+ size: 256
63
+ interpolation: 3
64
+ - target: torchvision.transforms.ToTensor
65
+ - target: sdata.mappers.Rescaler
66
+ - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare
67
+ params:
68
+ h_key: height
69
+ w_key: width
70
+
71
+ loader:
72
+ batch_size: 8
73
+ num_workers: 4
74
+
75
+
76
+ lightning:
77
+ strategy:
78
+ target: pytorch_lightning.strategies.DDPStrategy
79
+ params:
80
+ find_unused_parameters: True
81
+
82
+ modelcheckpoint:
83
+ params:
84
+ every_n_train_steps: 5000
85
+
86
+ callbacks:
87
+ metrics_over_trainsteps_checkpoint:
88
+ params:
89
+ every_n_train_steps: 50000
90
+
91
+ image_logger:
92
+ target: main.ImageLogger
93
+ params:
94
+ enable_autocast: False
95
+ batch_frequency: 1000
96
+ max_images: 8
97
+ increase_log_steps: True
98
+
99
+ trainer:
100
+ devices: 0,
101
+ limit_val_batches: 50
102
+ benchmark: True
103
+ accumulate_grad_batches: 1
104
+ val_check_interval: 10000
generative_models/configs/example_training/autoencoder/kl-f4/imagenet-kl_f8_8chn.yaml ADDED
@@ -0,0 +1,105 @@
1
+ model:
2
+ base_learning_rate: 4.5e-6
3
+ target: sgm.models.autoencoder.AutoencodingEngine
4
+ params:
5
+ input_key: jpg
6
+ monitor: val/loss/rec
7
+ disc_start_iter: 0
8
+
9
+ encoder_config:
10
+ target: sgm.modules.diffusionmodules.model.Encoder
11
+ params:
12
+ attn_type: vanilla-xformers
13
+ double_z: true
14
+ z_channels: 8
15
+ resolution: 256
16
+ in_channels: 3
17
+ out_ch: 3
18
+ ch: 128
19
+ ch_mult: [1, 2, 4, 4]
20
+ num_res_blocks: 2
21
+ attn_resolutions: []
22
+ dropout: 0.0
23
+
24
+ decoder_config:
25
+ target: sgm.modules.diffusionmodules.model.Decoder
26
+ params: ${model.params.encoder_config.params}
27
+
28
+ regularizer_config:
29
+ target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
30
+
31
+ loss_config:
32
+ target: sgm.modules.autoencoding.losses.GeneralLPIPSWithDiscriminator
33
+ params:
34
+ perceptual_weight: 0.25
35
+ disc_start: 20001
36
+ disc_weight: 0.5
37
+ learn_logvar: True
38
+
39
+ regularization_weights:
40
+ kl_loss: 1.0
41
+
42
+ data:
43
+ target: sgm.data.dataset.StableDataModuleFromConfig
44
+ params:
45
+ train:
46
+ datapipeline:
47
+ urls:
48
+ - DATA-PATH
49
+ pipeline_config:
50
+ shardshuffle: 10000
51
+ sample_shuffle: 10000
52
+
53
+ decoders:
54
+ - pil
55
+
56
+ postprocessors:
57
+ - target: sdata.mappers.TorchVisionImageTransforms
58
+ params:
59
+ key: jpg
60
+ transforms:
61
+ - target: torchvision.transforms.Resize
62
+ params:
63
+ size: 256
64
+ interpolation: 3
65
+ - target: torchvision.transforms.ToTensor
66
+ - target: sdata.mappers.Rescaler
67
+ - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare
68
+ params:
69
+ h_key: height
70
+ w_key: width
71
+
72
+ loader:
73
+ batch_size: 8
74
+ num_workers: 4
75
+
76
+
77
+ lightning:
78
+ strategy:
79
+ target: pytorch_lightning.strategies.DDPStrategy
80
+ params:
81
+ find_unused_parameters: True
82
+
83
+ modelcheckpoint:
84
+ params:
85
+ every_n_train_steps: 5000
86
+
87
+ callbacks:
88
+ metrics_over_trainsteps_checkpoint:
89
+ params:
90
+ every_n_train_steps: 50000
91
+
92
+ image_logger:
93
+ target: main.ImageLogger
94
+ params:
95
+ enable_autocast: False
96
+ batch_frequency: 1000
97
+ max_images: 8
98
+ increase_log_steps: True
99
+
100
+ trainer:
101
+ devices: 0,
102
+ limit_val_batches: 50
103
+ benchmark: True
104
+ accumulate_grad_batches: 1
105
+ val_check_interval: 10000
generative_models/configs/example_training/imagenet-f8_cond.yaml ADDED
@@ -0,0 +1,185 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ scale_factor: 0.13025
6
+ disable_first_stage_autocast: True
7
+ log_keys:
8
+ - cls
9
+
10
+ scheduler_config:
11
+ target: sgm.lr_scheduler.LambdaLinearScheduler
12
+ params:
13
+ warm_up_steps: [10000]
14
+ cycle_lengths: [10000000000000]
15
+ f_start: [1.e-6]
16
+ f_max: [1.]
17
+ f_min: [1.]
18
+
19
+ denoiser_config:
20
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
21
+ params:
22
+ num_idx: 1000
23
+
24
+ scaling_config:
25
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
26
+ discretization_config:
27
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
28
+
29
+ network_config:
30
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
31
+ params:
32
+ use_checkpoint: True
33
+ in_channels: 4
34
+ out_channels: 4
35
+ model_channels: 256
36
+ attention_resolutions: [1, 2, 4]
37
+ num_res_blocks: 2
38
+ channel_mult: [1, 2, 4]
39
+ num_head_channels: 64
40
+ num_classes: sequential
41
+ adm_in_channels: 1024
42
+ transformer_depth: 1
43
+ context_dim: 1024
44
+ spatial_transformer_attn_type: softmax-xformers
45
+
46
+ conditioner_config:
47
+ target: sgm.modules.GeneralConditioner
48
+ params:
49
+ emb_models:
50
+ - is_trainable: True
51
+ input_key: cls
52
+ ucg_rate: 0.2
53
+ target: sgm.modules.encoders.modules.ClassEmbedder
54
+ params:
55
+ add_sequence_dim: True
56
+ embed_dim: 1024
57
+ n_classes: 1000
58
+
59
+ - is_trainable: False
60
+ ucg_rate: 0.2
61
+ input_key: original_size_as_tuple
62
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
63
+ params:
64
+ outdim: 256
65
+
66
+ - is_trainable: False
67
+ input_key: crop_coords_top_left
68
+ ucg_rate: 0.2
69
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
70
+ params:
71
+ outdim: 256
72
+
73
+ first_stage_config:
74
+ target: sgm.models.autoencoder.AutoencoderKL
75
+ params:
76
+ ckpt_path: CKPT_PATH
77
+ embed_dim: 4
78
+ monitor: val/rec_loss
79
+ ddconfig:
80
+ attn_type: vanilla-xformers
81
+ double_z: true
82
+ z_channels: 4
83
+ resolution: 256
84
+ in_channels: 3
85
+ out_ch: 3
86
+ ch: 128
87
+ ch_mult: [1, 2, 4, 4]
88
+ num_res_blocks: 2
89
+ attn_resolutions: []
90
+ dropout: 0.0
91
+ lossconfig:
92
+ target: torch.nn.Identity
93
+
94
+ loss_fn_config:
95
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
96
+ params:
97
+ loss_weighting_config:
98
+ target: sgm.modules.diffusionmodules.loss_weighting.EpsWeighting
99
+ sigma_sampler_config:
100
+ target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
101
+ params:
102
+ num_idx: 1000
103
+
104
+ discretization_config:
105
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
106
+
107
+ sampler_config:
108
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
109
+ params:
110
+ num_steps: 50
111
+
112
+ discretization_config:
113
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
114
+
115
+ guider_config:
116
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
117
+ params:
118
+ scale: 5.0
119
+
120
+ data:
121
+ target: sgm.data.dataset.StableDataModuleFromConfig
122
+ params:
123
+ train:
124
+ datapipeline:
125
+ urls:
126
+ # USER: adapt this path to the root of your custom dataset
127
+ - DATA_PATH
128
+ pipeline_config:
129
+ shardshuffle: 10000
130
+ sample_shuffle: 10000 # USER: you might wanna adapt depending on your available RAM
131
+
132
+ decoders:
133
+ - pil
134
+
135
+ postprocessors:
136
+ - target: sdata.mappers.TorchVisionImageTransforms
137
+ params:
138
+ key: jpg # USER: you might wanna adapt this for your custom dataset
139
+ transforms:
140
+ - target: torchvision.transforms.Resize
141
+ params:
142
+ size: 256
143
+ interpolation: 3
144
+ - target: torchvision.transforms.ToTensor
145
+ - target: sdata.mappers.Rescaler
146
+
147
+ - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare
148
+ params:
149
+ h_key: height # USER: you might wanna adapt this for your custom dataset
150
+ w_key: width # USER: you might wanna adapt this for your custom dataset
151
+
152
+ loader:
153
+ batch_size: 64
154
+ num_workers: 6
155
+
156
+ lightning:
157
+ modelcheckpoint:
158
+ params:
159
+ every_n_train_steps: 5000
160
+
161
+ callbacks:
162
+ metrics_over_trainsteps_checkpoint:
163
+ params:
164
+ every_n_train_steps: 25000
165
+
166
+ image_logger:
167
+ target: main.ImageLogger
168
+ params:
169
+ disabled: False
170
+ enable_autocast: False
171
+ batch_frequency: 1000
172
+ max_images: 8
173
+ increase_log_steps: True
174
+ log_first_step: False
175
+ log_images_kwargs:
176
+ use_ema_scope: False
177
+ N: 8
178
+ n_rows: 2
179
+
180
+ trainer:
181
+ devices: 0,
182
+ benchmark: True
183
+ num_sanity_val_steps: 0
184
+ accumulate_grad_batches: 1
185
+ max_epochs: 1000
generative_models/configs/example_training/toy/cifar10_cond.yaml ADDED
@@ -0,0 +1,98 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ denoiser_config:
6
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
7
+ params:
8
+ scaling_config:
9
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
10
+ params:
11
+ sigma_data: 1.0
12
+
13
+ network_config:
14
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
15
+ params:
16
+ in_channels: 3
17
+ out_channels: 3
18
+ model_channels: 32
19
+ attention_resolutions: []
20
+ num_res_blocks: 4
21
+ channel_mult: [1, 2, 2]
22
+ num_head_channels: 32
23
+ num_classes: sequential
24
+ adm_in_channels: 128
25
+
26
+ conditioner_config:
27
+ target: sgm.modules.GeneralConditioner
28
+ params:
29
+ emb_models:
30
+ - is_trainable: True
31
+ input_key: cls
32
+ ucg_rate: 0.2
33
+ target: sgm.modules.encoders.modules.ClassEmbedder
34
+ params:
35
+ embed_dim: 128
36
+ n_classes: 10
37
+
38
+ first_stage_config:
39
+ target: sgm.models.autoencoder.IdentityFirstStage
40
+
41
+ loss_fn_config:
42
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
43
+ params:
44
+ loss_weighting_config:
45
+ target: sgm.modules.diffusionmodules.loss_weighting.EDMWeighting
46
+ params:
47
+ sigma_data: 1.0
48
+ sigma_sampler_config:
49
+ target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling
50
+
51
+ sampler_config:
52
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
53
+ params:
54
+ num_steps: 50
55
+
56
+ discretization_config:
57
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
58
+
59
+ guider_config:
60
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
61
+ params:
62
+ scale: 3.0
63
+
64
+ data:
65
+ target: sgm.data.cifar10.CIFAR10Loader
66
+ params:
67
+ batch_size: 512
68
+ num_workers: 1
69
+
70
+ lightning:
71
+ modelcheckpoint:
72
+ params:
73
+ every_n_train_steps: 5000
74
+
75
+ callbacks:
76
+ metrics_over_trainsteps_checkpoint:
77
+ params:
78
+ every_n_train_steps: 25000
79
+
80
+ image_logger:
81
+ target: main.ImageLogger
82
+ params:
83
+ disabled: False
84
+ batch_frequency: 1000
85
+ max_images: 64
86
+ increase_log_steps: True
87
+ log_first_step: False
88
+ log_images_kwargs:
89
+ use_ema_scope: False
90
+ N: 64
91
+ n_rows: 8
92
+
93
+ trainer:
94
+ devices: 0,
95
+ benchmark: True
96
+ num_sanity_val_steps: 0
97
+ accumulate_grad_batches: 1
98
+ max_epochs: 20
generative_models/configs/example_training/toy/mnist.yaml ADDED
@@ -0,0 +1,79 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ denoiser_config:
6
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
7
+ params:
8
+ scaling_config:
9
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
10
+ params:
11
+ sigma_data: 1.0
12
+
13
+ network_config:
14
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
15
+ params:
16
+ in_channels: 1
17
+ out_channels: 1
18
+ model_channels: 32
19
+ attention_resolutions: []
20
+ num_res_blocks: 4
21
+ channel_mult: [1, 2, 2]
22
+ num_head_channels: 32
23
+
24
+ first_stage_config:
25
+ target: sgm.models.autoencoder.IdentityFirstStage
26
+
27
+ loss_fn_config:
28
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
29
+ params:
30
+ loss_weighting_config:
31
+ target: sgm.modules.diffusionmodules.loss_weighting.EDMWeighting
32
+ params:
33
+ sigma_data: 1.0
34
+ sigma_sampler_config:
35
+ target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling
36
+
37
+ sampler_config:
38
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
39
+ params:
40
+ num_steps: 50
41
+
42
+ discretization_config:
43
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
44
+
45
+ data:
46
+ target: sgm.data.mnist.MNISTLoader
47
+ params:
48
+ batch_size: 512
49
+ num_workers: 1
50
+
51
+ lightning:
52
+ modelcheckpoint:
53
+ params:
54
+ every_n_train_steps: 5000
55
+
56
+ callbacks:
57
+ metrics_over_trainsteps_checkpoint:
58
+ params:
59
+ every_n_train_steps: 25000
60
+
61
+ image_logger:
62
+ target: main.ImageLogger
63
+ params:
64
+ disabled: False
65
+ batch_frequency: 1000
66
+ max_images: 64
67
+ increase_log_steps: False
68
+ log_first_step: False
69
+ log_images_kwargs:
70
+ use_ema_scope: False
71
+ N: 64
72
+ n_rows: 8
73
+
74
+ trainer:
75
+ devices: 0,
76
+ benchmark: True
77
+ num_sanity_val_steps: 0
78
+ accumulate_grad_batches: 1
79
+ max_epochs: 10
generative_models/configs/example_training/toy/mnist_cond.yaml ADDED
@@ -0,0 +1,98 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ denoiser_config:
6
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
7
+ params:
8
+ scaling_config:
9
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
10
+ params:
11
+ sigma_data: 1.0
12
+
13
+ network_config:
14
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
15
+ params:
16
+ in_channels: 1
17
+ out_channels: 1
18
+ model_channels: 32
19
+ attention_resolutions: []
20
+ num_res_blocks: 4
21
+ channel_mult: [1, 2, 2]
22
+ num_head_channels: 32
23
+ num_classes: sequential
24
+ adm_in_channels: 128
25
+
26
+ conditioner_config:
27
+ target: sgm.modules.GeneralConditioner
28
+ params:
29
+ emb_models:
30
+ - is_trainable: True
31
+ input_key: cls
32
+ ucg_rate: 0.2
33
+ target: sgm.modules.encoders.modules.ClassEmbedder
34
+ params:
35
+ embed_dim: 128
36
+ n_classes: 10
37
+
38
+ first_stage_config:
39
+ target: sgm.models.autoencoder.IdentityFirstStage
40
+
41
+ loss_fn_config:
42
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
43
+ params:
44
+ loss_weighting_config:
45
+ target: sgm.modules.diffusionmodules.loss_weighting.EDMWeighting
46
+ params:
47
+ sigma_data: 1.0
48
+ sigma_sampler_config:
49
+ target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling
50
+
51
+ sampler_config:
52
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
53
+ params:
54
+ num_steps: 50
55
+
56
+ discretization_config:
57
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
58
+
59
+ guider_config:
60
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
61
+ params:
62
+ scale: 3.0
63
+
64
+ data:
65
+ target: sgm.data.mnist.MNISTLoader
66
+ params:
67
+ batch_size: 512
68
+ num_workers: 1
69
+
70
+ lightning:
71
+ modelcheckpoint:
72
+ params:
73
+ every_n_train_steps: 5000
74
+
75
+ callbacks:
76
+ metrics_over_trainsteps_checkpoint:
77
+ params:
78
+ every_n_train_steps: 25000
79
+
80
+ image_logger:
81
+ target: main.ImageLogger
82
+ params:
83
+ disabled: False
84
+ batch_frequency: 1000
85
+ max_images: 16
86
+ increase_log_steps: True
87
+ log_first_step: False
88
+ log_images_kwargs:
89
+ use_ema_scope: False
90
+ N: 16
91
+ n_rows: 4
92
+
93
+ trainer:
94
+ devices: 0,
95
+ benchmark: True
96
+ num_sanity_val_steps: 0
97
+ accumulate_grad_batches: 1
98
+ max_epochs: 20
generative_models/configs/example_training/toy/mnist_cond_discrete_eps.yaml ADDED
@@ -0,0 +1,103 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ denoiser_config:
6
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
7
+ params:
8
+ num_idx: 1000
9
+
10
+ scaling_config:
11
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
12
+ discretization_config:
13
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
14
+
15
+ network_config:
16
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
17
+ params:
18
+ in_channels: 1
19
+ out_channels: 1
20
+ model_channels: 32
21
+ attention_resolutions: []
22
+ num_res_blocks: 4
23
+ channel_mult: [1, 2, 2]
24
+ num_head_channels: 32
25
+ num_classes: sequential
26
+ adm_in_channels: 128
27
+
28
+ conditioner_config:
29
+ target: sgm.modules.GeneralConditioner
30
+ params:
31
+ emb_models:
32
+ - is_trainable: True
33
+ input_key: cls
34
+ ucg_rate: 0.2
35
+ target: sgm.modules.encoders.modules.ClassEmbedder
36
+ params:
37
+ embed_dim: 128
38
+ n_classes: 10
39
+
40
+ first_stage_config:
41
+ target: sgm.models.autoencoder.IdentityFirstStage
42
+
43
+ loss_fn_config:
44
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
45
+ params:
46
+ loss_weighting_config:
47
+ target: sgm.modules.diffusionmodules.loss_weighting.EDMWeighting
48
+ sigma_sampler_config:
49
+ target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
50
+ params:
51
+ num_idx: 1000
52
+
53
+ discretization_config:
54
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
55
+
56
+ sampler_config:
57
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
58
+ params:
59
+ num_steps: 50
60
+
61
+ discretization_config:
62
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
63
+
64
+ guider_config:
65
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
66
+ params:
67
+ scale: 5.0
68
+
69
+ data:
70
+ target: sgm.data.mnist.MNISTLoader
71
+ params:
72
+ batch_size: 512
73
+ num_workers: 1
74
+
75
+ lightning:
76
+ modelcheckpoint:
77
+ params:
78
+ every_n_train_steps: 5000
79
+
80
+ callbacks:
81
+ metrics_over_trainsteps_checkpoint:
82
+ params:
83
+ every_n_train_steps: 25000
84
+
85
+ image_logger:
86
+ target: main.ImageLogger
87
+ params:
88
+ disabled: False
89
+ batch_frequency: 1000
90
+ max_images: 16
91
+ increase_log_steps: True
92
+ log_first_step: False
93
+ log_images_kwargs:
94
+ use_ema_scope: False
95
+ N: 16
96
+ n_rows: 4
97
+
98
+ trainer:
99
+ devices: 0,
100
+ benchmark: True
101
+ num_sanity_val_steps: 0
102
+ accumulate_grad_batches: 1
103
+ max_epochs: 20
generative_models/configs/example_training/toy/mnist_cond_l1_loss.yaml ADDED
@@ -0,0 +1,99 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ denoiser_config:
6
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
7
+ params:
8
+ scaling_config:
9
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
10
+ params:
11
+ sigma_data: 1.0
12
+
13
+ network_config:
14
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
15
+ params:
16
+ in_channels: 1
17
+ out_channels: 1
18
+ model_channels: 32
19
+ attention_resolutions: []
20
+ num_res_blocks: 4
21
+ channel_mult: [1, 2, 2]
22
+ num_head_channels: 32
23
+ num_classes: sequential
24
+ adm_in_channels: 128
25
+
26
+ conditioner_config:
27
+ target: sgm.modules.GeneralConditioner
28
+ params:
29
+ emb_models:
30
+ - is_trainable: True
31
+ input_key: cls
32
+ ucg_rate: 0.2
33
+ target: sgm.modules.encoders.modules.ClassEmbedder
34
+ params:
35
+ embed_dim: 128
36
+ n_classes: 10
37
+
38
+ first_stage_config:
39
+ target: sgm.models.autoencoder.IdentityFirstStage
40
+
41
+ loss_fn_config:
42
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
43
+ params:
44
+ loss_type: l1
45
+ loss_weighting_config:
46
+ target: sgm.modules.diffusionmodules.loss_weighting.EDMWeighting
47
+ params:
48
+ sigma_data: 1.0
49
+ sigma_sampler_config:
50
+ target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling
51
+
52
+ sampler_config:
53
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
54
+ params:
55
+ num_steps: 50
56
+
57
+ discretization_config:
58
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
59
+
60
+ guider_config:
61
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
62
+ params:
63
+ scale: 3.0
64
+
65
+ data:
66
+ target: sgm.data.mnist.MNISTLoader
67
+ params:
68
+ batch_size: 512
69
+ num_workers: 1
70
+
71
+ lightning:
72
+ modelcheckpoint:
73
+ params:
74
+ every_n_train_steps: 5000
75
+
76
+ callbacks:
77
+ metrics_over_trainsteps_checkpoint:
78
+ params:
79
+ every_n_train_steps: 25000
80
+
81
+ image_logger:
82
+ target: main.ImageLogger
83
+ params:
84
+ disabled: False
85
+ batch_frequency: 1000
86
+ max_images: 64
87
+ increase_log_steps: True
88
+ log_first_step: False
89
+ log_images_kwargs:
90
+ use_ema_scope: False
91
+ N: 64
92
+ n_rows: 8
93
+
94
+ trainer:
95
+ devices: 0,
96
+ benchmark: True
97
+ num_sanity_val_steps: 0
98
+ accumulate_grad_batches: 1
99
+ max_epochs: 20
generative_models/configs/example_training/toy/mnist_cond_with_ema.yaml ADDED
@@ -0,0 +1,100 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ use_ema: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
9
+ params:
10
+ scaling_config:
11
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
12
+ params:
13
+ sigma_data: 1.0
14
+
15
+ network_config:
16
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
17
+ params:
18
+ in_channels: 1
19
+ out_channels: 1
20
+ model_channels: 32
21
+ attention_resolutions: []
22
+ num_res_blocks: 4
23
+ channel_mult: [1, 2, 2]
24
+ num_head_channels: 32
25
+ num_classes: sequential
26
+ adm_in_channels: 128
27
+
28
+ conditioner_config:
29
+ target: sgm.modules.GeneralConditioner
30
+ params:
31
+ emb_models:
32
+ - is_trainable: True
33
+ input_key: cls
34
+ ucg_rate: 0.2
35
+ target: sgm.modules.encoders.modules.ClassEmbedder
36
+ params:
37
+ embed_dim: 128
38
+ n_classes: 10
39
+
40
+ first_stage_config:
41
+ target: sgm.models.autoencoder.IdentityFirstStage
42
+
43
+ loss_fn_config:
44
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
45
+ params:
46
+ loss_weighting_config:
47
+ target: sgm.modules.diffusionmodules.loss_weighting.EDMWeighting
48
+ params:
49
+ sigma_data: 1.0
50
+ sigma_sampler_config:
51
+ target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling
52
+
53
+ sampler_config:
54
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
55
+ params:
56
+ num_steps: 50
57
+
58
+ discretization_config:
59
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
60
+
61
+ guider_config:
62
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
63
+ params:
64
+ scale: 3.0
65
+
66
+ data:
67
+ target: sgm.data.mnist.MNISTLoader
68
+ params:
69
+ batch_size: 512
70
+ num_workers: 1
71
+
72
+ lightning:
73
+ modelcheckpoint:
74
+ params:
75
+ every_n_train_steps: 5000
76
+
77
+ callbacks:
78
+ metrics_over_trainsteps_checkpoint:
79
+ params:
80
+ every_n_train_steps: 25000
81
+
82
+ image_logger:
83
+ target: main.ImageLogger
84
+ params:
85
+ disabled: False
86
+ batch_frequency: 1000
87
+ max_images: 64
88
+ increase_log_steps: True
89
+ log_first_step: False
90
+ log_images_kwargs:
91
+ use_ema_scope: False
92
+ N: 64
93
+ n_rows: 8
94
+
95
+ trainer:
96
+ devices: 0,
97
+ benchmark: True
98
+ num_sanity_val_steps: 0
99
+ accumulate_grad_batches: 1
100
+ max_epochs: 20
generative_models/configs/example_training/txt2img-clipl-legacy-ucg-training.yaml ADDED
@@ -0,0 +1,182 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ scale_factor: 0.13025
6
+ disable_first_stage_autocast: True
7
+ log_keys:
8
+ - txt
9
+
10
+ scheduler_config:
11
+ target: sgm.lr_scheduler.LambdaLinearScheduler
12
+ params:
13
+ warm_up_steps: [10000]
14
+ cycle_lengths: [10000000000000]
15
+ f_start: [1.e-6]
16
+ f_max: [1.]
17
+ f_min: [1.]
18
+
19
+ denoiser_config:
20
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
21
+ params:
22
+ num_idx: 1000
23
+
24
+ scaling_config:
25
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
26
+ discretization_config:
27
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
28
+
29
+ network_config:
30
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
31
+ params:
32
+ use_checkpoint: True
33
+ in_channels: 4
34
+ out_channels: 4
35
+ model_channels: 320
36
+ attention_resolutions: [1, 2, 4]
37
+ num_res_blocks: 2
38
+ channel_mult: [1, 2, 4, 4]
39
+ num_head_channels: 64
40
+ num_classes: sequential
41
+ adm_in_channels: 1792
42
+ num_heads: 1
43
+ transformer_depth: 1
44
+ context_dim: 768
45
+ spatial_transformer_attn_type: softmax-xformers
46
+
47
+ conditioner_config:
48
+ target: sgm.modules.GeneralConditioner
49
+ params:
50
+ emb_models:
51
+ - is_trainable: True
52
+ input_key: txt
53
+ ucg_rate: 0.1
54
+ legacy_ucg_value: ""
55
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
56
+ params:
57
+ always_return_pooled: True
58
+
59
+ - is_trainable: False
60
+ ucg_rate: 0.1
61
+ input_key: original_size_as_tuple
62
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
63
+ params:
64
+ outdim: 256
65
+
66
+ - is_trainable: False
67
+ input_key: crop_coords_top_left
68
+ ucg_rate: 0.1
69
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
70
+ params:
71
+ outdim: 256
72
+
73
+ first_stage_config:
74
+ target: sgm.models.autoencoder.AutoencoderKL
75
+ params:
76
+ ckpt_path: CKPT_PATH
77
+ embed_dim: 4
78
+ monitor: val/rec_loss
79
+ ddconfig:
80
+ attn_type: vanilla-xformers
81
+ double_z: true
82
+ z_channels: 4
83
+ resolution: 256
84
+ in_channels: 3
85
+ out_ch: 3
86
+ ch: 128
87
+ ch_mult: [ 1, 2, 4, 4 ]
88
+ num_res_blocks: 2
89
+ attn_resolutions: [ ]
90
+ dropout: 0.0
91
+ lossconfig:
92
+ target: torch.nn.Identity
93
+
94
+ loss_fn_config:
95
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
96
+ params:
97
+ loss_weighting_config:
98
+ target: sgm.modules.diffusionmodules.loss_weighting.EpsWeighting
99
+ sigma_sampler_config:
100
+ target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
101
+ params:
102
+ num_idx: 1000
103
+
104
+ discretization_config:
105
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
106
+
107
+ sampler_config:
108
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
109
+ params:
110
+ num_steps: 50
111
+
112
+ discretization_config:
113
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
114
+
115
+ guider_config:
116
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
117
+ params:
118
+ scale: 7.5
119
+
120
+ data:
121
+ target: sgm.data.dataset.StableDataModuleFromConfig
122
+ params:
123
+ train:
124
+ datapipeline:
125
+ urls:
126
+ # USER: adapt this path to the root of your custom dataset
127
+ - DATA_PATH
128
+ pipeline_config:
129
+ shardshuffle: 10000
130
+ sample_shuffle: 10000 # USER: you might wanna adapt depending on your available RAM
131
+
132
+ decoders:
133
+ - pil
134
+
135
+ postprocessors:
136
+ - target: sdata.mappers.TorchVisionImageTransforms
137
+ params:
138
+ key: jpg # USER: you might wanna adapt this for your custom dataset
139
+ transforms:
140
+ - target: torchvision.transforms.Resize
141
+ params:
142
+ size: 256
143
+ interpolation: 3
144
+ - target: torchvision.transforms.ToTensor
145
+ - target: sdata.mappers.Rescaler
146
+ - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare
147
+ # USER: you might wanna use non-default parameters due to your custom dataset
148
+
149
+ loader:
150
+ batch_size: 64
151
+ num_workers: 6
152
+
153
+ lightning:
154
+ modelcheckpoint:
155
+ params:
156
+ every_n_train_steps: 5000
157
+
158
+ callbacks:
159
+ metrics_over_trainsteps_checkpoint:
160
+ params:
161
+ every_n_train_steps: 25000
162
+
163
+ image_logger:
164
+ target: main.ImageLogger
165
+ params:
166
+ disabled: False
167
+ enable_autocast: False
168
+ batch_frequency: 1000
169
+ max_images: 8
170
+ increase_log_steps: True
171
+ log_first_step: False
172
+ log_images_kwargs:
173
+ use_ema_scope: False
174
+ N: 8
175
+ n_rows: 2
176
+
177
+ trainer:
178
+ devices: 0,
179
+ benchmark: True
180
+ num_sanity_val_steps: 0
181
+ accumulate_grad_batches: 1
182
+ max_epochs: 1000
generative_models/configs/example_training/txt2img-clipl.yaml ADDED
@@ -0,0 +1,184 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ scale_factor: 0.13025
6
+ disable_first_stage_autocast: True
7
+ log_keys:
8
+ - txt
9
+
10
+ scheduler_config:
11
+ target: sgm.lr_scheduler.LambdaLinearScheduler
12
+ params:
13
+ warm_up_steps: [10000]
14
+ cycle_lengths: [10000000000000]
15
+ f_start: [1.e-6]
16
+ f_max: [1.]
17
+ f_min: [1.]
18
+
19
+ denoiser_config:
20
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
21
+ params:
22
+ num_idx: 1000
23
+
24
+ scaling_config:
25
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
26
+ discretization_config:
27
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
28
+
29
+ network_config:
30
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
31
+ params:
32
+ use_checkpoint: True
33
+ in_channels: 4
34
+ out_channels: 4
35
+ model_channels: 320
36
+ attention_resolutions: [1, 2, 4]
37
+ num_res_blocks: 2
38
+ channel_mult: [1, 2, 4, 4]
39
+ num_head_channels: 64
40
+ num_classes: sequential
41
+ adm_in_channels: 1792
42
+ num_heads: 1
43
+ transformer_depth: 1
44
+ context_dim: 768
45
+ spatial_transformer_attn_type: softmax-xformers
46
+
47
+ conditioner_config:
48
+ target: sgm.modules.GeneralConditioner
49
+ params:
50
+ emb_models:
51
+ - is_trainable: True
52
+ input_key: txt
53
+ ucg_rate: 0.1
54
+ legacy_ucg_value: ""
55
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
56
+ params:
57
+ always_return_pooled: True
58
+
59
+ - is_trainable: False
60
+ ucg_rate: 0.1
61
+ input_key: original_size_as_tuple
62
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
63
+ params:
64
+ outdim: 256
65
+
66
+ - is_trainable: False
67
+ input_key: crop_coords_top_left
68
+ ucg_rate: 0.1
69
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
70
+ params:
71
+ outdim: 256
72
+
73
+ first_stage_config:
74
+ target: sgm.models.autoencoder.AutoencoderKL
75
+ params:
76
+ ckpt_path: CKPT_PATH
77
+ embed_dim: 4
78
+ monitor: val/rec_loss
79
+ ddconfig:
80
+ attn_type: vanilla-xformers
81
+ double_z: true
82
+ z_channels: 4
83
+ resolution: 256
84
+ in_channels: 3
85
+ out_ch: 3
86
+ ch: 128
87
+ ch_mult: [1, 2, 4, 4]
88
+ num_res_blocks: 2
89
+ attn_resolutions: []
90
+ dropout: 0.0
91
+ lossconfig:
92
+ target: torch.nn.Identity
93
+
94
+ loss_fn_config:
95
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
96
+ params:
97
+ loss_weighting_config:
98
+ target: sgm.modules.diffusionmodules.loss_weighting.EpsWeighting
99
+ sigma_sampler_config:
100
+ target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
101
+ params:
102
+ num_idx: 1000
103
+
104
+ discretization_config:
105
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
106
+
107
+ sampler_config:
108
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
109
+ params:
110
+ num_steps: 50
111
+
112
+ discretization_config:
113
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
114
+
115
+ guider_config:
116
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
117
+ params:
118
+ scale: 7.5
119
+
120
+ data:
121
+ target: sgm.data.dataset.StableDataModuleFromConfig
122
+ params:
123
+ train:
124
+ datapipeline:
125
+ urls:
126
+ # USER: adapt this path to the root of your custom dataset
127
+ - DATA_PATH
128
+ pipeline_config:
129
+ shardshuffle: 10000
130
+ sample_shuffle: 10000
131
+
132
+
133
+ decoders:
134
+ - pil
135
+
136
+ postprocessors:
137
+ - target: sdata.mappers.TorchVisionImageTransforms
138
+ params:
139
+ key: jpg # USER: you may want to adapt this for your custom dataset
140
+ transforms:
141
+ - target: torchvision.transforms.Resize
142
+ params:
143
+ size: 256
144
+ interpolation: 3
145
+ - target: torchvision.transforms.ToTensor
146
+ - target: sdata.mappers.Rescaler
147
+ # USER: you may want to use non-default parameters for your custom dataset
148
+ - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare
149
+ # USER: you may want to use non-default parameters for your custom dataset
150
+
151
+ loader:
152
+ batch_size: 64
153
+ num_workers: 6
154
+
155
+ lightning:
156
+ modelcheckpoint:
157
+ params:
158
+ every_n_train_steps: 5000
159
+
160
+ callbacks:
161
+ metrics_over_trainsteps_checkpoint:
162
+ params:
163
+ every_n_train_steps: 25000
164
+
165
+ image_logger:
166
+ target: main.ImageLogger
167
+ params:
168
+ disabled: False
169
+ enable_autocast: False
170
+ batch_frequency: 1000
171
+ max_images: 8
172
+ increase_log_steps: True
173
+ log_first_step: False
174
+ log_images_kwargs:
175
+ use_ema_scope: False
176
+ N: 8
177
+ n_rows: 2
178
+
179
+ trainer:
180
+ devices: 0,
181
+ benchmark: True
182
+ num_sanity_val_steps: 0
183
+ accumulate_grad_batches: 1
184
+ max_epochs: 1000
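A quick consistency check on this training config: `adm_in_channels: 1792` in the UNet must equal the total width of the pooled/vector conditionings, i.e. the pooled CLIP ViT-L text embedding (768-d, assumed from the standard ViT-L/14 width rather than read from code here) plus the two 2-component size/crop tuples embedded at 256 features per scalar. The sketch below also shows how such a config is typically loaded; the `sgm` package from this upload must be importable, and the CKPT_PATH / DATA_PATH placeholders in the YAML must be filled in before the model can actually be built.

```python
from omegaconf import OmegaConf
from sgm.util import instantiate_from_config  # helper used by main.py in this upload

# vector-conditioning bookkeeping for the conditioner defined above
pooled_clip_vitl = 768                                   # assumed CLIP ViT-L pooled width
vector_tuples = {"original_size_as_tuple": 2, "crop_coords_top_left": 2}
adm = pooled_clip_vitl + sum(n * 256 for n in vector_tuples.values())
assert adm == 1792                                       # matches network_config.params.adm_in_channels

# loading the config (placeholders in the YAML must be replaced first)
cfg = OmegaConf.load("generative_models/configs/example_training/txt2img-clipl.yaml")
model = instantiate_from_config(cfg.model)               # builds the DiffusionEngine
```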
generative_models/configs/inference/.ipynb_checkpoints/sd_xl_base-checkpoint.yaml ADDED
@@ -0,0 +1,93 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.13025
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ scaling_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
14
+ discretization_config:
15
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
16
+
17
+ network_config:
18
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
19
+ params:
20
+ adm_in_channels: 2816
21
+ num_classes: sequential
22
+ use_checkpoint: True
23
+ in_channels: 4
24
+ out_channels: 4
25
+ model_channels: 320
26
+ attention_resolutions: [4, 2]
27
+ num_res_blocks: 2
28
+ channel_mult: [1, 2, 4]
29
+ num_head_channels: 64
30
+ use_linear_in_transformer: True
31
+ transformer_depth: [1, 2, 10]
32
+ context_dim: 2048
33
+ spatial_transformer_attn_type: softmax-xformers
34
+
35
+ conditioner_config:
36
+ target: sgm.modules.GeneralConditioner
37
+ params:
38
+ emb_models:
39
+ - is_trainable: False
40
+ input_key: txt
41
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
42
+ params:
43
+ layer: hidden
44
+ layer_idx: 11
45
+
46
+ - is_trainable: False
47
+ input_key: txt
48
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
49
+ params:
50
+ arch: ViT-bigG-14
51
+ version: laion2b_s39b_b160k
52
+ freeze: True
53
+ layer: penultimate
54
+ always_return_pooled: True
55
+ legacy: False
56
+
57
+ - is_trainable: False
58
+ input_key: original_size_as_tuple
59
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
60
+ params:
61
+ outdim: 256
62
+
63
+ - is_trainable: False
64
+ input_key: crop_coords_top_left
65
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
66
+ params:
67
+ outdim: 256
68
+
69
+ - is_trainable: False
70
+ input_key: target_size_as_tuple
71
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
72
+ params:
73
+ outdim: 256
74
+
75
+ first_stage_config:
76
+ target: sgm.models.autoencoder.AutoencoderKL
77
+ params:
78
+ embed_dim: 4
79
+ monitor: val/rec_loss
80
+ ddconfig:
81
+ attn_type: vanilla-xformers
82
+ double_z: true
83
+ z_channels: 4
84
+ resolution: 256
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult: [1, 2, 4, 4]
89
+ num_res_blocks: 2
90
+ attn_resolutions: []
91
+ dropout: 0.0
92
+ lossconfig:
93
+ target: torch.nn.Identity
generative_models/configs/inference/sd_2_1.yaml ADDED
@@ -0,0 +1,60 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ scaling_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
14
+ discretization_config:
15
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
16
+
17
+ network_config:
18
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
19
+ params:
20
+ use_checkpoint: True
21
+ in_channels: 4
22
+ out_channels: 4
23
+ model_channels: 320
24
+ attention_resolutions: [4, 2, 1]
25
+ num_res_blocks: 2
26
+ channel_mult: [1, 2, 4, 4]
27
+ num_head_channels: 64
28
+ use_linear_in_transformer: True
29
+ transformer_depth: 1
30
+ context_dim: 1024
31
+
32
+ conditioner_config:
33
+ target: sgm.modules.GeneralConditioner
34
+ params:
35
+ emb_models:
36
+ - is_trainable: False
37
+ input_key: txt
38
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder
39
+ params:
40
+ freeze: true
41
+ layer: penultimate
42
+
43
+ first_stage_config:
44
+ target: sgm.models.autoencoder.AutoencoderKL
45
+ params:
46
+ embed_dim: 4
47
+ monitor: val/rec_loss
48
+ ddconfig:
49
+ double_z: true
50
+ z_channels: 4
51
+ resolution: 256
52
+ in_channels: 3
53
+ out_ch: 3
54
+ ch: 128
55
+ ch_mult: [1, 2, 4, 4]
56
+ num_res_blocks: 2
57
+ attn_resolutions: []
58
+ dropout: 0.0
59
+ lossconfig:
60
+ target: torch.nn.Identity
generative_models/configs/inference/sd_2_1_768.yaml ADDED
@@ -0,0 +1,60 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ scaling_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling
14
+ discretization_config:
15
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
16
+
17
+ network_config:
18
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
19
+ params:
20
+ use_checkpoint: True
21
+ in_channels: 4
22
+ out_channels: 4
23
+ model_channels: 320
24
+ attention_resolutions: [4, 2, 1]
25
+ num_res_blocks: 2
26
+ channel_mult: [1, 2, 4, 4]
27
+ num_head_channels: 64
28
+ use_linear_in_transformer: True
29
+ transformer_depth: 1
30
+ context_dim: 1024
31
+
32
+ conditioner_config:
33
+ target: sgm.modules.GeneralConditioner
34
+ params:
35
+ emb_models:
36
+ - is_trainable: False
37
+ input_key: txt
38
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder
39
+ params:
40
+ freeze: true
41
+ layer: penultimate
42
+
43
+ first_stage_config:
44
+ target: sgm.models.autoencoder.AutoencoderKL
45
+ params:
46
+ embed_dim: 4
47
+ monitor: val/rec_loss
48
+ ddconfig:
49
+ double_z: true
50
+ z_channels: 4
51
+ resolution: 256
52
+ in_channels: 3
53
+ out_ch: 3
54
+ ch: 128
55
+ ch_mult: [1, 2, 4, 4]
56
+ num_res_blocks: 2
57
+ attn_resolutions: []
58
+ dropout: 0.0
59
+ lossconfig:
60
+ target: torch.nn.Identity
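The only substantive change from sd_2_1.yaml above is the denoiser scaling: VScaling (v-prediction, as used by the 768x768 SD 2.1 checkpoint) instead of EpsScaling. A hedged sketch of the two parameterisations in the usual D(x, sigma) = c_skip*x + c_out*F(c_in*x, c_noise) form; the authoritative expressions live in sgm.modules.diffusionmodules.denoiser_scaling, which is not shown in this commit view, so treat these as the commonly used k-diffusion-style definitions:

```python
def eps_scaling(sigma: float) -> dict:
    # epsilon-prediction: the network predicts (scaled) noise
    return {"c_skip": 1.0,
            "c_out": -sigma,
            "c_in": (sigma**2 + 1.0) ** -0.5}


def v_scaling(sigma: float) -> dict:
    # v-prediction: skip/output coefficients are rebalanced by 1 / (sigma^2 + 1)
    return {"c_skip": 1.0 / (sigma**2 + 1.0),
            "c_out": -sigma * (sigma**2 + 1.0) ** -0.5,
            "c_in": (sigma**2 + 1.0) ** -0.5}
```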
generative_models/configs/inference/sd_xl_base.yaml ADDED
@@ -0,0 +1,93 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.13025
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ scaling_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
14
+ discretization_config:
15
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
16
+
17
+ network_config:
18
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
19
+ params:
20
+ adm_in_channels: 2816
21
+ num_classes: sequential
22
+ use_checkpoint: True
23
+ in_channels: 4
24
+ out_channels: 4
25
+ model_channels: 320
26
+ attention_resolutions: [4, 2]
27
+ num_res_blocks: 2
28
+ channel_mult: [1, 2, 4]
29
+ num_head_channels: 64
30
+ use_linear_in_transformer: True
31
+ transformer_depth: [1, 2, 10]
32
+ context_dim: 2048
33
+ spatial_transformer_attn_type: softmax-xformers
34
+
35
+ conditioner_config:
36
+ target: sgm.modules.GeneralConditioner
37
+ params:
38
+ emb_models:
39
+ - is_trainable: False
40
+ input_key: txt
41
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
42
+ params:
43
+ layer: hidden
44
+ layer_idx: 11
45
+
46
+ - is_trainable: False
47
+ input_key: txt
48
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
49
+ params:
50
+ arch: ViT-bigG-14
51
+ version: laion2b_s39b_b160k
52
+ freeze: True
53
+ layer: penultimate
54
+ always_return_pooled: True
55
+ legacy: False
56
+
57
+ - is_trainable: False
58
+ input_key: original_size_as_tuple
59
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
60
+ params:
61
+ outdim: 256
62
+
63
+ - is_trainable: False
64
+ input_key: crop_coords_top_left
65
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
66
+ params:
67
+ outdim: 256
68
+
69
+ - is_trainable: False
70
+ input_key: target_size_as_tuple
71
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
72
+ params:
73
+ outdim: 256
74
+
75
+ first_stage_config:
76
+ target: sgm.models.autoencoder.AutoencoderKL
77
+ params:
78
+ embed_dim: 4
79
+ monitor: val/rec_loss
80
+ ddconfig:
81
+ attn_type: vanilla-xformers
82
+ double_z: true
83
+ z_channels: 4
84
+ resolution: 256
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult: [1, 2, 4, 4]
89
+ num_res_blocks: 2
90
+ attn_resolutions: []
91
+ dropout: 0.0
92
+ lossconfig:
93
+ target: torch.nn.Identity
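As in the training config earlier, `adm_in_channels: 2816` is simply the width of the concatenated pooled/vector conditionings: the pooled OpenCLIP ViT-bigG text embedding (1280-d, the standard bigG width, assumed here) plus three 2-component tuples (original size, crop coordinates, target size), each scalar embedded to 256 features. A small sanity check:

```python
pooled_vit_bigg = 1280                                   # assumed pooled width of ViT-bigG-14
vector_tuples = {"original_size_as_tuple": 2,
                 "crop_coords_top_left": 2,
                 "target_size_as_tuple": 2}
adm = pooled_vit_bigg + sum(n * 256 for n in vector_tuples.values())
assert adm == 2816                                       # matches network_config.params.adm_in_channels
```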
generative_models/configs/inference/sd_xl_refiner.yaml ADDED
@@ -0,0 +1,86 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.13025
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ scaling_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
14
+ discretization_config:
15
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
16
+
17
+ network_config:
18
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
19
+ params:
20
+ adm_in_channels: 2560
21
+ num_classes: sequential
22
+ use_checkpoint: True
23
+ in_channels: 4
24
+ out_channels: 4
25
+ model_channels: 384
26
+ attention_resolutions: [4, 2]
27
+ num_res_blocks: 2
28
+ channel_mult: [1, 2, 4, 4]
29
+ num_head_channels: 64
30
+ use_linear_in_transformer: True
31
+ transformer_depth: 4
32
+ context_dim: [1280, 1280, 1280, 1280]
33
+ spatial_transformer_attn_type: softmax-xformers
34
+
35
+ conditioner_config:
36
+ target: sgm.modules.GeneralConditioner
37
+ params:
38
+ emb_models:
39
+ - is_trainable: False
40
+ input_key: txt
41
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
42
+ params:
43
+ arch: ViT-bigG-14
44
+ version: laion2b_s39b_b160k
45
+ legacy: False
46
+ freeze: True
47
+ layer: penultimate
48
+ always_return_pooled: True
49
+
50
+ - is_trainable: False
51
+ input_key: original_size_as_tuple
52
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
53
+ params:
54
+ outdim: 256
55
+
56
+ - is_trainable: False
57
+ input_key: crop_coords_top_left
58
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
59
+ params:
60
+ outdim: 256
61
+
62
+ - is_trainable: False
63
+ input_key: aesthetic_score
64
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
65
+ params:
66
+ outdim: 256
67
+
68
+ first_stage_config:
69
+ target: sgm.models.autoencoder.AutoencoderKL
70
+ params:
71
+ embed_dim: 4
72
+ monitor: val/rec_loss
73
+ ddconfig:
74
+ attn_type: vanilla-xformers
75
+ double_z: true
76
+ z_channels: 4
77
+ resolution: 256
78
+ in_channels: 3
79
+ out_ch: 3
80
+ ch: 128
81
+ ch_mult: [1, 2, 4, 4]
82
+ num_res_blocks: 2
83
+ attn_resolutions: []
84
+ dropout: 0.0
85
+ lossconfig:
86
+ target: torch.nn.Identity
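The refiner follows the same pattern but keeps only the pooled ViT-bigG embedding and swaps the target-size tuple for a scalar aesthetic score, which is where `adm_in_channels: 2560` comes from:

```python
pooled_vit_bigg = 1280                                   # assumed pooled width of ViT-bigG-14
vector_scalars = {"original_size_as_tuple": 2, "crop_coords_top_left": 2, "aesthetic_score": 1}
assert pooled_vit_bigg + sum(n * 256 for n in vector_scalars.values()) == 2560
```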
generative_models/configs/inference/svd.yaml ADDED
@@ -0,0 +1,131 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
9
+ params:
10
+ scaling_config:
11
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
12
+
13
+ network_config:
14
+ target: sgm.modules.diffusionmodules.video_model.VideoUNet
15
+ params:
16
+ adm_in_channels: 768
17
+ num_classes: sequential
18
+ use_checkpoint: True
19
+ in_channels: 8
20
+ out_channels: 4
21
+ model_channels: 320
22
+ attention_resolutions: [4, 2, 1]
23
+ num_res_blocks: 2
24
+ channel_mult: [1, 2, 4, 4]
25
+ num_head_channels: 64
26
+ use_linear_in_transformer: True
27
+ transformer_depth: 1
28
+ context_dim: 1024
29
+ spatial_transformer_attn_type: softmax-xformers
30
+ extra_ff_mix_layer: True
31
+ use_spatial_context: True
32
+ merge_strategy: learned_with_images
33
+ video_kernel_size: [3, 1, 1]
34
+
35
+ conditioner_config:
36
+ target: sgm.modules.GeneralConditioner
37
+ params:
38
+ emb_models:
39
+ - is_trainable: False
40
+ input_key: cond_frames_without_noise
41
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
42
+ params:
43
+ n_cond_frames: 1
44
+ n_copies: 1
45
+ open_clip_embedding_config:
46
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
47
+ params:
48
+ freeze: True
49
+
50
+ - input_key: fps_id
51
+ is_trainable: False
52
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
53
+ params:
54
+ outdim: 256
55
+
56
+ - input_key: motion_bucket_id
57
+ is_trainable: False
58
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
59
+ params:
60
+ outdim: 256
61
+
62
+ - input_key: cond_frames
63
+ is_trainable: False
64
+ target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
65
+ params:
66
+ disable_encoder_autocast: True
67
+ n_cond_frames: 1
68
+ n_copies: 1
69
+ is_ae: True
70
+ encoder_config:
71
+ target: sgm.models.autoencoder.AutoencoderKLModeOnly
72
+ params:
73
+ embed_dim: 4
74
+ monitor: val/rec_loss
75
+ ddconfig:
76
+ attn_type: vanilla-xformers
77
+ double_z: True
78
+ z_channels: 4
79
+ resolution: 256
80
+ in_channels: 3
81
+ out_ch: 3
82
+ ch: 128
83
+ ch_mult: [1, 2, 4, 4]
84
+ num_res_blocks: 2
85
+ attn_resolutions: []
86
+ dropout: 0.0
87
+ lossconfig:
88
+ target: torch.nn.Identity
89
+
90
+ - input_key: cond_aug
91
+ is_trainable: False
92
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
93
+ params:
94
+ outdim: 256
95
+
96
+ first_stage_config:
97
+ target: sgm.models.autoencoder.AutoencodingEngine
98
+ params:
99
+ loss_config:
100
+ target: torch.nn.Identity
101
+ regularizer_config:
102
+ target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
103
+ encoder_config:
104
+ target: sgm.modules.diffusionmodules.model.Encoder
105
+ params:
106
+ attn_type: vanilla
107
+ double_z: True
108
+ z_channels: 4
109
+ resolution: 256
110
+ in_channels: 3
111
+ out_ch: 3
112
+ ch: 128
113
+ ch_mult: [1, 2, 4, 4]
114
+ num_res_blocks: 2
115
+ attn_resolutions: []
116
+ dropout: 0.0
117
+ decoder_config:
118
+ target: sgm.modules.autoencoding.temporal_ae.VideoDecoder
119
+ params:
120
+ attn_type: vanilla
121
+ double_z: True
122
+ z_channels: 4
123
+ resolution: 256
124
+ in_channels: 3
125
+ out_ch: 3
126
+ ch: 128
127
+ ch_mult: [1, 2, 4, 4]
128
+ num_res_blocks: 2
129
+ attn_resolutions: []
130
+ dropout: 0.0
131
+ video_kernel_size: [3, 1, 1]
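Two bookkeeping notes on this SVD config, both consistent with the fields above: `adm_in_channels: 768` is the three scalar conditionings (fps_id, motion_bucket_id, cond_aug) each embedded to 256 features, and `in_channels: 8` matches a 4-channel noisy video latent concatenated with the 4-channel latent of the conditioning frame produced by VideoPredictionEmbedderWithEncoder (an assumption about how the conditioner routes that embedder, not something stated explicitly in the YAML). The svd_image_decoder.yaml that follows differs only in its first stage, replacing the temporal VideoDecoder with a plain per-frame autoencoder.

```python
scalar_conds = ["fps_id", "motion_bucket_id", "cond_aug"]   # each -> ConcatTimestepEmbedderND(outdim=256)
assert len(scalar_conds) * 256 == 768                       # adm_in_channels
noisy_latent_ch, cond_frame_latent_ch = 4, 4                # assumed concat conditioning
assert noisy_latent_ch + cond_frame_latent_ch == 8          # network_config.params.in_channels
```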
generative_models/configs/inference/svd_image_decoder.yaml ADDED
@@ -0,0 +1,114 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
9
+ params:
10
+ scaling_config:
11
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
12
+
13
+ network_config:
14
+ target: sgm.modules.diffusionmodules.video_model.VideoUNet
15
+ params:
16
+ adm_in_channels: 768
17
+ num_classes: sequential
18
+ use_checkpoint: True
19
+ in_channels: 8
20
+ out_channels: 4
21
+ model_channels: 320
22
+ attention_resolutions: [4, 2, 1]
23
+ num_res_blocks: 2
24
+ channel_mult: [1, 2, 4, 4]
25
+ num_head_channels: 64
26
+ use_linear_in_transformer: True
27
+ transformer_depth: 1
28
+ context_dim: 1024
29
+ spatial_transformer_attn_type: softmax-xformers
30
+ extra_ff_mix_layer: True
31
+ use_spatial_context: True
32
+ merge_strategy: learned_with_images
33
+ video_kernel_size: [3, 1, 1]
34
+
35
+ conditioner_config:
36
+ target: sgm.modules.GeneralConditioner
37
+ params:
38
+ emb_models:
39
+ - is_trainable: False
40
+ input_key: cond_frames_without_noise
41
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
42
+ params:
43
+ n_cond_frames: 1
44
+ n_copies: 1
45
+ open_clip_embedding_config:
46
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
47
+ params:
48
+ freeze: True
49
+
50
+ - input_key: fps_id
51
+ is_trainable: False
52
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
53
+ params:
54
+ outdim: 256
55
+
56
+ - input_key: motion_bucket_id
57
+ is_trainable: False
58
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
59
+ params:
60
+ outdim: 256
61
+
62
+ - input_key: cond_frames
63
+ is_trainable: False
64
+ target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
65
+ params:
66
+ disable_encoder_autocast: True
67
+ n_cond_frames: 1
68
+ n_copies: 1
69
+ is_ae: True
70
+ encoder_config:
71
+ target: sgm.models.autoencoder.AutoencoderKLModeOnly
72
+ params:
73
+ embed_dim: 4
74
+ monitor: val/rec_loss
75
+ ddconfig:
76
+ attn_type: vanilla-xformers
77
+ double_z: True
78
+ z_channels: 4
79
+ resolution: 256
80
+ in_channels: 3
81
+ out_ch: 3
82
+ ch: 128
83
+ ch_mult: [1, 2, 4, 4]
84
+ num_res_blocks: 2
85
+ attn_resolutions: []
86
+ dropout: 0.0
87
+ lossconfig:
88
+ target: torch.nn.Identity
89
+
90
+ - input_key: cond_aug
91
+ is_trainable: False
92
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
93
+ params:
94
+ outdim: 256
95
+
96
+ first_stage_config:
97
+ target: sgm.models.autoencoder.AutoencoderKL
98
+ params:
99
+ embed_dim: 4
100
+ monitor: val/rec_loss
101
+ ddconfig:
102
+ attn_type: vanilla-xformers
103
+ double_z: True
104
+ z_channels: 4
105
+ resolution: 256
106
+ in_channels: 3
107
+ out_ch: 3
108
+ ch: 128
109
+ ch_mult: [1, 2, 4, 4]
110
+ num_res_blocks: 2
111
+ attn_resolutions: []
112
+ dropout: 0.0
113
+ lossconfig:
114
+ target: torch.nn.Identity
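To confirm the relationship noted above, the two SVD inference configs can be diffed programmatically; only the first-stage block should differ. The paths assume the directory layout of this upload:

```python
from omegaconf import OmegaConf

a = OmegaConf.load("generative_models/configs/inference/svd.yaml")
b = OmegaConf.load("generative_models/configs/inference/svd_image_decoder.yaml")
sections = ["denoiser_config", "network_config", "conditioner_config", "first_stage_config"]
changed = [s for s in sections
           if OmegaConf.to_container(a.model.params[s], resolve=True)
           != OmegaConf.to_container(b.model.params[s], resolve=True)]
print(changed)  # expected: ['first_stage_config']
```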
generative_models/configs/unclip6.yaml ADDED
@@ -0,0 +1,220 @@
1
+ model:
2
+ base_learning_rate: 1.0e-5
3
+ target: sgm.models.diffusion.DiffusionEngine
4
+ params:
5
+ scale_factor: 0.13025
6
+ disable_first_stage_autocast: True
7
+ no_cond_log: True
8
+
9
+ ckpt_config:
10
+ target: sgm.modules.checkpoint.CheckpointEngine
11
+ params:
12
+ ckpt_path: checkpoints/sd_xl_base_1.0.safetensors
13
+ pre_adapters:
14
+ - target: sgm.modules.checkpoint.Finetuner
15
+ params:
16
+ keys:
17
+ - model\.diffusion_model\.(input_blocks|middle_block|output_blocks)(\.[0-9])?\.[0-9]\.transformer_blocks\.[0-9]\.attn2\.(to_k|to_v)\.weight
18
+ - target: sgm.modules.checkpoint.Pruner
19
+ params:
20
+ keys:
21
+ - model\.diffusion_model\.label_emb\.0\.0\.weight
22
+ slices:
23
+ - ":, :1024"
24
+ print_sd_keys: False
25
+ print_model: False
26
+
27
+ scheduler_config:
28
+ target: sgm.lr_scheduler.LambdaLinearScheduler
29
+ params:
30
+ warm_up_steps: [ 1000 ]
31
+ cycle_lengths: [ 10000000000000 ]
32
+ f_start: [ 1.e-6 ]
33
+ f_max: [ 1. ]
34
+ f_min: [ 1. ]
35
+
36
+ denoiser_config:
37
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
38
+ params:
39
+ num_idx: 1000
40
+
41
+ scaling_config:
42
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
43
+
44
+ discretization_config:
45
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
46
+
47
+ network_config:
48
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
49
+ params:
50
+ adm_in_channels: 1024 #2816
51
+ num_classes: sequential
52
+ use_checkpoint: True
53
+ in_channels: 4
54
+ out_channels: 4
55
+ model_channels: 320
56
+ attention_resolutions: [ 4, 2 ]
57
+ num_res_blocks: 2
58
+ channel_mult: [ 1, 2, 4 ]
59
+ num_head_channels: 64
60
+ use_linear_in_transformer: True
61
+ transformer_depth: [ 1, 2, 10 ] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
62
+ context_dim: 1664 #1280
63
+ spatial_transformer_attn_type: softmax-xformers
64
+
65
+ conditioner_config:
66
+ target: sgm.modules.GeneralConditioner
67
+ params:
68
+ emb_models:
69
+ # cross attn
70
+ - is_trainable: False
71
+ input_key: jpg
72
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
73
+ params:
74
+ arch: ViT-bigG-14
75
+ version: laion2b_s39b_b160k
76
+ freeze: True
77
+ repeat_to_max_len: False
78
+ output_tokens: True
79
+ only_tokens: True
80
+ # vector cond
81
+ - is_trainable: False
82
+ input_key: original_size_as_tuple
83
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
84
+ params:
85
+ outdim: 256 # multiplied by two
86
+ # vector cond
87
+ - is_trainable: False
88
+ input_key: crop_coords_top_left
89
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
90
+ params:
91
+ outdim: 256 # multiplied by two
92
+ # # vector cond
93
+ # - is_trainable: False
94
+ # input_key: target_size_as_tuple
95
+ # target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
96
+ # params:
97
+ # outdim: 256 # multiplied by two
98
+
99
+ first_stage_config:
100
+ target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
101
+ params:
102
+ embed_dim: 4
103
+ monitor: val/rec_loss
104
+ ddconfig:
105
+ attn_type: vanilla-xformers
106
+ double_z: true
107
+ z_channels: 4
108
+ resolution: 256
109
+ in_channels: 3
110
+ out_ch: 3
111
+ ch: 128
112
+ ch_mult: [ 1, 2, 4, 4 ]
113
+ num_res_blocks: 2
114
+ attn_resolutions: [ ]
115
+ dropout: 0.0
116
+ lossconfig:
117
+ target: torch.nn.Identity
118
+
119
+ loss_fn_config:
120
+ target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
121
+ params:
122
+ offset_noise_level: 0.04
123
+ sigma_sampler_config:
124
+ target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
125
+ params:
126
+ num_idx: 1000
127
+
128
+ discretization_config:
129
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
130
+ loss_weighting_config:
131
+ target: sgm.modules.diffusionmodules.loss_weighting.EpsWeighting
132
+
133
+ sampler_config:
134
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
135
+ params:
136
+ num_steps: 50
137
+
138
+ discretization_config:
139
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
140
+
141
+ guider_config:
142
+ target: sgm.modules.diffusionmodules.guiders.VanillaCFG
143
+ params:
144
+ scale: 5.0
145
+
146
+ data:
147
+ target: sgm.data.dataset.StableDataModuleFromConfig
148
+ params:
149
+ train:
150
+ datapipeline:
151
+ urls:
152
+ - s3://stability-west/sddatasets/laiocosplitv1c/
153
+ pipeline_config:
154
+ shardshuffle: 10000
155
+ sample_shuffle: 10000
156
+
157
+ preprocessors:
158
+ - target: sdata.filters.SimpleKeyFilter
159
+ params:
160
+ keys: [txt, jpg]
161
+ - target: sdata.filters.AttributeFilter
162
+ params:
163
+ filter_dict:
164
+ SSCD_65: False
165
+ is_spawning: True
166
+ is_getty: True
167
+
168
+ decoders:
169
+ - pil
170
+
171
+ loader:
172
+ batch_size: 1
173
+ num_workers: 4
174
+ batched_transforms:
175
+ - target: sdata.mappers.MultiAspectCacher
176
+ params:
177
+ batch_size: 16
178
+ debug: False
179
+ crop_coords_key: crop_coords_top_left
180
+ target_size_key: target_size_as_tuple
181
+ original_size_key: original_size_as_tuple
182
+ max_pixels: 262144
183
+
184
+
185
+ lightning:
186
+ strategy:
187
+ target: pytorch_lightning.strategies.DDPStrategy
188
+
189
+ modelcheckpoint:
190
+ params:
191
+ every_n_train_steps: 100000
192
+
193
+ callbacks:
194
+ metrics_over_trainsteps_checkpoint:
195
+ params:
196
+ every_n_train_steps: 5000
197
+
198
+ image_logger:
199
+ target: sgm.modules.loggers.train_logging.SampleLogger
200
+ params:
201
+ disabled: False
202
+ enable_autocast: True
203
+ batch_frequency: 2000
204
+ max_images: 4
205
+ increase_log_steps: True
206
+ log_first_step: False
207
+ log_before_first_step: True
208
+ log_images_kwargs:
209
+ N: 4
210
+ num_steps:
211
+ - 50
212
+ ucg_keys: [ ]
213
+
214
+ trainer:
215
+ devices: 0,
216
+ benchmark: False
217
+ num_sanity_val_steps: 0
218
+ accumulate_grad_batches: 1
219
+ max_epochs: 1000
220
+ precision: 16
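The `ckpt_config` block above adapts an SDXL base checkpoint to this unCLIP setup: the Finetuner targets the cross-attention key/value projections (whose input width changes from 2048 to 1664, the token width of the OpenCLIP ViT-bigG image encoder), and the Pruner slices `label_emb.0.0.weight` with `:, :1024` so the vector-conditioning input matches the reduced `adm_in_channels: 1024` (two 2-tuples at 256 features per scalar, with the pooled text embedding dropped). A hedged illustration of that slice, assuming the SDXL label-embedding weight has shape (1280, 2816); the actual sgm.modules.checkpoint.Pruner implementation is not part of this commit view:

```python
import torch

w = torch.randn(1280, 2816)   # assumed shape of model.diffusion_model.label_emb.0.0.weight in SDXL base
pruned = w[:, :1024]          # the ":, :1024" slice from the config keeps the first 1024 input features
assert pruned.shape == (1280, 1024)
assert 2 * 2 * 256 == 1024    # original_size_as_tuple + crop_coords_top_left at outdim 256 each
```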
generative_models/data/DejaVuSans.ttf ADDED
Binary file (757 kB).
 
generative_models/main.py ADDED
@@ -0,0 +1,943 @@
1
+ import argparse
2
+ import datetime
3
+ import glob
4
+ import inspect
5
+ import os
6
+ import sys
7
+ from inspect import Parameter
8
+ from typing import Union
9
+
10
+ import numpy as np
11
+ import pytorch_lightning as pl
12
+ import torch
13
+ import torchvision
14
+ import wandb
15
+ from matplotlib import pyplot as plt
16
+ from natsort import natsorted
17
+ from omegaconf import OmegaConf
18
+ from packaging import version
19
+ from PIL import Image
20
+ from pytorch_lightning import seed_everything
21
+ from pytorch_lightning.callbacks import Callback
22
+ from pytorch_lightning.loggers import WandbLogger
23
+ from pytorch_lightning.trainer import Trainer
24
+ from pytorch_lightning.utilities import rank_zero_only
25
+
26
+ from sgm.util import exists, instantiate_from_config, isheatmap
27
+
28
+ MULTINODE_HACKS = True
29
+
30
+
31
+ def default_trainer_args():
32
+ argspec = dict(inspect.signature(Trainer.__init__).parameters)
33
+ argspec.pop("self")
34
+ default_args = {
35
+ param: argspec[param].default
36
+ for param in argspec
37
+ if argspec[param] != Parameter.empty
38
+ }
39
+ return default_args
40
+
41
+
42
+ def get_parser(**parser_kwargs):
43
+ def str2bool(v):
44
+ if isinstance(v, bool):
45
+ return v
46
+ if v.lower() in ("yes", "true", "t", "y", "1"):
47
+ return True
48
+ elif v.lower() in ("no", "false", "f", "n", "0"):
49
+ return False
50
+ else:
51
+ raise argparse.ArgumentTypeError("Boolean value expected.")
52
+
53
+ parser = argparse.ArgumentParser(**parser_kwargs)
54
+ parser.add_argument(
55
+ "-n",
56
+ "--name",
57
+ type=str,
58
+ const=True,
59
+ default="",
60
+ nargs="?",
61
+ help="postfix for logdir",
62
+ )
63
+ parser.add_argument(
64
+ "--no_date",
65
+ type=str2bool,
66
+ nargs="?",
67
+ const=True,
68
+ default=False,
69
+ help="if True, skip date generation for logdir and only use naming via opt.base or opt.name (+ opt.postfix, optionally)",
70
+ )
71
+ parser.add_argument(
72
+ "-r",
73
+ "--resume",
74
+ type=str,
75
+ const=True,
76
+ default="",
77
+ nargs="?",
78
+ help="resume from logdir or checkpoint in logdir",
79
+ )
80
+ parser.add_argument(
81
+ "-b",
82
+ "--base",
83
+ nargs="*",
84
+ metavar="base_config.yaml",
85
+ help="paths to base configs. Loaded from left-to-right. "
86
+ "Parameters can be overwritten or added with command-line options of the form `--key value`.",
87
+ default=list(),
88
+ )
89
+ parser.add_argument(
90
+ "-t",
91
+ "--train",
92
+ type=str2bool,
93
+ const=True,
94
+ default=True,
95
+ nargs="?",
96
+ help="train",
97
+ )
98
+ parser.add_argument(
99
+ "--no-test",
100
+ type=str2bool,
101
+ const=True,
102
+ default=False,
103
+ nargs="?",
104
+ help="disable test",
105
+ )
106
+ parser.add_argument(
107
+ "-p", "--project", help="name of new or path to existing project"
108
+ )
109
+ parser.add_argument(
110
+ "-d",
111
+ "--debug",
112
+ type=str2bool,
113
+ nargs="?",
114
+ const=True,
115
+ default=False,
116
+ help="enable post-mortem debugging",
117
+ )
118
+ parser.add_argument(
119
+ "-s",
120
+ "--seed",
121
+ type=int,
122
+ default=23,
123
+ help="seed for seed_everything",
124
+ )
125
+ parser.add_argument(
126
+ "-f",
127
+ "--postfix",
128
+ type=str,
129
+ default="",
130
+ help="post-postfix for default name",
131
+ )
132
+ parser.add_argument(
133
+ "--projectname",
134
+ type=str,
135
+ default="stablediffusion",
136
+ )
137
+ parser.add_argument(
138
+ "-l",
139
+ "--logdir",
140
+ type=str,
141
+ default="logs",
142
+ help="directory for logging dat shit",
143
+ )
144
+ parser.add_argument(
145
+ "--scale_lr",
146
+ type=str2bool,
147
+ nargs="?",
148
+ const=True,
149
+ default=False,
150
+ help="scale base-lr by ngpu * batch_size * n_accumulate",
151
+ )
152
+ parser.add_argument(
153
+ "--legacy_naming",
154
+ type=str2bool,
155
+ nargs="?",
156
+ const=True,
157
+ default=False,
158
+ help="name run based on config file name if true, else by whole path",
159
+ )
160
+ parser.add_argument(
161
+ "--enable_tf32",
162
+ type=str2bool,
163
+ nargs="?",
164
+ const=True,
165
+ default=False,
166
+ help="enables the TensorFloat32 format both for matmuls and cuDNN for pytorch 1.12",
167
+ )
168
+ parser.add_argument(
169
+ "--startup",
170
+ type=str,
171
+ default=None,
172
+ help="Startuptime from distributed script",
173
+ )
174
+ parser.add_argument(
175
+ "--wandb",
176
+ type=str2bool,
177
+ nargs="?",
178
+ const=True,
179
+ default=False, # TODO: later default to True
180
+ help="log to wandb",
181
+ )
182
+ parser.add_argument(
183
+ "--no_base_name",
184
+ type=str2bool,
185
+ nargs="?",
186
+ const=True,
187
+ default=False, # TODO: later default to True
188
+ help="log to wandb",
189
+ )
190
+ if version.parse(torch.__version__) >= version.parse("2.0.0"):
191
+ parser.add_argument(
192
+ "--resume_from_checkpoint",
193
+ type=str,
194
+ default=None,
195
+ help="single checkpoint file to resume from",
196
+ )
197
+ default_args = default_trainer_args()
198
+ for key in default_args:
199
+ parser.add_argument("--" + key, default=default_args[key])
200
+ return parser
201
+
202
+
203
+ def get_checkpoint_name(logdir):
204
+ ckpt = os.path.join(logdir, "checkpoints", "last**.ckpt")
205
+ ckpt = natsorted(glob.glob(ckpt))
206
+ print('available "last" checkpoints:')
207
+ print(ckpt)
208
+ if len(ckpt) > 1:
209
+ print("got most recent checkpoint")
210
+ ckpt = sorted(ckpt, key=lambda x: os.path.getmtime(x))[-1]
211
+ print(f"Most recent ckpt is {ckpt}")
212
+ with open(os.path.join(logdir, "most_recent_ckpt.txt"), "w") as f:
213
+ f.write(ckpt + "\n")
214
+ try:
215
+ version = int(ckpt.split("/")[-1].split("-v")[-1].split(".")[0])
216
+ except Exception as e:
217
+ print("version confusion but not bad")
218
+ print(e)
219
+ version = 1
220
+ # version = last_version + 1
221
+ else:
222
+ # in this case, we only have one "last.ckpt"
223
+ ckpt = ckpt[0]
224
+ version = 1
225
+ melk_ckpt_name = f"last-v{version}.ckpt"
226
+ print(f"Current melk ckpt name: {melk_ckpt_name}")
227
+ return ckpt, melk_ckpt_name
228
+
229
+
230
+ class SetupCallback(Callback):
231
+ def __init__(
232
+ self,
233
+ resume,
234
+ now,
235
+ logdir,
236
+ ckptdir,
237
+ cfgdir,
238
+ config,
239
+ lightning_config,
240
+ debug,
241
+ ckpt_name=None,
242
+ ):
243
+ super().__init__()
244
+ self.resume = resume
245
+ self.now = now
246
+ self.logdir = logdir
247
+ self.ckptdir = ckptdir
248
+ self.cfgdir = cfgdir
249
+ self.config = config
250
+ self.lightning_config = lightning_config
251
+ self.debug = debug
252
+ self.ckpt_name = ckpt_name
253
+
254
+ def on_exception(self, trainer: pl.Trainer, pl_module, exception):
255
+ if not self.debug and trainer.global_rank == 0:
256
+ print("Summoning checkpoint.")
257
+ if self.ckpt_name is None:
258
+ ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
259
+ else:
260
+ ckpt_path = os.path.join(self.ckptdir, self.ckpt_name)
261
+ trainer.save_checkpoint(ckpt_path)
262
+
263
+ def on_fit_start(self, trainer, pl_module):
264
+ if trainer.global_rank == 0:
265
+ # Create logdirs and save configs
266
+ os.makedirs(self.logdir, exist_ok=True)
267
+ os.makedirs(self.ckptdir, exist_ok=True)
268
+ os.makedirs(self.cfgdir, exist_ok=True)
269
+
270
+ if "callbacks" in self.lightning_config:
271
+ if (
272
+ "metrics_over_trainsteps_checkpoint"
273
+ in self.lightning_config["callbacks"]
274
+ ):
275
+ os.makedirs(
276
+ os.path.join(self.ckptdir, "trainstep_checkpoints"),
277
+ exist_ok=True,
278
+ )
279
+ print("Project config")
280
+ print(OmegaConf.to_yaml(self.config))
281
+ if MULTINODE_HACKS:
282
+ import time
283
+
284
+ time.sleep(5)
285
+ OmegaConf.save(
286
+ self.config,
287
+ os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)),
288
+ )
289
+
290
+ print("Lightning config")
291
+ print(OmegaConf.to_yaml(self.lightning_config))
292
+ OmegaConf.save(
293
+ OmegaConf.create({"lightning": self.lightning_config}),
294
+ os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)),
295
+ )
296
+
297
+ else:
298
+ # ModelCheckpoint callback created log directory --- remove it
299
+ if not MULTINODE_HACKS and not self.resume and os.path.exists(self.logdir):
300
+ dst, name = os.path.split(self.logdir)
301
+ dst = os.path.join(dst, "child_runs", name)
302
+ os.makedirs(os.path.split(dst)[0], exist_ok=True)
303
+ try:
304
+ os.rename(self.logdir, dst)
305
+ except FileNotFoundError:
306
+ pass
307
+
308
+
309
+ class ImageLogger(Callback):
310
+ def __init__(
311
+ self,
312
+ batch_frequency,
313
+ max_images,
314
+ clamp=True,
315
+ increase_log_steps=True,
316
+ rescale=True,
317
+ disabled=False,
318
+ log_on_batch_idx=False,
319
+ log_first_step=False,
320
+ log_images_kwargs=None,
321
+ log_before_first_step=False,
322
+ enable_autocast=True,
323
+ ):
324
+ super().__init__()
325
+ self.enable_autocast = enable_autocast
326
+ self.rescale = rescale
327
+ self.batch_freq = batch_frequency
328
+ self.max_images = max_images
329
+ self.log_steps = [2**n for n in range(int(np.log2(self.batch_freq)) + 1)]
330
+ if not increase_log_steps:
331
+ self.log_steps = [self.batch_freq]
332
+ self.clamp = clamp
333
+ self.disabled = disabled
334
+ self.log_on_batch_idx = log_on_batch_idx
335
+ self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
336
+ self.log_first_step = log_first_step
337
+ self.log_before_first_step = log_before_first_step
338
+
339
+ @rank_zero_only
340
+ def log_local(
341
+ self,
342
+ save_dir,
343
+ split,
344
+ images,
345
+ global_step,
346
+ current_epoch,
347
+ batch_idx,
348
+ pl_module: Union[None, pl.LightningModule] = None,
349
+ ):
350
+ root = os.path.join(save_dir, "images", split)
351
+ for k in images:
352
+ if isheatmap(images[k]):
353
+ fig, ax = plt.subplots()
354
+ ax = ax.matshow(
355
+ images[k].cpu().numpy(), cmap="hot", interpolation="lanczos"
356
+ )
357
+ plt.colorbar(ax)
358
+ plt.axis("off")
359
+
360
+ filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
361
+ k, global_step, current_epoch, batch_idx
362
+ )
363
+ os.makedirs(root, exist_ok=True)
364
+ path = os.path.join(root, filename)
365
+ plt.savefig(path)
366
+ plt.close()
367
+ # TODO: support wandb
368
+ else:
369
+ grid = torchvision.utils.make_grid(images[k], nrow=4)
370
+ if self.rescale:
371
+ grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
372
+ grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
373
+ grid = grid.numpy()
374
+ grid = (grid * 255).astype(np.uint8)
375
+ filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
376
+ k, global_step, current_epoch, batch_idx
377
+ )
378
+ path = os.path.join(root, filename)
379
+ os.makedirs(os.path.split(path)[0], exist_ok=True)
380
+ img = Image.fromarray(grid)
381
+ img.save(path)
382
+ if exists(pl_module):
383
+ assert isinstance(
384
+ pl_module.logger, WandbLogger
385
+ ), "logger_log_image only supports WandbLogger currently"
386
+ pl_module.logger.log_image(
387
+ key=f"{split}/{k}",
388
+ images=[
389
+ img,
390
+ ],
391
+ step=pl_module.global_step,
392
+ )
393
+
394
+ @rank_zero_only
395
+ def log_img(self, pl_module, batch, batch_idx, split="train"):
396
+ check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
397
+ if (
398
+ self.check_frequency(check_idx)
399
+ and hasattr(pl_module, "log_images") # batch_idx % self.batch_freq == 0
400
+ and callable(pl_module.log_images)
401
+ and
402
+ # batch_idx > 5 and
403
+ self.max_images > 0
404
+ ):
405
+ logger = type(pl_module.logger)
406
+ is_train = pl_module.training
407
+ if is_train:
408
+ pl_module.eval()
409
+
410
+ gpu_autocast_kwargs = {
411
+ "enabled": self.enable_autocast, # torch.is_autocast_enabled(),
412
+ "dtype": torch.get_autocast_gpu_dtype(),
413
+ "cache_enabled": torch.is_autocast_cache_enabled(),
414
+ }
415
+ with torch.no_grad(), torch.cuda.amp.autocast(**gpu_autocast_kwargs):
416
+ images = pl_module.log_images(
417
+ batch, split=split, **self.log_images_kwargs
418
+ )
419
+
420
+ for k in images:
421
+ N = min(images[k].shape[0], self.max_images)
422
+ if not isheatmap(images[k]):
423
+ images[k] = images[k][:N]
424
+ if isinstance(images[k], torch.Tensor):
425
+ images[k] = images[k].detach().float().cpu()
426
+ if self.clamp and not isheatmap(images[k]):
427
+ images[k] = torch.clamp(images[k], -1.0, 1.0)
428
+
429
+ self.log_local(
430
+ pl_module.logger.save_dir,
431
+ split,
432
+ images,
433
+ pl_module.global_step,
434
+ pl_module.current_epoch,
435
+ batch_idx,
436
+ pl_module=pl_module
437
+ if isinstance(pl_module.logger, WandbLogger)
438
+ else None,
439
+ )
440
+
441
+ if is_train:
442
+ pl_module.train()
443
+
444
+ def check_frequency(self, check_idx):
445
+ if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (
446
+ check_idx > 0 or self.log_first_step
447
+ ):
448
+ try:
449
+ self.log_steps.pop(0)
450
+ except IndexError as e:
451
+ print(e)
452
+ pass
453
+ return True
454
+ return False
455
+
456
+ @rank_zero_only
457
+ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
458
+ if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
459
+ self.log_img(pl_module, batch, batch_idx, split="train")
460
+
461
+ @rank_zero_only
462
+ def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
463
+ if self.log_before_first_step and pl_module.global_step == 0:
464
+ print(f"{self.__class__.__name__}: logging before training")
465
+ self.log_img(pl_module, batch, batch_idx, split="train")
466
+
467
+ @rank_zero_only
468
+ def on_validation_batch_end(
469
+ self, trainer, pl_module, outputs, batch, batch_idx, *args, **kwargs
470
+ ):
471
+ if not self.disabled and pl_module.global_step > 0:
472
+ self.log_img(pl_module, batch, batch_idx, split="val")
473
+ if hasattr(pl_module, "calibrate_grad_norm"):
474
+ if (
475
+ pl_module.calibrate_grad_norm and batch_idx % 25 == 0
476
+ ) and batch_idx > 0:
477
+ self.log_gradients(trainer, pl_module, batch_idx=batch_idx)
478
+
479
+
480
+ @rank_zero_only
481
+ def init_wandb(save_dir, opt, config, group_name, name_str):
482
+ print(f"setting WANDB_DIR to {save_dir}")
483
+ os.makedirs(save_dir, exist_ok=True)
484
+
485
+ os.environ["WANDB_DIR"] = save_dir
486
+ if opt.debug:
487
+ wandb.init(project=opt.projectname, mode="offline", group=group_name)
488
+ else:
489
+ wandb.init(
490
+ project=opt.projectname,
491
+ config=config,
492
+ settings=wandb.Settings(code_dir="./sgm"),
493
+ group=group_name,
494
+ name=name_str,
495
+ )
496
+
497
+
498
+ if __name__ == "__main__":
499
+ # custom parser to specify config files, train, test and debug mode,
500
+ # postfix, resume.
501
+ # `--key value` arguments are interpreted as arguments to the trainer.
502
+ # `nested.key=value` arguments are interpreted as config parameters.
503
+ # configs are merged from left-to-right followed by command line parameters.
504
+
505
+ # model:
506
+ # base_learning_rate: float
507
+ # target: path to lightning module
508
+ # params:
509
+ # key: value
510
+ # data:
511
+ # target: main.DataModuleFromConfig
512
+ # params:
513
+ # batch_size: int
514
+ # wrap: bool
515
+ # train:
516
+ # target: path to train dataset
517
+ # params:
518
+ # key: value
519
+ # validation:
520
+ # target: path to validation dataset
521
+ # params:
522
+ # key: value
523
+ # test:
524
+ # target: path to test dataset
525
+ # params:
526
+ # key: value
527
+ # lightning: (optional, has sane defaults and can be specified on cmdline)
528
+ # trainer:
529
+ # additional arguments to trainer
530
+ # logger:
531
+ # logger to instantiate
532
+ # modelcheckpoint:
533
+ # modelcheckpoint to instantiate
534
+ # callbacks:
535
+ # callback1:
536
+ # target: importpath
537
+ # params:
538
+ # key: value
539
+
540
+ now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
541
+
542
+ # add cwd for convenience and to make classes in this file available when
543
+ # running as `python main.py`
544
+ # (in particular `main.DataModuleFromConfig`)
545
+ sys.path.append(os.getcwd())
546
+
547
+ parser = get_parser()
548
+
549
+ opt, unknown = parser.parse_known_args()
550
+
551
+ if opt.name and opt.resume:
552
+ raise ValueError(
553
+ "-n/--name and -r/--resume cannot be specified both."
554
+ "If you want to resume training in a new log folder, "
555
+ "use -n/--name in combination with --resume_from_checkpoint"
556
+ )
557
+ melk_ckpt_name = None
558
+ name = None
559
+ if opt.resume:
560
+ if not os.path.exists(opt.resume):
561
+ raise ValueError("Cannot find {}".format(opt.resume))
562
+ if os.path.isfile(opt.resume):
563
+ paths = opt.resume.split("/")
564
+ # idx = len(paths)-paths[::-1].index("logs")+1
565
+ # logdir = "/".join(paths[:idx])
566
+ logdir = "/".join(paths[:-2])
567
+ ckpt = opt.resume
568
+ _, melk_ckpt_name = get_checkpoint_name(logdir)
569
+ else:
570
+ assert os.path.isdir(opt.resume), opt.resume
571
+ logdir = opt.resume.rstrip("/")
572
+ ckpt, melk_ckpt_name = get_checkpoint_name(logdir)
573
+
574
+ print("#" * 100)
575
+ print(f'Resuming from checkpoint "{ckpt}"')
576
+ print("#" * 100)
577
+
578
+ opt.resume_from_checkpoint = ckpt
579
+ base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
580
+ opt.base = base_configs + opt.base
581
+ _tmp = logdir.split("/")
582
+ nowname = _tmp[-1]
583
+ else:
584
+ if opt.name:
585
+ name = "_" + opt.name
586
+ elif opt.base:
587
+ if opt.no_base_name:
588
+ name = ""
589
+ else:
590
+ if opt.legacy_naming:
591
+ cfg_fname = os.path.split(opt.base[0])[-1]
592
+ cfg_name = os.path.splitext(cfg_fname)[0]
593
+ else:
594
+ assert "configs" in os.path.split(opt.base[0])[0], os.path.split(
595
+ opt.base[0]
596
+ )[0]
597
+ cfg_path = os.path.split(opt.base[0])[0].split(os.sep)[
598
+ os.path.split(opt.base[0])[0].split(os.sep).index("configs")
599
+ + 1 :
600
+ ] # cut away the first one (we assert all configs are in "configs")
601
+ cfg_name = os.path.splitext(os.path.split(opt.base[0])[-1])[0]
602
+ cfg_name = "-".join(cfg_path) + f"-{cfg_name}"
603
+ name = "_" + cfg_name
604
+ else:
605
+ name = ""
606
+ if not opt.no_date:
607
+ nowname = now + name + opt.postfix
608
+ else:
609
+ nowname = name + opt.postfix
610
+ if nowname.startswith("_"):
611
+ nowname = nowname[1:]
612
+ logdir = os.path.join(opt.logdir, nowname)
613
+ print(f"LOGDIR: {logdir}")
614
+
615
+ ckptdir = os.path.join(logdir, "checkpoints")
616
+ cfgdir = os.path.join(logdir, "configs")
617
+ seed_everything(opt.seed, workers=True)
618
+
619
+ # move before model init, in case a torch.compile(...) is called somewhere
620
+ if opt.enable_tf32:
621
+ # pt_version = version.parse(torch.__version__)
622
+ torch.backends.cuda.matmul.allow_tf32 = True
623
+ torch.backends.cudnn.allow_tf32 = True
624
+ print(f"Enabling TF32 for PyTorch {torch.__version__}")
625
+ else:
626
+ print(f"Using default TF32 settings for PyTorch {torch.__version__}:")
627
+ print(
628
+ f"torch.backends.cuda.matmul.allow_tf32={torch.backends.cuda.matmul.allow_tf32}"
629
+ )
630
+ print(f"torch.backends.cudnn.allow_tf32={torch.backends.cudnn.allow_tf32}")
631
+
632
+ try:
633
+ # init and save configs
634
+ configs = [OmegaConf.load(cfg) for cfg in opt.base]
635
+ cli = OmegaConf.from_dotlist(unknown)
636
+ config = OmegaConf.merge(*configs, cli)
637
+ lightning_config = config.pop("lightning", OmegaConf.create())
638
+ # merge trainer cli with config
639
+ trainer_config = lightning_config.get("trainer", OmegaConf.create())
640
+
641
+ # default to gpu
642
+ trainer_config["accelerator"] = "gpu"
643
+ #
644
+ standard_args = default_trainer_args()
645
+ for k in standard_args:
646
+ if getattr(opt, k) != standard_args[k]:
647
+ trainer_config[k] = getattr(opt, k)
648
+
649
+ ckpt_resume_path = opt.resume_from_checkpoint
650
+
651
+ if not "devices" in trainer_config and trainer_config["accelerator"] != "gpu":
652
+ del trainer_config["accelerator"]
653
+ cpu = True
654
+ else:
655
+ gpuinfo = trainer_config["devices"]
656
+ print(f"Running on GPUs {gpuinfo}")
657
+ cpu = False
658
+ trainer_opt = argparse.Namespace(**trainer_config)
659
+ lightning_config.trainer = trainer_config
660
+
661
+ # model
662
+ model = instantiate_from_config(config.model)
663
+
664
+ # trainer and callbacks
665
+ trainer_kwargs = dict()
666
+
667
+ # default logger configs
668
+ default_logger_cfgs = {
669
+ "wandb": {
670
+ "target": "pytorch_lightning.loggers.WandbLogger",
671
+ "params": {
672
+ "name": nowname,
673
+ # "save_dir": logdir,
674
+ "offline": opt.debug,
675
+ "id": nowname,
676
+ "project": opt.projectname,
677
+ "log_model": False,
678
+ # "dir": logdir,
679
+ },
680
+ },
681
+ "csv": {
682
+ "target": "pytorch_lightning.loggers.CSVLogger",
683
+ "params": {
684
+ "name": "testtube", # hack for sbord fanatics
685
+ "save_dir": logdir,
686
+ },
687
+ },
688
+ }
689
+ default_logger_cfg = default_logger_cfgs["wandb" if opt.wandb else "csv"]
690
+ if opt.wandb:
691
+ # TODO change once leaving "swiffer" config directory
692
+ try:
693
+ group_name = nowname.split(now)[-1].split("-")[1]
694
+ except:
695
+ group_name = nowname
696
+ default_logger_cfg["params"]["group"] = group_name
697
+ init_wandb(
698
+ os.path.join(os.getcwd(), logdir),
699
+ opt=opt,
700
+ group_name=group_name,
701
+ config=config,
702
+ name_str=nowname,
703
+ )
704
+ if "logger" in lightning_config:
705
+ logger_cfg = lightning_config.logger
706
+ else:
707
+ logger_cfg = OmegaConf.create()
708
+ logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
709
+ trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
710
+
711
+ # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
712
+ # specify which metric is used to determine best models
713
+ default_modelckpt_cfg = {
714
+ "target": "pytorch_lightning.callbacks.ModelCheckpoint",
715
+ "params": {
716
+ "dirpath": ckptdir,
717
+ "filename": "{epoch:06}",
718
+ "verbose": True,
719
+ "save_last": True,
720
+ },
721
+ }
722
+ if hasattr(model, "monitor"):
723
+ print(f"Monitoring {model.monitor} as checkpoint metric.")
724
+ default_modelckpt_cfg["params"]["monitor"] = model.monitor
725
+ default_modelckpt_cfg["params"]["save_top_k"] = 3
726
+
727
+ if "modelcheckpoint" in lightning_config:
728
+ modelckpt_cfg = lightning_config.modelcheckpoint
729
+ else:
730
+ modelckpt_cfg = OmegaConf.create()
731
+ modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
732
+ print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
733
+
734
+ # https://pytorch-lightning.readthedocs.io/en/stable/extensions/strategy.html
735
+ # default to ddp if not further specified
736
+ default_strategy_config = {"target": "pytorch_lightning.strategies.DDPStrategy"}
737
+
738
+ if "strategy" in lightning_config:
739
+ strategy_cfg = lightning_config.strategy
740
+ else:
741
+ strategy_cfg = OmegaConf.create()
742
+ default_strategy_config["params"] = {
743
+ "find_unused_parameters": False,
744
+ # "static_graph": True,
745
+ # "ddp_comm_hook": default.fp16_compress_hook # TODO: experiment with this, also for DDPSharded
746
+ }
747
+ strategy_cfg = OmegaConf.merge(default_strategy_config, strategy_cfg)
748
+ print(
749
+ f"strategy config: \n ++++++++++++++ \n {strategy_cfg} \n ++++++++++++++ "
750
+ )
751
+ trainer_kwargs["strategy"] = instantiate_from_config(strategy_cfg)
752
+
753
+ # add callback which sets up log directory
754
+ default_callbacks_cfg = {
755
+ "setup_callback": {
756
+ "target": "main.SetupCallback",
757
+ "params": {
758
+ "resume": opt.resume,
759
+ "now": now,
760
+ "logdir": logdir,
761
+ "ckptdir": ckptdir,
762
+ "cfgdir": cfgdir,
763
+ "config": config,
764
+ "lightning_config": lightning_config,
765
+ "debug": opt.debug,
766
+ "ckpt_name": melk_ckpt_name,
767
+ },
768
+ },
769
+ "image_logger": {
770
+ "target": "main.ImageLogger",
771
+ "params": {"batch_frequency": 1000, "max_images": 4, "clamp": True},
772
+ },
773
+ "learning_rate_logger": {
774
+ "target": "pytorch_lightning.callbacks.LearningRateMonitor",
775
+ "params": {
776
+ "logging_interval": "step",
777
+ # "log_momentum": True
778
+ },
779
+ },
780
+ }
781
+ if version.parse(pl.__version__) >= version.parse("1.4.0"):
782
+ default_callbacks_cfg.update({"checkpoint_callback": modelckpt_cfg})
783
+
784
+ if "callbacks" in lightning_config:
785
+ callbacks_cfg = lightning_config.callbacks
786
+ else:
787
+ callbacks_cfg = OmegaConf.create()
788
+
789
+ if "metrics_over_trainsteps_checkpoint" in callbacks_cfg:
790
+ print(
791
+ "Caution: Saving checkpoints every n train steps without deleting. This might require some free space."
792
+ )
793
+ default_metrics_over_trainsteps_ckpt_dict = {
794
+ "metrics_over_trainsteps_checkpoint": {
795
+ "target": "pytorch_lightning.callbacks.ModelCheckpoint",
796
+ "params": {
797
+ "dirpath": os.path.join(ckptdir, "trainstep_checkpoints"),
798
+ "filename": "{epoch:06}-{step:09}",
799
+ "verbose": True,
800
+ "save_top_k": -1,
801
+ "every_n_train_steps": 10000,
802
+ "save_weights_only": True,
803
+ },
804
+ }
805
+ }
806
+ default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)
807
+
808
+ callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
809
+ if "ignore_keys_callback" in callbacks_cfg and ckpt_resume_path is not None:
810
+ callbacks_cfg.ignore_keys_callback.params["ckpt_path"] = ckpt_resume_path
811
+ elif "ignore_keys_callback" in callbacks_cfg:
812
+ del callbacks_cfg["ignore_keys_callback"]
813
+
814
+ trainer_kwargs["callbacks"] = [
815
+ instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg
816
+ ]
817
+ if not "plugins" in trainer_kwargs:
818
+ trainer_kwargs["plugins"] = list()
819
+
820
+ # cmd line trainer args (which are in trainer_opt) have always priority over config-trainer-args (which are in trainer_kwargs)
821
+ trainer_opt = vars(trainer_opt)
822
+ trainer_kwargs = {
823
+ key: val for key, val in trainer_kwargs.items() if key not in trainer_opt
824
+ }
825
+ trainer = Trainer(**trainer_opt, **trainer_kwargs)
826
+
827
+ trainer.logdir = logdir ###
828
+
829
+ # data
830
+ data = instantiate_from_config(config.data)
831
+ # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
832
+ # calling these ourselves should not be necessary but it is.
833
+ # lightning still takes care of proper multiprocessing though
834
+ data.prepare_data()
835
+ # data.setup()
836
+ print("#### Data #####")
837
+ try:
838
+ for k in data.datasets:
839
+ print(
840
+ f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}"
841
+ )
842
+ except:
843
+ print("datasets not yet initialized.")
844
+
845
+ # configure learning rate
846
+ if "batch_size" in config.data.params:
847
+ bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
848
+ else:
849
+ bs, base_lr = (
850
+ config.data.params.train.loader.batch_size,
851
+ config.model.base_learning_rate,
852
+ )
853
+ if not cpu:
854
+ ngpu = len(lightning_config.trainer.devices.strip(",").split(","))
855
+ else:
856
+ ngpu = 1
857
+ if "accumulate_grad_batches" in lightning_config.trainer:
858
+ accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
859
+ else:
860
+ accumulate_grad_batches = 1
861
+ print(f"accumulate_grad_batches = {accumulate_grad_batches}")
862
+ lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
863
+ if opt.scale_lr:
864
+ model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
865
+ print(
866
+ "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
867
+ model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr
868
+ )
869
+ )
870
+ else:
871
+ model.learning_rate = base_lr
872
+ print("++++ NOT USING LR SCALING ++++")
873
+ print(f"Setting learning rate to {model.learning_rate:.2e}")
874
+
875
+ # allow checkpointing via USR1
876
+ def melk(*args, **kwargs):
877
+ # run all checkpoint hooks
878
+ if trainer.global_rank == 0:
879
+ print("Summoning checkpoint.")
880
+ if melk_ckpt_name is None:
881
+ ckpt_path = os.path.join(ckptdir, "last.ckpt")
882
+ else:
883
+ ckpt_path = os.path.join(ckptdir, melk_ckpt_name)
884
+ trainer.save_checkpoint(ckpt_path)
885
+
886
+ def divein(*args, **kwargs):
887
+ if trainer.global_rank == 0:
888
+ import pudb
889
+
890
+ pudb.set_trace()
891
+
892
+ import signal
893
+
894
+ signal.signal(signal.SIGUSR1, melk)
895
+ signal.signal(signal.SIGUSR2, divein)
896
+
897
+ # run
898
+ if opt.train:
899
+ try:
900
+ trainer.fit(model, data, ckpt_path=ckpt_resume_path)
901
+ except Exception:
902
+ if not opt.debug:
903
+ melk()
904
+ raise
905
+ if not opt.no_test and not trainer.interrupted:
906
+ trainer.test(model, data)
907
+ except RuntimeError as err:
908
+ if MULTINODE_HACKS:
909
+ import datetime
910
+ import os
911
+ import socket
912
+
913
+ import requests
914
+
915
+ device = os.environ.get("CUDA_VISIBLE_DEVICES", "?")
916
+ hostname = socket.gethostname()
917
+ ts = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
918
+ resp = requests.get("http://169.254.169.254/latest/meta-data/instance-id")
919
+ print(
920
+ f"ERROR at {ts} on {hostname}/{resp.text} (CUDA_VISIBLE_DEVICES={device}): {type(err).__name__}: {err}",
921
+ flush=True,
922
+ )
923
+ raise err
924
+ except Exception:
925
+ if opt.debug and trainer.global_rank == 0:
926
+ try:
927
+ import pudb as debugger
928
+ except ImportError:
929
+ import pdb as debugger
930
+ debugger.post_mortem()
931
+ raise
932
+ finally:
933
+ # move newly created debug project to debug_runs
934
+ if opt.debug and not opt.resume and trainer.global_rank == 0:
935
+ dst, name = os.path.split(logdir)
936
+ dst = os.path.join(dst, "debug_runs", name)
937
+ os.makedirs(os.path.split(dst)[0], exist_ok=True)
938
+ os.rename(logdir, dst)
939
+
940
+ if opt.wandb:
941
+ wandb.finish()
942
+ # if trainer.global_rank == 0:
943
+ # print(trainer.profiler.summary())
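The learning-rate scaling block near the end of main.py above multiplies the base learning rate by the gradient-accumulation steps, the GPU count, and the per-GPU batch size whenever opt.scale_lr is set. The following is a minimal sketch of that arithmetic only; the numeric values are hypothetical examples, not defaults from this repository.

# Sketch of the scaling applied when opt.scale_lr is set (hypothetical values).
def scaled_learning_rate(accumulate_grad_batches: int, ngpu: int,
                         batch_size: int, base_lr: float) -> float:
    # effective LR = accumulation steps * GPUs * per-GPU batch size * base LR
    return accumulate_grad_batches * ngpu * batch_size * base_lr

if __name__ == "__main__":
    lr = scaled_learning_rate(accumulate_grad_batches=2, ngpu=4, batch_size=8, base_lr=1.0e-4)
    print(f"Setting learning rate to {lr:.2e}")  # -> 6.40e-03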
generative_models/model_licenses/LICENCE-SD-Turbo ADDED
@@ -0,0 +1,58 @@
1
+ STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT
2
+ Dated: November 28, 2023
3
+
4
+
5
+ By using or distributing any portion or element of the Models, Software, Software Products or Derivative Works, you agree to be bound by this Agreement.
6
+
7
+
8
+ "Agreement" means this Stable Non-Commercial Research Community License Agreement.
9
+
10
+
11
+ “AUP” means the Stability AI Acceptable Use Policy available at https://stability.ai/use-policy, as may be updated from time to time.
12
+
13
+
14
+ "Derivative Work(s)” means (a) any derivative work of the Software Products as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output. For clarity, Derivative Works do not include the output of any Model.
15
+
16
+
17
+ “Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software.
18
+
19
+
20
+ "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
21
+
22
+
23
+ “Model(s)" means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing, made available under this Agreement.
24
+
25
+
26
+ “Non-Commercial Uses” means exercising any of the rights granted herein for the purpose of research or non-commercial purposes. Non-Commercial Uses does not include any production use of the Software Products or any Derivative Works.
27
+
28
+
29
+ "Stability AI" or "we" means Stability AI Ltd. and its affiliates.
30
+
31
+ "Software" means Stability AI’s proprietary software made available under this Agreement.
32
+
33
+
34
+ “Software Products” means the Models, Software and Documentation, individually or in any combination.
35
+
36
+
37
+
38
+ 1. License Rights and Redistribution.
39
+
40
+ a. Subject to your compliance with this Agreement, the AUP (which is hereby incorporated herein by reference), and the Documentation, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s intellectual property or other rights owned or controlled by Stability AI embodied in the Software Products to reproduce the Software Products and produce, reproduce, distribute, and create Derivative Works of the Software Products for Non-Commercial Uses only, respectively.
41
+
42
+ b. You may not use the Software Products or Derivative Works to enable third parties to use the Software Products or Derivative Works as part of your hosted service or via your APIs, whether you are adding substantial additional functionality thereto or not. Merely distributing the Software Products or Derivative Works for download online without offering any related service (ex. by distributing the Models on HuggingFace) is not a violation of this subsection. If you wish to use the Software Products or any Derivative Works for commercial or production use or you wish to make the Software Products or any Derivative Works available to third parties via your hosted service or your APIs, contact Stability AI at https://stability.ai/contact.
43
+
44
+ c. If you distribute or make the Software Products, or any Derivative Works thereof, available to a third party, the Software Products, Derivative Works, or any portion thereof, respectively, will remain subject to this Agreement and you must (i) provide a copy of this Agreement to such third party, and (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "This Stability AI Model is licensed under the Stability AI Non-Commercial Research Community License, Copyright (c) Stability AI Ltd. All Rights Reserved.” If you create a Derivative Work of a Software Product, you may add your own attribution notices to the Notice file included with the Software Product, provided that you clearly indicate which attributions apply to the Software Product and you must state in the NOTICE file that you changed the Software Product and how it was modified.
45
+
46
+ 2. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SOFTWARE PRODUCTS, DERIVATIVE WORKS OR ANY OUTPUT OR RESULTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SOFTWARE PRODUCTS, DERIVATIVE WORKS AND ANY OUTPUT AND RESULTS.
47
+
48
+ 3. Limitation of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
49
+
50
+ 4. Intellectual Property.
51
+
52
+ a. No trademark licenses are granted under this Agreement, and in connection with the Software Products or Derivative Works, neither Stability AI nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Software Products or Derivative Works.
53
+
54
+ b. Subject to Stability AI’s ownership of the Software Products and Derivative Works made by or for Stability AI, with respect to any Derivative Works that are made by you, as between you and Stability AI, you are and will be the owner of such Derivative Works
55
+
56
+ c. If you institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Software Products, Derivative Works or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to your use or distribution of the Software Products or Derivative Works in violation of this Agreement.
57
+
58
+ 5. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Software Products and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of any Software Products or Derivative Works. Sections 2-4 shall survive the termination of this Agreement.
generative_models/model_licenses/LICENSE-SDV ADDED
@@ -0,0 +1,31 @@
1
+ STABLE VIDEO DIFFUSION NON-COMMERCIAL COMMUNITY LICENSE AGREEMENT
2
+ Dated: November 21, 2023
3
+
4
+ “AUP” means the Stability AI Acceptable Use Policy available at https://stability.ai/use-policy, as may be updated from time to time.
5
+
6
+ "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Software Products set forth herein.
7
+ "Derivative Work(s)” means (a) any derivative work of the Software Products as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output. For clarity, Derivative Works do not include the output of any Model.
8
+ “Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software.
9
+
10
+ "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
11
+
12
+ "Stability AI" or "we" means Stability AI Ltd.
13
+
14
+ "Software" means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing, made available under this Agreement.
15
+
16
+ “Software Products” means Software and Documentation.
17
+
18
+ By using or distributing any portion or element of the Software Products, you agree to be bound by this Agreement.
19
+
20
+
21
+
22
+ License Rights and Redistribution.
23
+ Subject to your compliance with this Agreement, the AUP (which is hereby incorporated herein by reference), and the Documentation, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s intellectual property or other rights owned by Stability AI embodied in the Software Products to reproduce, distribute, and create Derivative Works of the Software Products for purposes other than commercial or production use.
24
+ b. If you distribute or make the Software Products, or any Derivative Works thereof, available to a third party, the Software Products, Derivative Works, or any portion thereof, respectively, will remain subject to this Agreement and you must (i) provide a copy of this Agreement to such third party, and (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "Stable Video Diffusion is licensed under the Stable Video Diffusion Research License, Copyright (c) Stability AI Ltd. All Rights Reserved.” If you create a Derivative Work of a Software Product, you may add your own attribution notices to the Notice file included with the Software Product, provided that you clearly indicate which attributions apply to the Software Product and you must state in the NOTICE file that you changed the Software Product and how it was modified.
25
+ 2. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SOFTWARE PRODUCTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS.
26
+ 3. Limitation of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
27
+ 3. Intellectual Property.
28
+ a. No trademark licenses are granted under this Agreement, and in connection with the Software Products, neither Stability AI nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Software Products.
29
+ Subject to Stability AI’s ownership of the Software Products and Derivative Works made by or for Stability AI, with respect to any Derivative Works that are made by you, as between you and Stability AI, you are and will be the owner of such Derivative Works.
30
+ If you institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Software Products or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to your use or distribution of the Software Products in violation of this Agreement.
31
+ 4. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Software Products and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Software Products. Sections 2-4 shall survive the termination of this Agreement.
generative_models/model_licenses/LICENSE-SDXL-Turbo ADDED
@@ -0,0 +1,58 @@
1
+ STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT
2
+ Dated: November 28, 2023
3
+
4
+
5
+ By using or distributing any portion or element of the Models, Software, Software Products or Derivative Works, you agree to be bound by this Agreement.
6
+
7
+
8
+ "Agreement" means this Stable Non-Commercial Research Community License Agreement.
9
+
10
+
11
+ “AUP” means the Stability AI Acceptable Use Policy available at https://stability.ai/use-policy, as may be updated from time to time.
12
+
13
+
14
+ "Derivative Work(s)” means (a) any derivative work of the Software Products as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output. For clarity, Derivative Works do not include the output of any Model.
15
+
16
+
17
+ “Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software.
18
+
19
+
20
+ "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
21
+
22
+
23
+ “Model(s)" means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing, made available under this Agreement.
24
+
25
+
26
+ “Non-Commercial Uses” means exercising any of the rights granted herein for the purpose of research or non-commercial purposes. Non-Commercial Uses does not include any production use of the Software Products or any Derivative Works.
27
+
28
+
29
+ "Stability AI" or "we" means Stability AI Ltd. and its affiliates.
30
+
31
+ "Software" means Stability AI’s proprietary software made available under this Agreement.
32
+
33
+
34
+ “Software Products” means the Models, Software and Documentation, individually or in any combination.
35
+
36
+
37
+
38
+ 1. License Rights and Redistribution.
39
+
40
+ a. Subject to your compliance with this Agreement, the AUP (which is hereby incorporated herein by reference), and the Documentation, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s intellectual property or other rights owned or controlled by Stability AI embodied in the Software Products to reproduce the Software Products and produce, reproduce, distribute, and create Derivative Works of the Software Products for Non-Commercial Uses only, respectively.
41
+
42
+ b. You may not use the Software Products or Derivative Works to enable third parties to use the Software Products or Derivative Works as part of your hosted service or via your APIs, whether you are adding substantial additional functionality thereto or not. Merely distributing the Software Products or Derivative Works for download online without offering any related service (ex. by distributing the Models on HuggingFace) is not a violation of this subsection. If you wish to use the Software Products or any Derivative Works for commercial or production use or you wish to make the Software Products or any Derivative Works available to third parties via your hosted service or your APIs, contact Stability AI at https://stability.ai/contact.
43
+
44
+ c. If you distribute or make the Software Products, or any Derivative Works thereof, available to a third party, the Software Products, Derivative Works, or any portion thereof, respectively, will remain subject to this Agreement and you must (i) provide a copy of this Agreement to such third party, and (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "This Stability AI Model is licensed under the Stability AI Non-Commercial Research Community License, Copyright (c) Stability AI Ltd. All Rights Reserved.” If you create a Derivative Work of a Software Product, you may add your own attribution notices to the Notice file included with the Software Product, provided that you clearly indicate which attributions apply to the Software Product and you must state in the NOTICE file that you changed the Software Product and how it was modified.
45
+
46
+ 2. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SOFTWARE PRODUCTS, DERIVATIVE WORKS OR ANY OUTPUT OR RESULTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SOFTWARE PRODUCTS, DERIVATIVE WORKS AND ANY OUTPUT AND RESULTS.
47
+
48
+ 3. Limitation of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
49
+
50
+ 4. Intellectual Property.
51
+
52
+ a. No trademark licenses are granted under this Agreement, and in connection with the Software Products or Derivative Works, neither Stability AI nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Software Products or Derivative Works.
53
+
54
+ b. Subject to Stability AI’s ownership of the Software Products and Derivative Works made by or for Stability AI, with respect to any Derivative Works that are made by you, as between you and Stability AI, you are and will be the owner of such Derivative Works
55
+
56
+ c. If you institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Software Products, Derivative Works or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to your use or distribution of the Software Products or Derivative Works in violation of this Agreement.
57
+
58
+ 5. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Software Products and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of any Software Products or Derivative Works. Sections 2-4 shall survive the termination of this Agreement.
generative_models/model_licenses/LICENSE-SDXL0.9 ADDED
@@ -0,0 +1,75 @@
1
+ SDXL 0.9 RESEARCH LICENSE AGREEMENT
2
+ Copyright (c) Stability AI Ltd.
3
+ This License Agreement (as may be amended in accordance with this License Agreement, “License”), between you, or your employer or other entity (if you are entering into this agreement on behalf of your employer or other entity) (“Licensee” or “you”) and Stability AI Ltd. (“Stability AI” or “we”) applies to your use of any computer program, algorithm, source code, object code, or software that is made available by Stability AI under this License (“Software”) and any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software (“Documentation”).
4
+ By clicking “I Accept” below or by using the Software, you agree to the terms of this License. If you do not agree to this License, then you do not have any rights to use the Software or Documentation (collectively, the “Software Products”), and you must immediately cease using the Software Products. If you are agreeing to be bound by the terms of this License on behalf of your employer or other entity, you represent and warrant to Stability AI that you have full legal authority to bind your employer or such entity to this License. If you do not have the requisite authority, you may not accept the License or access the Software Products on behalf of your employer or other entity.
5
+ 1. LICENSE GRANT
6
+
7
+ a. Subject to your compliance with the Documentation and Sections 2, 3, and 5, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s copyright interests to reproduce, distribute, and create derivative works of the Software solely for your non-commercial research purposes. The foregoing license is personal to you, and you may not assign or sublicense this License or any other rights or obligations under this License without Stability AI’s prior written consent; any such assignment or sublicense will be void and will automatically and immediately terminate this License.
8
+
9
+ b. You may make a reasonable number of copies of the Documentation solely for use in connection with the license to the Software granted above.
10
+
11
+ c. The grant of rights expressly set forth in this Section 1 (License Grant) are the complete grant of rights to you in the Software Products, and no other licenses are granted, whether by waiver, estoppel, implication, equity or otherwise. Stability AI and its licensors reserve all rights not expressly granted by this License.
12
+
13
+
14
+ 2. RESTRICTIONS
15
+
16
+ You will not, and will not permit, assist or cause any third party to:
17
+
18
+ a. use, modify, copy, reproduce, create derivative works of, or distribute the Software Products (or any derivative works thereof, works incorporating the Software Products, or any data produced by the Software), in whole or in part, for (i) any commercial or production purposes, (ii) military purposes or in the service of nuclear technology, (iii) purposes of surveillance, including any research or development relating to surveillance, (iv) biometric processing, (v) in any manner that infringes, misappropriates, or otherwise violates any third-party rights, or (vi) in any manner that violates any applicable law and violating any privacy or security laws, rules, regulations, directives, or governmental requirements (including the General Data Privacy Regulation (Regulation (EU) 2016/679), the California Consumer Privacy Act, and any and all laws governing the processing of biometric information), as well as all amendments and successor laws to any of the foregoing;
19
+
20
+ b. alter or remove copyright and other proprietary notices which appear on or in the Software Products;
21
+
22
+ c. utilize any equipment, device, software, or other means to circumvent or remove any security or protection used by Stability AI in connection with the Software, or to circumvent or remove any usage restrictions, or to enable functionality disabled by Stability AI; or
23
+
24
+ d. offer or impose any terms on the Software Products that alter, restrict, or are inconsistent with the terms of this License.
25
+
26
+ e. 1) violate any applicable U.S. and non-U.S. export control and trade sanctions laws (“Export Laws”); 2) directly or indirectly export, re-export, provide, or otherwise transfer Software Products: (a) to any individual, entity, or country prohibited by Export Laws; (b) to anyone on U.S. or non-U.S. government restricted parties lists; or (c) for any purpose prohibited by Export Laws, including nuclear, chemical or biological weapons, or missile technology applications; 3) use or download Software Products if you or they are: (a) located in a comprehensively sanctioned jurisdiction, (b) currently listed on any U.S. or non-U.S. restricted parties list, or (c) for any purpose prohibited by Export Laws; and (4) will not disguise your location through IP proxying or other methods.
27
+
28
+
29
+ 3. ATTRIBUTION
30
+
31
+ Together with any copies of the Software Products (as well as derivative works thereof or works incorporating the Software Products) that you distribute, you must provide (i) a copy of this License, and (ii) the following attribution notice: “SDXL 0.9 is licensed under the SDXL Research License, Copyright (c) Stability AI Ltd. All Rights Reserved.”
32
+
33
+
34
+ 4. DISCLAIMERS
35
+
36
+ THE SOFTWARE PRODUCTS ARE PROVIDED “AS IS” AND “WITH ALL FAULTS” WITH NO WARRANTY OF ANY KIND, EXPRESS OR IMPLIED. STABILITY AIEXPRESSLY DISCLAIMS ALL REPRESENTATIONS AND WARRANTIES, EXPRESS OR IMPLIED, WHETHER BY STATUTE, CUSTOM, USAGE OR OTHERWISE AS TO ANY MATTERS RELATED TO THE SOFTWARE PRODUCTS, INCLUDING BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, SATISFACTORY QUALITY, OR NON-INFRINGEMENT. STABILITY AI MAKES NO WARRANTIES OR REPRESENTATIONS THAT THE SOFTWARE PRODUCTS WILL BE ERROR FREE OR FREE OF VIRUSES OR OTHER HARMFUL COMPONENTS, OR PRODUCE ANY PARTICULAR RESULTS.
37
+
38
+
39
+ 5. LIMITATION OF LIABILITY
40
+
41
+ TO THE FULLEST EXTENT PERMITTED BY LAW, IN NO EVENT WILL STABILITY AI BE LIABLE TO YOU (A) UNDER ANY THEORY OF LIABILITY, WHETHER BASED IN CONTRACT, TORT, NEGLIGENCE, STRICT LIABILITY, WARRANTY, OR OTHERWISE UNDER THIS LICENSE, OR (B) FOR ANY INDIRECT, CONSEQUENTIAL, EXEMPLARY, INCIDENTAL, PUNITIVE OR SPECIAL DAMAGES OR LOST PROFITS, EVEN IF STABILITY AI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE SOFTWARE PRODUCTS, THEIR CONSTITUENT COMPONENTS, AND ANY OUTPUT (COLLECTIVELY, “SOFTWARE MATERIALS”) ARE NOT DESIGNED OR INTENDED FOR USE IN ANY APPLICATION OR SITUATION WHERE FAILURE OR FAULT OF THE SOFTWARE MATERIALS COULD REASONABLY BE ANTICIPATED TO LEAD TO SERIOUS INJURY OF ANY PERSON, INCLUDING POTENTIAL DISCRIMINATION OR VIOLATION OF AN INDIVIDUAL’S PRIVACY RIGHTS, OR TO SEVERE PHYSICAL, PROPERTY, OR ENVIRONMENTAL DAMAGE (EACH, A “HIGH-RISK USE”). IF YOU ELECT TO USE ANY OF THE SOFTWARE MATERIALS FOR A HIGH-RISK USE, YOU DO SO AT YOUR OWN RISK. YOU AGREE TO DESIGN AND IMPLEMENT APPROPRIATE DECISION-MAKING AND RISK-MITIGATION PROCEDURES AND POLICIES IN CONNECTION WITH A HIGH-RISK USE SUCH THAT EVEN IF THERE IS A FAILURE OR FAULT IN ANY OF THE SOFTWARE MATERIALS, THE SAFETY OF PERSONS OR PROPERTY AFFECTED BY THE ACTIVITY STAYS AT A LEVEL THAT IS REASONABLE, APPROPRIATE, AND LAWFUL FOR THE FIELD OF THE HIGH-RISK USE.
42
+
43
+
44
+ 6. INDEMNIFICATION
45
+
46
+ You will indemnify, defend and hold harmless Stability AI and our subsidiaries and affiliates, and each of our respective shareholders, directors, officers, employees, agents, successors, and assigns (collectively, the “Stability AI Parties”) from and against any losses, liabilities, damages, fines, penalties, and expenses (including reasonable attorneys’ fees) incurred by any Stability AI Party in connection with any claim, demand, allegation, lawsuit, proceeding, or investigation (collectively, “Claims”) arising out of or related to: (a) your access to or use of the Software Products (as well as any results or data generated from such access or use), including any High-Risk Use (defined below); (b) your violation of this License; or (c) your violation, misappropriation or infringement of any rights of another (including intellectual property or other proprietary rights and privacy rights). You will promptly notify the Stability AI Parties of any such Claims, and cooperate with Stability AI Parties in defending such Claims. You will also grant the Stability AI Parties sole control of the defense or settlement, at Stability AI’s sole option, of any Claims. This indemnity is in addition to, and not in lieu of, any other indemnities or remedies set forth in a written agreement between you and Stability AI or the other Stability AI Parties.
47
+
48
+
49
+ 7. TERMINATION; SURVIVAL
50
+
51
+ a. This License will automatically terminate upon any breach by you of the terms of this License.
52
+
53
+ b. We may terminate this License, in whole or in part, at any time upon notice (including electronic) to you.
54
+
55
+ c. The following sections survive termination of this License: 2 (Restrictions), 3 (Attribution), 4 (Disclaimers), 5 (Limitation on Liability), 6 (Indemnification) 7 (Termination; Survival), 8 (Third Party Materials), 9 (Trademarks), 10 (Applicable Law; Dispute Resolution), and 11 (Miscellaneous).
56
+
57
+
58
+ 8. THIRD PARTY MATERIALS
59
+
60
+ The Software Products may contain third-party software or other components (including free and open source software) (all of the foregoing, “Third Party Materials”), which are subject to the license terms of the respective third-party licensors. Your dealings or correspondence with third parties and your use of or interaction with any Third Party Materials are solely between you and the third party. Stability AI does not control or endorse, and makes no representations or warranties regarding, any Third Party Materials, and your access to and use of such Third Party Materials are at your own risk.
61
+
62
+
63
+ 9. TRADEMARKS
64
+
65
+ Licensee has not been granted any trademark license as part of this License and may not use any name or mark associated with Stability AI without the prior written permission of Stability AI, except to the extent necessary to make the reference required by the “ATTRIBUTION” section of this Agreement.
66
+
67
+
68
+ 10. APPLICABLE LAW; DISPUTE RESOLUTION
69
+
70
+ This License will be governed and construed under the laws of the State of California without regard to conflicts of law provisions. Any suit or proceeding arising out of or relating to this License will be brought in the federal or state courts, as applicable, in San Mateo County, California, and each party irrevocably submits to the jurisdiction and venue of such courts.
71
+
72
+
73
+ 11. MISCELLANEOUS
74
+
75
+ If any provision or part of a provision of this License is unlawful, void or unenforceable, that provision or part of the provision is deemed severed from this License, and will not affect the validity and enforceability of any remaining provisions. The failure of Stability AI to exercise or enforce any right or provision of this License will not operate as a waiver of such right or provision. This License does not confer any third-party beneficiary rights upon any other person or entity. This License, together with the Documentation, contains the entire understanding between you and Stability AI regarding the subject matter of this License, and supersedes all other written or oral agreements and understandings between you and Stability AI regarding such subject matter. No change or addition to any provision of this License will be binding unless it is in writing and signed by an authorized representative of both you and Stability AI.
generative_models/model_licenses/LICENSE-SDXL1.0 ADDED
@@ -0,0 +1,175 @@
1
+ Copyright (c) 2023 Stability AI CreativeML Open RAIL++-M License dated July 26, 2023
2
+
3
+ Section I: PREAMBLE Multimodal generative models are being widely adopted and used, and
4
+ have the potential to transform the way artists, among other individuals, conceive and
5
+ benefit from AI or ML technologies as a tool for content creation. Notwithstanding the
6
+ current and potential benefits that these artifacts can bring to society at large, there
7
+ are also concerns about potential misuses of them, either due to their technical
8
+ limitations or ethical considerations. In short, this license strives for both the open
9
+ and responsible downstream use of the accompanying model. When it comes to the open
10
+ character, we took inspiration from open source permissive licenses regarding the grant
11
+ of IP rights. Referring to the downstream responsible use, we added use-based
12
+ restrictions not permitting the use of the model in very specific scenarios, in order
13
+ for the licensor to be able to enforce the license in case potential misuses of the
14
+ Model may occur. At the same time, we strive to promote open and responsible research on
15
+ generative models for art and content generation. Even though downstream derivative
16
+ versions of the model could be released under different licensing terms, the latter will
17
+ always have to include - at minimum - the same use-based restrictions as the ones in the
18
+ original license (this license). We believe in the intersection between open and
19
+ responsible AI development; thus, this agreement aims to strike a balance between both
20
+ in order to enable responsible open-science in the field of AI. This CreativeML Open
21
+ RAIL++-M License governs the use of the model (and its derivatives) and is informed by
22
+ the model card associated with the model. NOW THEREFORE, You and Licensor agree as
23
+ follows: Definitions "License" means the terms and conditions for use, reproduction, and
24
+ Distribution as defined in this document. "Data" means a collection of information
25
+ and/or content extracted from the dataset used with the Model, including to train,
26
+ pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
27
+ "Output" means the results of operating a Model as embodied in informational content
28
+ resulting therefrom. "Model" means any accompanying machine-learning based assemblies
29
+ (including checkpoints), consisting of learnt weights, parameters (including optimizer
30
+ states), corresponding to the model architecture as embodied in the Complementary
31
+ Material, that have been trained or tuned, in whole or in part on the Data, using the
32
+ Complementary Material. "Derivatives of the Model" means all modifications to the Model,
33
+ works based on the Model, or any other model which is created or initialized by transfer
34
+ of patterns of the weights, parameters, activations or output of the Model, to the other
35
+ model, in order to cause the other model to perform similarly to the Model, including -
36
+ but not limited to - distillation methods entailing the use of intermediate data
37
+ representations or methods based on the generation of synthetic data by the Model for
38
+ training the other model. "Complementary Material" means the accompanying source code
39
+ and scripts used to define, run, load, benchmark or evaluate the Model, and used to
40
+ prepare data for training or evaluation, if any. This includes any accompanying
41
+ documentation, tutorials, examples, etc, if any. "Distribution" means any transmission,
42
+ reproduction, publication or other sharing of the Model or Derivatives of the Model to a
43
+ third party, including providing the Model as a hosted service made available by
44
+ electronic or other remote means - e.g. API-based or web access. "Licensor" means the
45
+ copyright owner or entity authorized by the copyright owner that is granting the
46
+ License, including the persons or entities that may have rights in the Model and/or
47
+ distributing the Model. "You" (or "Your") means an individual or Legal Entity exercising
48
+ permissions granted by this License and/or making use of the Model for whichever purpose
49
+ and in any field of use, including usage of the Model in an end-use application - e.g.
50
+ chatbot, translator, image generator. "Third Parties" means individuals or legal
51
+ entities that are not under common control with Licensor or You. "Contribution" means
52
+ any work of authorship, including the original version of the Model and any
53
+ modifications or additions to that Model or Derivatives of the Model thereof, that is
54
+ intentionally submitted to Licensor for inclusion in the Model by the copyright owner or
55
+ by an individual or Legal Entity authorized to submit on behalf of the copyright owner.
56
+ For the purposes of this definition, "submitted" means any form of electronic, verbal,
57
+ or written communication sent to the Licensor or its representatives, including but not
58
+ limited to communication on electronic mailing lists, source code control systems, and
59
+ issue tracking systems that are managed by, or on behalf of, the Licensor for the
60
+ purpose of discussing and improving the Model, but excluding communication that is
61
+ conspicuously marked or otherwise designated in writing by the copyright owner as "Not a
62
+ Contribution." "Contributor" means Licensor and any individual or Legal Entity on behalf
63
+ of whom a Contribution has been received by Licensor and subsequently incorporated
64
+ within the Model.
65
+
66
+ Section II: INTELLECTUAL PROPERTY RIGHTS Both copyright and patent grants apply to the
67
+ Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of
68
+ the Model are subject to additional terms as described in
69
+
70
+ Section III. Grant of Copyright License. Subject to the terms and conditions of this
71
+ License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive,
72
+ no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly
73
+ display, publicly perform, sublicense, and distribute the Complementary Material, the
74
+ Model, and Derivatives of the Model. Grant of Patent License. Subject to the terms and
75
+ conditions of this License and where and as applicable, each Contributor hereby grants
76
+ to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this paragraph) patent license to make, have made, use, offer to
78
+ sell, sell, import, and otherwise transfer the Model and the Complementary Material,
79
+ where such license applies only to those patent claims licensable by such Contributor
80
+ that are necessarily infringed by their Contribution(s) alone or by combination of their
81
+ Contribution(s) with the Model to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a cross-claim or counterclaim
83
+ in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution
84
+ incorporated within the Model and/or Complementary Material constitutes direct or
85
+ contributory patent infringement, then any patent licenses granted to You under this
86
+ License for the Model and/or Work shall terminate as of the date such litigation is
87
+ asserted or filed. Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
88
+ Distribution and Redistribution. You may host for Third Party remote access purposes
89
+ (e.g. software-as-a-service), reproduce and distribute copies of the Model or
90
+ Derivatives of the Model thereof in any medium, with or without modifications, provided
91
+ that You meet the following conditions: Use-based restrictions as referenced in
92
+ paragraph 5 MUST be included as an enforceable provision by You in any type of legal
93
+ agreement (e.g. a license) governing the use and/or distribution of the Model or
94
+ Derivatives of the Model, and You shall give notice to subsequent users You Distribute
95
+ to, that the Model or Derivatives of the Model are subject to paragraph 5. This
96
+ provision does not apply to the use of Complementary Material. You must give any Third
97
+ Party recipients of the Model or Derivatives of the Model a copy of this License; You
98
+ must cause any modified files to carry prominent notices stating that You changed the
99
+ files; You must retain all copyright, patent, trademark, and attribution notices
100
+ excluding those notices that do not pertain to any part of the Model, Derivatives of the
101
+ Model. You may add Your own copyright statement to Your modifications and may provide
102
+ additional or different license terms and conditions - respecting paragraph 4.a. - for
103
+ use, reproduction, or Distribution of Your modifications, or for any such Derivatives of
104
+ the Model as a whole, provided Your use, reproduction, and Distribution of the Model
105
+ otherwise complies with the conditions stated in this License. Use-based restrictions.
106
+ The restrictions set forth in Attachment A are considered Use-based restrictions.
107
+ Therefore You cannot use the Model and the Derivatives of the Model for the specified
108
+ restricted uses. You may use the Model subject to this License, including only for
109
+ lawful purposes and in accordance with the License. Use may include creating any content
110
+ with, finetuning, updating, running, training, evaluating and/or reparametrizing the
111
+ Model. You shall require all of Your users who use the Model or a Derivative of the
112
+ Model to comply with the terms of this paragraph (paragraph 5). The Output You Generate.
113
+ Except as set forth herein, Licensor claims no rights in the Output You generate using
114
+ the Model. You are accountable for the Output you generate and its subsequent uses. No
115
+ use of the output can contravene any provision as stated in the License.
116
+
117
+ Section IV: OTHER PROVISIONS Updates and Runtime Restrictions. To the maximum extent
118
+ permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage
119
+ of the Model in violation of this License. Trademarks and related. Nothing in this
120
+ License permits You to make use of Licensors’ trademarks, trade names, logos or to
121
+ otherwise suggest endorsement or misrepresent the relationship between the parties; and
122
+ any rights not expressly granted herein are reserved by the Licensors. Disclaimer of
123
+ Warranty. Unless required by applicable law or agreed to in writing, Licensor provides
124
+ the Model and the Complementary Material (and each Contributor provides its
125
+ Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
126
+ express or implied, including, without limitation, any warranties or conditions of
127
+ TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
128
+ solely responsible for determining the appropriateness of using or redistributing the
129
+ Model, Derivatives of the Model, and the Complementary Material and assume any risks
130
+ associated with Your exercise of permissions under this License. Limitation of
131
+ Liability. In no event and under no legal theory, whether in tort (including
132
+ negligence), contract, or otherwise, unless required by applicable law (such as
133
+ deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be
134
+ liable to You for damages, including any direct, indirect, special, incidental, or
135
+ consequential damages of any character arising as a result of this License or out of the
136
+ use or inability to use the Model and the Complementary Material (including but not
137
+ limited to damages for loss of goodwill, work stoppage, computer failure or malfunction,
138
+ or any and all other commercial damages or losses), even if such Contributor has been
139
+ advised of the possibility of such damages. Accepting Warranty or Additional Liability.
140
+ While redistributing the Model, Derivatives of the Model and the Complementary Material
141
+ thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty,
142
+ indemnity, or other liability obligations and/or rights consistent with this License.
143
+ However, in accepting such obligations, You may act only on Your own behalf and on Your
144
+ sole responsibility, not on behalf of any other Contributor, and only if You agree to
145
+ indemnify, defend, and hold each Contributor harmless for any liability incurred by, or
146
+ claims asserted against, such Contributor by reason of your accepting any such warranty
147
+ or additional liability. If any provision of this License is held to be invalid, illegal
148
+ or unenforceable, the remaining provisions shall be unaffected thereby and remain valid
149
+ as if such provision had not been set forth herein.
150
+
151
+ END OF TERMS AND CONDITIONS
152
+
153
+ Attachment A Use Restrictions
154
+ You agree not to use the Model or Derivatives of the Model:
155
+ In any way that violates any applicable national, federal, state, local or
156
+ international law or regulation; For the purpose of exploiting, harming or attempting to
157
+ exploit or harm minors in any way; To generate or disseminate verifiably false
158
+ information and/or content with the purpose of harming others; To generate or
159
+ disseminate personal identifiable information that can be used to harm an individual; To
160
+ defame, disparage or otherwise harass others; For fully automated decision making that
161
+ adversely impacts an individual’s legal rights or otherwise creates or modifies a
162
+ binding, enforceable obligation; For any use intended to or which has the effect of
163
+ discriminating against or harming individuals or groups based on online or offline
164
+ social behavior or known or predicted personal or personality characteristics; To
165
+ exploit any of the vulnerabilities of a specific group of persons based on their age,
166
+ social, physical or mental characteristics, in order to materially distort the behavior
167
+ of a person pertaining to that group in a manner that causes or is likely to cause that
168
+ person or another person physical or psychological harm; For any use intended to or
169
+ which has the effect of discriminating against individuals or groups based on legally
170
+ protected characteristics or categories; To provide medical advice and medical results
171
+ interpretation; To generate or disseminate information for the purpose to be used for
172
+ administration of justice, law enforcement, immigration or asylum processes, such as
173
+ predicting an individual will commit fraud/crime commitment (e.g. by text profiling,
174
+ drawing causal relationships between assertions made in documents, indiscriminate and
175
+ arbitrarily-targeted use).
generative_models/pyproject.toml ADDED
@@ -0,0 +1,48 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "sgm"
7
+ dynamic = ["version"]
8
+ description = "Stability Generative Models"
9
+ readme = "README.md"
10
+ license-files = { paths = ["LICENSE-CODE"] }
11
+ requires-python = ">=3.8"
12
+
13
+ [project.urls]
14
+ Homepage = "https://github.com/Stability-AI/generative-models"
15
+
16
+ [tool.hatch.version]
17
+ path = "sgm/__init__.py"
18
+
19
+ [tool.hatch.build]
20
+ # This needs to be explicitly set so the configuration files
21
+ # grafted into the `sgm` directory get included in the wheel's
22
+ # RECORD file.
23
+ include = [
24
+ "sgm",
25
+ ]
26
+ # The force-include configurations below make Hatch copy
27
+ # the configs/ directory (containing the various YAML files required
28
+ # by the generative models) into the source distribution and the wheel.
29
+
30
+ [tool.hatch.build.targets.sdist.force-include]
31
+ "./configs" = "sgm/configs"
32
+
33
+ [tool.hatch.build.targets.wheel.force-include]
34
+ "./configs" = "sgm/configs"
35
+
36
+ [tool.hatch.envs.ci]
37
+ skip-install = false
38
+
39
+ dependencies = [
40
+ "pytest"
41
+ ]
42
+
43
+ [tool.hatch.envs.ci.scripts]
44
+ test-inference = [
45
+ "pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2+cu118 --index-url https://download.pytorch.org/whl/cu118",
46
+ "pip install -r requirements/pt2.txt",
47
+ "pytest -v tests/inference/test_inference.py {args}",
48
+ ]
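Because the force-include settings above graft ./configs into the build artifacts as sgm/configs, an installed copy of the package carries its YAML configs alongside the code. A minimal sketch of locating them at runtime, assuming that layout and Python 3.9+ for importlib.resources.files:

# Sketch only: resolves the packaged config directory without hard-coding site-packages paths.
from importlib.resources import files  # Python 3.9+

config_dir = files("sgm") / "configs"
yaml_names = sorted(p.name for p in config_dir.iterdir() if p.name.endswith(".yaml"))
print(yaml_names)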
generative_models/pytest.ini ADDED
@@ -0,0 +1,3 @@
1
+ [pytest]
2
+ markers =
3
+ inference: mark as inference test (deselect with '-m "not inference"')
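The pytest.ini above only declares the inference marker; a minimal sketch of how a test would opt into it follows (the test name and body are placeholders, not tests from this repository).

import pytest

@pytest.mark.inference
def test_pipeline_runs():
    # placeholder assertion; a real test would load a model and run sampling
    assert True

Running pytest -m inference selects only tests carrying the marker, and -m "not inference" skips them, matching the hint in the marker description.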
generative_models/requirements/pt2.txt ADDED
@@ -0,0 +1,40 @@
1
+ black==23.7.0
2
+ chardet==5.1.0
3
+ clip @ git+https://github.com/openai/CLIP.git
4
+ einops>=0.6.1
5
+ fairscale>=0.4.13
6
+ fire>=0.5.0
7
+ fsspec>=2023.6.0
8
+ invisible-watermark>=0.2.0
9
+ kornia==0.6.9
10
+ matplotlib>=3.7.2
11
+ natsort>=8.4.0
12
+ ninja>=1.11.1
13
+ numpy>=1.24.4
14
+ omegaconf>=2.3.0
15
+ open-clip-torch>=2.20.0
16
+ opencv-python==4.6.0.66
17
+ pandas>=2.0.3
18
+ pillow>=9.5.0
19
+ pudb>=2022.1.3
20
+ pytorch-lightning==2.0.1
21
+ pyyaml>=6.0.1
22
+ scipy>=1.10.1
23
+ streamlit>=0.73.1
24
+ tensorboardx==2.6
25
+ timm>=0.9.2
26
+ tokenizers==0.12.1
27
+ torch>=2.0.1
28
+ torchaudio>=2.0.2
29
+ torchdata==0.6.1
30
+ torchmetrics>=1.0.1
31
+ torchvision>=0.15.2
32
+ tqdm>=4.65.0
33
+ transformers==4.19.1
34
+ triton==2.0.0
35
+ urllib3<1.27,>=1.25.4
36
+ wandb>=0.15.6
37
+ webdataset>=0.2.33
38
+ wheel>=0.41.0
39
+ xformers>=0.0.20
40
+ streamlit-keyup==0.2.0
generative_models/scripts/__init__.py ADDED
File without changes
generative_models/scripts/demo/__init__.py ADDED
File without changes
generative_models/scripts/demo/detect.py ADDED
@@ -0,0 +1,156 @@
1
+ import argparse
2
+
3
+ import cv2
4
+ import numpy as np
5
+
6
+ try:
7
+ from imwatermark import WatermarkDecoder
8
+ except ImportError as e:
9
+ try:
10
+ # Assume some other dependencies, such as torch, are not installed;
11
+ # import the module file directly without loading unnecessary libraries.
12
+ import importlib.util
13
+ import sys
14
+
15
+ spec = importlib.util.find_spec("imwatermark.maxDct")
16
+ assert spec is not None
17
+ maxDct = importlib.util.module_from_spec(spec)
18
+ sys.modules["maxDct"] = maxDct
19
+ spec.loader.exec_module(maxDct)
20
+
21
+ class WatermarkDecoder(object):
22
+ """A minimal version of
23
+ https://github.com/ShieldMnt/invisible-watermark/blob/main/imwatermark/watermark.py
24
+ to only reconstruct bits using dwtDct"""
25
+
26
+ def __init__(self, wm_type="bytes", length=0):
27
+ assert wm_type == "bits", "Only bits defined in minimal import"
28
+ self._wmType = wm_type
29
+ self._wmLen = length
30
+
31
+ def reconstruct(self, bits):
32
+ if len(bits) != self._wmLen:
33
+ raise RuntimeError("bits are not matched with watermark length")
34
+
35
+ return bits
36
+
37
+ def decode(self, cv2Image, method="dwtDct", **configs):
38
+ (r, c, channels) = cv2Image.shape
39
+ if r * c < 256 * 256:
40
+ raise RuntimeError("image too small, should be larger than 256x256")
41
+
42
+ bits = []
43
+ assert method == "dwtDct"
44
+ embed = maxDct.EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
45
+ bits = embed.decode(cv2Image)
46
+ return self.reconstruct(bits)
47
+
48
+ except:
49
+ raise e
50
+
51
+
52
+ # A fixed 48-bit message that was choosen at random
53
+ # WATERMARK_MESSAGE = 0xB3EC907BB19E
54
+ WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
55
+ # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
56
+ WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
57
+ MATCH_VALUES = [
58
+ [27, "No watermark detected"],
59
+ [33, "Partial watermark match. Cannot determine with certainty."],
60
+ [
61
+ 35,
62
+ (
63
+ "Likely watermarked. In our test 0.02% of real images were "
64
+ 'falsely detected as "Likely watermarked"'
65
+ ),
66
+ ],
67
+ [
68
+ 49,
69
+ (
70
+ "Very likely watermarked. In our test no real images were "
71
+ 'falsely detected as "Very likely watermarked"'
72
+ ),
73
+ ],
74
+ ]
75
+
76
+
77
+ class GetWatermarkMatch:
78
+ def __init__(self, watermark):
79
+ self.watermark = watermark
80
+ self.num_bits = len(self.watermark)
81
+ self.decoder = WatermarkDecoder("bits", self.num_bits)
82
+
83
+ def __call__(self, x: np.ndarray) -> np.ndarray:
84
+ """
85
+ Detects the number of bits matching the predefined watermark for one
86
+ or multiple images. Images should be in cv2 format, e.g. h x w x c BGR.
87
+
88
+ Args:
89
+ x: ([B], h, w, c) in range [0, 255]
90
+
91
+ Returns:
92
+ number of matched bits ([B],)
93
+ """
94
+ squeeze = len(x.shape) == 3
95
+ if squeeze:
96
+ x = x[None, ...]
97
+
98
+ bs = x.shape[0]
99
+ detected = np.empty((bs, self.num_bits), dtype=bool)
100
+ for k in range(bs):
101
+ detected[k] = self.decoder.decode(x[k], "dwtDct")
102
+ result = np.sum(detected == self.watermark, axis=-1)
103
+ if squeeze:
104
+ return result[0]
105
+ else:
106
+ return result
107
+
108
+
109
+ get_watermark_match = GetWatermarkMatch(WATERMARK_BITS)
110
+
111
+
112
+ if __name__ == "__main__":
113
+ parser = argparse.ArgumentParser()
114
+ parser.add_argument(
115
+ "filename",
116
+ nargs="+",
117
+ type=str,
118
+ help="Image files to check for watermarks",
119
+ )
120
+ opts = parser.parse_args()
121
+
122
+ print(
123
+ """
124
+ This script tries to detect watermarked images. Please be aware of
125
+ the following:
126
+ - As the watermark is supposed to be invisible, there is the risk that
127
+ watermarked images may not be detected.
128
+ - To maximize the chance of detection make sure that the image has the same
129
+ dimensions as when the watermark was applied (most likely 1024x1024
130
+ or 512x512).
131
+ - Specific image manipulation may drastically decrease the chance that
132
+ watermarks can be detected.
133
+ - There is also the chance that an image has the characteristics of the
134
+ watermark by chance.
135
+ - The watermark script is public, anybody may watermark any images, and
136
+ could therefore claim it to be generated.
137
+ - All numbers below are based on a test using 10,000 images without any
138
+ modifications after applying the watermark.
139
+ """
140
+ )
141
+
142
+ for fn in opts.filename:
143
+ image = cv2.imread(fn)
144
+ if image is None:
145
+ print(f"Couldn't read {fn}. Skipping")
146
+ continue
147
+
148
+ num_bits = get_watermark_match(image)
149
+ k = 0
150
+ while num_bits > MATCH_VALUES[k][0]:
151
+ k += 1
152
+ print(
153
+ f"{fn}: {MATCH_VALUES[k][1]}",
154
+ f"Bits that matched the watermark: {num_bits} of {len(WATERMARK_BITS)}\n",
155
+ sep="\n\t",
156
+ )
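Besides the CLI entry point above, `get_watermark_match` can be called directly from Python; a minimal sketch, where the image path is only a placeholder:

import cv2

from scripts.demo.detect import WATERMARK_BITS, get_watermark_match

image = cv2.imread("outputs/demo/txt2img/000000000.png")  # placeholder path
if image is not None:
    matched = get_watermark_match(image)  # bits matching the fixed 48-bit message
    print(f"{matched} of {len(WATERMARK_BITS)} watermark bits matched")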
generative_models/scripts/demo/discretization.py ADDED
@@ -0,0 +1,59 @@
1
+ import torch
2
+
3
+ from sgm.modules.diffusionmodules.discretizer import Discretization
4
+
5
+
6
+ class Img2ImgDiscretizationWrapper:
7
+ """
8
+ wraps a discretizer, and prunes the sigmas
9
+ params:
10
+ strength: float between 0.0 and 1.0. 1.0 means full sampling (all sigmas are returned)
11
+ """
12
+
13
+ def __init__(self, discretization: Discretization, strength: float = 1.0):
14
+ self.discretization = discretization
15
+ self.strength = strength
16
+ assert 0.0 <= self.strength <= 1.0
17
+
18
+ def __call__(self, *args, **kwargs):
19
+ # sigmas start large first, and decrease then
20
+ sigmas = self.discretization(*args, **kwargs)
21
+ print("sigmas after discretization, before pruning img2img:", sigmas)
22
+ sigmas = torch.flip(sigmas, (0,))
23
+ print("prune index:", max(int(self.strength * len(sigmas)), 1))
24
+ sigmas = sigmas[: max(int(self.strength * len(sigmas)), 1)]
25
+ sigmas = torch.flip(sigmas, (0,))
26
+ print("sigmas after pruning:", sigmas)
27
+ return sigmas
28
+
29
+
30
+ class Txt2NoisyDiscretizationWrapper:
31
+ """
32
+ wraps a discretizer, and prunes the sigmas
33
+ params:
34
+ strength: float between 0.0 and 1.0. 0.0 means full sampling (all sigmas are returned)
35
+ """
36
+
37
+ def __init__(
38
+ self, discretization: Discretization, strength: float = 0.0, original_steps=None
39
+ ):
40
+ self.discretization = discretization
41
+ self.strength = strength
42
+ self.original_steps = original_steps
43
+ assert 0.0 <= self.strength <= 1.0
44
+
45
+ def __call__(self, *args, **kwargs):
46
+ # sigmas start large first, and decrease then
47
+ sigmas = self.discretization(*args, **kwargs)
48
+ print("sigmas after discretization, before pruning txt2noisy:", sigmas)
49
+ sigmas = torch.flip(sigmas, (0,))
50
+ if self.original_steps is None:
51
+ steps = len(sigmas)
52
+ else:
53
+ steps = self.original_steps + 1
54
+ prune_index = max(min(int(self.strength * steps) - 1, steps - 1), 0)
55
+ sigmas = sigmas[prune_index:]
56
+ print("prune index:", prune_index)
57
+ sigmas = torch.flip(sigmas, (0,))
58
+ print("sigmas after pruning:", sigmas)
59
+ return sigmas
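A minimal usage sketch for the img2img wrapper above, assuming the `LegacyDDPMDiscretization` class referenced elsewhere in the demo helpers:

from sgm.modules.diffusionmodules.discretizer import LegacyDDPMDiscretization

from scripts.demo.discretization import Img2ImgDiscretizationWrapper

discretization = LegacyDDPMDiscretization()
# strength=0.5 keeps roughly the last half of the noise schedule for img2img
wrapped = Img2ImgDiscretizationWrapper(discretization, strength=0.5)
sigmas = wrapped(40)  # request 40 steps; pruning leaves about half of the sigmas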
generative_models/scripts/demo/sampling.py ADDED
@@ -0,0 +1,364 @@
1
+ from pytorch_lightning import seed_everything
2
+
3
+ from scripts.demo.streamlit_helpers import *
4
+
5
+ SAVE_PATH = "outputs/demo/txt2img/"
6
+
7
+ SD_XL_BASE_RATIOS = {
8
+ "0.5": (704, 1408),
9
+ "0.52": (704, 1344),
10
+ "0.57": (768, 1344),
11
+ "0.6": (768, 1280),
12
+ "0.68": (832, 1216),
13
+ "0.72": (832, 1152),
14
+ "0.78": (896, 1152),
15
+ "0.82": (896, 1088),
16
+ "0.88": (960, 1088),
17
+ "0.94": (960, 1024),
18
+ "1.0": (1024, 1024),
19
+ "1.07": (1024, 960),
20
+ "1.13": (1088, 960),
21
+ "1.21": (1088, 896),
22
+ "1.29": (1152, 896),
23
+ "1.38": (1152, 832),
24
+ "1.46": (1216, 832),
25
+ "1.67": (1280, 768),
26
+ "1.75": (1344, 768),
27
+ "1.91": (1344, 704),
28
+ "2.0": (1408, 704),
29
+ "2.09": (1472, 704),
30
+ "2.4": (1536, 640),
31
+ "2.5": (1600, 640),
32
+ "2.89": (1664, 576),
33
+ "3.0": (1728, 576),
34
+ }
35
+
36
+ VERSION2SPECS = {
37
+ "SDXL-base-1.0": {
38
+ "H": 1024,
39
+ "W": 1024,
40
+ "C": 4,
41
+ "f": 8,
42
+ "is_legacy": False,
43
+ "config": "configs/inference/sd_xl_base.yaml",
44
+ "ckpt": "checkpoints/sd_xl_base_1.0.safetensors",
45
+ },
46
+ "SDXL-base-0.9": {
47
+ "H": 1024,
48
+ "W": 1024,
49
+ "C": 4,
50
+ "f": 8,
51
+ "is_legacy": False,
52
+ "config": "configs/inference/sd_xl_base.yaml",
53
+ "ckpt": "checkpoints/sd_xl_base_0.9.safetensors",
54
+ },
55
+ "SD-2.1": {
56
+ "H": 512,
57
+ "W": 512,
58
+ "C": 4,
59
+ "f": 8,
60
+ "is_legacy": True,
61
+ "config": "configs/inference/sd_2_1.yaml",
62
+ "ckpt": "checkpoints/v2-1_512-ema-pruned.safetensors",
63
+ },
64
+ "SD-2.1-768": {
65
+ "H": 768,
66
+ "W": 768,
67
+ "C": 4,
68
+ "f": 8,
69
+ "is_legacy": True,
70
+ "config": "configs/inference/sd_2_1_768.yaml",
71
+ "ckpt": "checkpoints/v2-1_768-ema-pruned.safetensors",
72
+ },
73
+ "SDXL-refiner-0.9": {
74
+ "H": 1024,
75
+ "W": 1024,
76
+ "C": 4,
77
+ "f": 8,
78
+ "is_legacy": True,
79
+ "config": "configs/inference/sd_xl_refiner.yaml",
80
+ "ckpt": "checkpoints/sd_xl_refiner_0.9.safetensors",
81
+ },
82
+ "SDXL-refiner-1.0": {
83
+ "H": 1024,
84
+ "W": 1024,
85
+ "C": 4,
86
+ "f": 8,
87
+ "is_legacy": True,
88
+ "config": "configs/inference/sd_xl_refiner.yaml",
89
+ "ckpt": "checkpoints/sd_xl_refiner_1.0.safetensors",
90
+ },
91
+ }
92
+
93
+
94
+ def load_img(display=True, key=None, device="cuda"):
95
+ image = get_interactive_image(key=key)
96
+ if image is None:
97
+ return None
98
+ if display:
99
+ st.image(image)
100
+ w, h = image.size
101
+ print(f"loaded input image of size ({w}, {h})")
102
+ width, height = map(
103
+ lambda x: x - x % 64, (w, h)
104
+ ) # resize to integer multiple of 64
105
+ image = image.resize((width, height))
106
+ image = np.array(image.convert("RGB"))
107
+ image = image[None].transpose(0, 3, 1, 2)
108
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
109
+ return image.to(device)
110
+
111
+
112
+ def run_txt2img(
113
+ state,
114
+ version,
115
+ version_dict,
116
+ is_legacy=False,
117
+ return_latents=False,
118
+ filter=None,
119
+ stage2strength=None,
120
+ ):
121
+ if version.startswith("SDXL-base"):
122
+ W, H = st.selectbox("Resolution:", list(SD_XL_BASE_RATIOS.values()), 10)
123
+ else:
124
+ H = st.number_input("H", value=version_dict["H"], min_value=64, max_value=2048)
125
+ W = st.number_input("W", value=version_dict["W"], min_value=64, max_value=2048)
126
+ C = version_dict["C"]
127
+ F = version_dict["f"]
128
+
129
+ init_dict = {
130
+ "orig_width": W,
131
+ "orig_height": H,
132
+ "target_width": W,
133
+ "target_height": H,
134
+ }
135
+ value_dict = init_embedder_options(
136
+ get_unique_embedder_keys_from_conditioner(state["model"].conditioner),
137
+ init_dict,
138
+ prompt=prompt,
139
+ negative_prompt=negative_prompt,
140
+ )
141
+ sampler, num_rows, num_cols = init_sampling(stage2strength=stage2strength)
142
+ num_samples = num_rows * num_cols
143
+
144
+ if st.button("Sample"):
145
+ st.write(f"**Model I:** {version}")
146
+ out = do_sample(
147
+ state["model"],
148
+ sampler,
149
+ value_dict,
150
+ num_samples,
151
+ H,
152
+ W,
153
+ C,
154
+ F,
155
+ force_uc_zero_embeddings=["txt"] if not is_legacy else [],
156
+ return_latents=return_latents,
157
+ filter=filter,
158
+ )
159
+ return out
160
+
161
+
162
+ def run_img2img(
163
+ state,
164
+ version_dict,
165
+ is_legacy=False,
166
+ return_latents=False,
167
+ filter=None,
168
+ stage2strength=None,
169
+ ):
170
+ img = load_img()
171
+ if img is None:
172
+ return None
173
+ H, W = img.shape[2], img.shape[3]
174
+
175
+ init_dict = {
176
+ "orig_width": W,
177
+ "orig_height": H,
178
+ "target_width": W,
179
+ "target_height": H,
180
+ }
181
+ value_dict = init_embedder_options(
182
+ get_unique_embedder_keys_from_conditioner(state["model"].conditioner),
183
+ init_dict,
184
+ prompt=prompt,
185
+ negative_prompt=negative_prompt,
186
+ )
187
+ strength = st.number_input(
188
+ "**Img2Img Strength**", value=0.75, min_value=0.0, max_value=1.0
189
+ )
190
+ sampler, num_rows, num_cols = init_sampling(
191
+ img2img_strength=strength,
192
+ stage2strength=stage2strength,
193
+ )
194
+ num_samples = num_rows * num_cols
195
+
196
+ if st.button("Sample"):
197
+ out = do_img2img(
198
+ repeat(img, "1 ... -> n ...", n=num_samples),
199
+ state["model"],
200
+ sampler,
201
+ value_dict,
202
+ num_samples,
203
+ force_uc_zero_embeddings=["txt"] if not is_legacy else [],
204
+ return_latents=return_latents,
205
+ filter=filter,
206
+ )
207
+ return out
208
+
209
+
210
+ def apply_refiner(
211
+ input,
212
+ state,
213
+ sampler,
214
+ num_samples,
215
+ prompt,
216
+ negative_prompt,
217
+ filter=None,
218
+ finish_denoising=False,
219
+ ):
220
+ init_dict = {
221
+ "orig_width": input.shape[3] * 8,
222
+ "orig_height": input.shape[2] * 8,
223
+ "target_width": input.shape[3] * 8,
224
+ "target_height": input.shape[2] * 8,
225
+ }
226
+
227
+ value_dict = init_dict
228
+ value_dict["prompt"] = prompt
229
+ value_dict["negative_prompt"] = negative_prompt
230
+
231
+ value_dict["crop_coords_top"] = 0
232
+ value_dict["crop_coords_left"] = 0
233
+
234
+ value_dict["aesthetic_score"] = 6.0
235
+ value_dict["negative_aesthetic_score"] = 2.5
236
+
237
+ st.warning(f"refiner input shape: {input.shape}")
238
+ samples = do_img2img(
239
+ input,
240
+ state["model"],
241
+ sampler,
242
+ value_dict,
243
+ num_samples,
244
+ skip_encode=True,
245
+ filter=filter,
246
+ add_noise=not finish_denoising,
247
+ )
248
+
249
+ return samples
250
+
251
+
252
+ if __name__ == "__main__":
253
+ st.title("Stable Diffusion")
254
+ version = st.selectbox("Model Version", list(VERSION2SPECS.keys()), 0)
255
+ version_dict = VERSION2SPECS[version]
256
+ if st.checkbox("Load Model"):
257
+ mode = st.radio("Mode", ("txt2img", "img2img"), 0)
258
+ else:
259
+ mode = "skip"
260
+ st.write("__________________________")
261
+
262
+ set_lowvram_mode(st.checkbox("Low vram mode", True))
263
+
264
+ if version.startswith("SDXL-base"):
265
+ add_pipeline = st.checkbox("Load SDXL-refiner?", False)
266
+ st.write("__________________________")
267
+ else:
268
+ add_pipeline = False
269
+
270
+ seed = st.sidebar.number_input("seed", value=42, min_value=0, max_value=int(1e9))
271
+ seed_everything(seed)
272
+
273
+ save_locally, save_path = init_save_locally(os.path.join(SAVE_PATH, version))
274
+
275
+ if mode != "skip":
276
+ state = init_st(version_dict, load_filter=True)
277
+ if state["msg"]:
278
+ st.info(state["msg"])
279
+ model = state["model"]
280
+
281
+ is_legacy = version_dict["is_legacy"]
282
+
283
+ prompt = st.text_input(
284
+ "prompt",
285
+ "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
286
+ )
287
+ if is_legacy:
288
+ negative_prompt = st.text_input("negative prompt", "")
289
+ else:
290
+ negative_prompt = "" # which is unused
291
+
292
+ stage2strength = None
293
+ finish_denoising = False
294
+
295
+ if add_pipeline:
296
+ st.write("__________________________")
297
+ version2 = st.selectbox("Refiner:", ["SDXL-refiner-1.0", "SDXL-refiner-0.9"])
298
+ st.warning(
299
+ f"Running with {version2} as the second stage model. Make sure to provide (V)RAM :) "
300
+ )
301
+ st.write("**Refiner Options:**")
302
+
303
+ version_dict2 = VERSION2SPECS[version2]
304
+ state2 = init_st(version_dict2, load_filter=False)
305
+ st.info(state2["msg"])
306
+
307
+ stage2strength = st.number_input(
308
+ "**Refinement strength**", value=0.15, min_value=0.0, max_value=1.0
309
+ )
310
+
311
+ sampler2, *_ = init_sampling(
312
+ key=2,
313
+ img2img_strength=stage2strength,
314
+ specify_num_samples=False,
315
+ )
316
+ st.write("__________________________")
317
+ finish_denoising = st.checkbox("Finish denoising with refiner.", True)
318
+ if not finish_denoising:
319
+ stage2strength = None
320
+
321
+ if mode == "txt2img":
322
+ out = run_txt2img(
323
+ state,
324
+ version,
325
+ version_dict,
326
+ is_legacy=is_legacy,
327
+ return_latents=add_pipeline,
328
+ filter=state.get("filter"),
329
+ stage2strength=stage2strength,
330
+ )
331
+ elif mode == "img2img":
332
+ out = run_img2img(
333
+ state,
334
+ version_dict,
335
+ is_legacy=is_legacy,
336
+ return_latents=add_pipeline,
337
+ filter=state.get("filter"),
338
+ stage2strength=stage2strength,
339
+ )
340
+ elif mode == "skip":
341
+ out = None
342
+ else:
343
+ raise ValueError(f"unknown mode {mode}")
344
+ if isinstance(out, (tuple, list)):
345
+ samples, samples_z = out
346
+ else:
347
+ samples = out
348
+ samples_z = None
349
+
350
+ if add_pipeline and samples_z is not None:
351
+ st.write("**Running Refinement Stage**")
352
+ samples = apply_refiner(
353
+ samples_z,
354
+ state2,
355
+ sampler2,
356
+ samples_z.shape[0],
357
+ prompt=prompt,
358
+ negative_prompt=negative_prompt if is_legacy else "",
359
+ filter=state.get("filter"),
360
+ finish_denoising=finish_denoising,
361
+ )
362
+
363
+ if save_locally and samples is not None:
364
+ perform_save_locally(save_path, samples)
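The script above is a Streamlit app (typically launched via `streamlit run scripts/demo/sampling.py`); for loading a model outside the UI, a minimal sketch assuming the checkpoints listed in `VERSION2SPECS` are present under `checkpoints/` and a CUDA device is available:

from omegaconf import OmegaConf

from scripts.demo.sampling import VERSION2SPECS
from scripts.demo.streamlit_helpers import load_model_from_config

spec = VERSION2SPECS["SD-2.1"]            # any entry from the table above
config = OmegaConf.load(spec["config"])   # e.g. configs/inference/sd_2_1.yaml
model, _ = load_model_from_config(config, spec["ckpt"])  # moves the model to the GPU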
generative_models/scripts/demo/streamlit_helpers.py ADDED
@@ -0,0 +1,887 @@
1
+ import copy
2
+ import math
3
+ import os
4
+ from glob import glob
5
+ from typing import Dict, List, Optional, Tuple, Union
6
+
7
+ import cv2
8
+ import numpy as np
9
+ import streamlit as st
10
+ import torch
11
+ import torch.nn as nn
12
+ import torchvision.transforms as TT
13
+ from einops import rearrange, repeat
14
+ from imwatermark import WatermarkEncoder
15
+ from omegaconf import ListConfig, OmegaConf
16
+ from PIL import Image
17
+ from safetensors.torch import load_file as load_safetensors
18
+ from torch import autocast
19
+ from torchvision import transforms
20
+ from torchvision.utils import make_grid, save_image
21
+
22
+ from scripts.demo.discretization import (Img2ImgDiscretizationWrapper,
23
+ Txt2NoisyDiscretizationWrapper)
24
+ from scripts.util.detection.nsfw_and_watermark_dectection import \
25
+ DeepFloydDataFiltering
26
+ from sgm.inference.helpers import embed_watermark
27
+ from sgm.modules.diffusionmodules.guiders import (LinearPredictionGuider,
28
+ VanillaCFG)
29
+ from sgm.modules.diffusionmodules.sampling import (DPMPP2MSampler,
30
+ DPMPP2SAncestralSampler,
31
+ EulerAncestralSampler,
32
+ EulerEDMSampler,
33
+ HeunEDMSampler,
34
+ LinearMultistepSampler)
35
+ from sgm.util import append_dims, default, instantiate_from_config
36
+
37
+
38
+ @st.cache_resource()
39
+ def init_st(version_dict, load_ckpt=True, load_filter=True):
40
+ state = dict()
41
+ if "model" not in state:
42
+ config = version_dict["config"]
43
+ ckpt = version_dict["ckpt"]
44
+
45
+ config = OmegaConf.load(config)
46
+ model, msg = load_model_from_config(config, ckpt if load_ckpt else None)
47
+
48
+ state["msg"] = msg
49
+ state["model"] = model
50
+ state["ckpt"] = ckpt if load_ckpt else None
51
+ state["config"] = config
52
+ if load_filter:
53
+ state["filter"] = DeepFloydDataFiltering(verbose=False)
54
+ return state
55
+
56
+
57
+ def load_model(model):
58
+ model.cuda()
59
+
60
+
61
+ lowvram_mode = False
62
+
63
+
64
+ def set_lowvram_mode(mode):
65
+ global lowvram_mode
66
+ lowvram_mode = mode
67
+
68
+
69
+ def initial_model_load(model):
70
+ global lowvram_mode
71
+ if lowvram_mode:
72
+ model.model.half()
73
+ else:
74
+ model.cuda()
75
+ return model
76
+
77
+
78
+ def unload_model(model):
79
+ global lowvram_mode
80
+ if lowvram_mode:
81
+ model.cpu()
82
+ torch.cuda.empty_cache()
83
+
84
+
85
+ def load_model_from_config(config, ckpt=None, verbose=True):
86
+ model = instantiate_from_config(config.model)
87
+
88
+ if ckpt is not None:
89
+ print(f"Loading model from {ckpt}")
90
+ if ckpt.endswith("ckpt"):
91
+ pl_sd = torch.load(ckpt, map_location="cpu")
92
+ if "global_step" in pl_sd:
93
+ global_step = pl_sd["global_step"]
94
+ st.info(f"loaded ckpt from global step {global_step}")
95
+ print(f"Global Step: {pl_sd['global_step']}")
96
+ sd = pl_sd["state_dict"]
97
+ elif ckpt.endswith("safetensors"):
98
+ sd = load_safetensors(ckpt)
99
+ else:
100
+ raise NotImplementedError
101
+
102
+ msg = None
103
+
104
+ m, u = model.load_state_dict(sd, strict=False)
105
+
106
+ if len(m) > 0 and verbose:
107
+ print("missing keys:")
108
+ print(m)
109
+ if len(u) > 0 and verbose:
110
+ print("unexpected keys:")
111
+ print(u)
112
+ else:
113
+ msg = None
114
+
115
+ model = initial_model_load(model)
116
+ model.eval()
117
+ return model, msg
118
+
119
+
120
+ def get_unique_embedder_keys_from_conditioner(conditioner):
121
+ return list(set([x.input_key for x in conditioner.embedders]))
122
+
123
+
124
+ def init_embedder_options(keys, init_dict, prompt=None, negative_prompt=None):
125
+ # Hardcoded demo settings; might undergo some changes in the future
126
+
127
+ value_dict = {}
128
+ for key in keys:
129
+ if key == "txt":
130
+ if prompt is None:
131
+ prompt = "A professional photograph of an astronaut riding a pig"
132
+ if negative_prompt is None:
133
+ negative_prompt = ""
134
+
135
+ prompt = st.text_input("Prompt", prompt)
136
+ negative_prompt = st.text_input("Negative prompt", negative_prompt)
137
+
138
+ value_dict["prompt"] = prompt
139
+ value_dict["negative_prompt"] = negative_prompt
140
+
141
+ if key == "original_size_as_tuple":
142
+ orig_width = st.number_input(
143
+ "orig_width",
144
+ value=init_dict["orig_width"],
145
+ min_value=16,
146
+ )
147
+ orig_height = st.number_input(
148
+ "orig_height",
149
+ value=init_dict["orig_height"],
150
+ min_value=16,
151
+ )
152
+
153
+ value_dict["orig_width"] = orig_width
154
+ value_dict["orig_height"] = orig_height
155
+
156
+ if key == "crop_coords_top_left":
157
+ crop_coord_top = st.number_input("crop_coords_top", value=0, min_value=0)
158
+ crop_coord_left = st.number_input("crop_coords_left", value=0, min_value=0)
159
+
160
+ value_dict["crop_coords_top"] = crop_coord_top
161
+ value_dict["crop_coords_left"] = crop_coord_left
162
+
163
+ if key == "aesthetic_score":
164
+ value_dict["aesthetic_score"] = 6.0
165
+ value_dict["negative_aesthetic_score"] = 2.5
166
+
167
+ if key == "target_size_as_tuple":
168
+ value_dict["target_width"] = init_dict["target_width"]
169
+ value_dict["target_height"] = init_dict["target_height"]
170
+
171
+ if key in ["fps_id", "fps"]:
172
+ fps = st.number_input("fps", value=6, min_value=1)
173
+
174
+ value_dict["fps"] = fps
175
+ value_dict["fps_id"] = fps - 1
176
+
177
+ if key == "motion_bucket_id":
178
+ mb_id = st.number_input("motion bucket id", 0, 511, value=127)
179
+ value_dict["motion_bucket_id"] = mb_id
180
+
181
+ if key == "pool_image":
182
+ st.text("Image for pool conditioning")
183
+ image = load_img(
184
+ key="pool_image_input",
185
+ size=224,
186
+ center_crop=True,
187
+ )
188
+ if image is None:
189
+ st.info("Need an image here")
190
+ image = torch.zeros(1, 3, 224, 224)
191
+ value_dict["pool_image"] = image
192
+
193
+ return value_dict
194
+
195
+
196
+ def perform_save_locally(save_path, samples):
197
+ os.makedirs(os.path.join(save_path), exist_ok=True)
198
+ base_count = len(os.listdir(os.path.join(save_path)))
199
+ samples = embed_watermark(samples)
200
+ for sample in samples:
201
+ sample = 255.0 * rearrange(sample.cpu().numpy(), "c h w -> h w c")
202
+ Image.fromarray(sample.astype(np.uint8)).save(
203
+ os.path.join(save_path, f"{base_count:09}.png")
204
+ )
205
+ base_count += 1
206
+
207
+
208
+ def init_save_locally(_dir, init_value: bool = False):
209
+ save_locally = st.sidebar.checkbox("Save images locally", value=init_value)
210
+ if save_locally:
211
+ save_path = st.text_input("Save path", value=os.path.join(_dir, "samples"))
212
+ else:
213
+ save_path = None
214
+
215
+ return save_locally, save_path
216
+
217
+
218
+ def get_guider(options, key):
219
+ guider = st.sidebar.selectbox(
220
+ f"Discretization #{key}",
221
+ [
222
+ "VanillaCFG",
223
+ "IdentityGuider",
224
+ "LinearPredictionGuider",
225
+ ],
226
+ options.get("guider", 0),
227
+ )
228
+
229
+ additional_guider_kwargs = options.pop("additional_guider_kwargs", {})
230
+
231
+ if guider == "IdentityGuider":
232
+ guider_config = {
233
+ "target": "sgm.modules.diffusionmodules.guiders.IdentityGuider"
234
+ }
235
+ elif guider == "VanillaCFG":
236
+ scale = st.number_input(
237
+ f"cfg-scale #{key}",
238
+ value=options.get("cfg", 5.0),
239
+ min_value=0.0,
240
+ )
241
+
242
+ guider_config = {
243
+ "target": "sgm.modules.diffusionmodules.guiders.VanillaCFG",
244
+ "params": {
245
+ "scale": scale,
246
+ **additional_guider_kwargs,
247
+ },
248
+ }
249
+ elif guider == "LinearPredictionGuider":
250
+ max_scale = st.number_input(
251
+ f"max-cfg-scale #{key}",
252
+ value=options.get("cfg", 1.5),
253
+ min_value=1.0,
254
+ )
255
+ min_scale = st.number_input(
256
+ "min guidance scale",
257
+ value=options.get("min_cfg", 1.0),
258
+ min_value=1.0,
259
+ max_value=10.0,
260
+ )
261
+
262
+ guider_config = {
263
+ "target": "sgm.modules.diffusionmodules.guiders.LinearPredictionGuider",
264
+ "params": {
265
+ "max_scale": max_scale,
266
+ "min_scale": min_scale,
267
+ "num_frames": options["num_frames"],
268
+ **additional_guider_kwargs,
269
+ },
270
+ }
271
+ else:
272
+ raise NotImplementedError
273
+ return guider_config
274
+
275
+
276
+ def init_sampling(
277
+ key=1,
278
+ img2img_strength: Optional[float] = None,
279
+ specify_num_samples: bool = True,
280
+ stage2strength: Optional[float] = None,
281
+ options: Optional[Dict[str, int]] = None,
282
+ ):
283
+ options = {} if options is None else options
284
+
285
+ num_rows, num_cols = 1, 1
286
+ if specify_num_samples:
287
+ num_cols = st.number_input(
288
+ f"num cols #{key}", value=num_cols, min_value=1, max_value=10
289
+ )
290
+
291
+ steps = st.sidebar.number_input(
292
+ f"steps #{key}", value=options.get("num_steps", 40), min_value=1, max_value=1000
293
+ )
294
+ sampler = st.sidebar.selectbox(
295
+ f"Sampler #{key}",
296
+ [
297
+ "EulerEDMSampler",
298
+ "HeunEDMSampler",
299
+ "EulerAncestralSampler",
300
+ "DPMPP2SAncestralSampler",
301
+ "DPMPP2MSampler",
302
+ "LinearMultistepSampler",
303
+ ],
304
+ options.get("sampler", 0),
305
+ )
306
+ discretization = st.sidebar.selectbox(
307
+ f"Discretization #{key}",
308
+ [
309
+ "LegacyDDPMDiscretization",
310
+ "EDMDiscretization",
311
+ ],
312
+ options.get("discretization", 0),
313
+ )
314
+
315
+ discretization_config = get_discretization(discretization, options=options, key=key)
316
+
317
+ guider_config = get_guider(options=options, key=key)
318
+
319
+ sampler = get_sampler(sampler, steps, discretization_config, guider_config, key=key)
320
+ if img2img_strength is not None:
321
+ st.warning(
322
+ f"Wrapping {sampler.__class__.__name__} with Img2ImgDiscretizationWrapper"
323
+ )
324
+ sampler.discretization = Img2ImgDiscretizationWrapper(
325
+ sampler.discretization, strength=img2img_strength
326
+ )
327
+ if stage2strength is not None:
328
+ sampler.discretization = Txt2NoisyDiscretizationWrapper(
329
+ sampler.discretization, strength=stage2strength, original_steps=steps
330
+ )
331
+ return sampler, num_rows, num_cols
332
+
333
+
334
+ def get_discretization(discretization, options, key=1):
335
+ if discretization == "LegacyDDPMDiscretization":
336
+ discretization_config = {
337
+ "target": "sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization",
338
+ }
339
+ elif discretization == "EDMDiscretization":
340
+ sigma_min = st.number_input(
341
+ f"sigma_min #{key}", value=options.get("sigma_min", 0.03)
342
+ ) # 0.0292
343
+ sigma_max = st.number_input(
344
+ f"sigma_max #{key}", value=options.get("sigma_max", 14.61)
345
+ ) # 14.6146
346
+ rho = st.number_input(f"rho #{key}", value=options.get("rho", 3.0))
347
+ discretization_config = {
348
+ "target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization",
349
+ "params": {
350
+ "sigma_min": sigma_min,
351
+ "sigma_max": sigma_max,
352
+ "rho": rho,
353
+ },
354
+ }
355
+
356
+ return discretization_config
357
+
358
+
359
+ def get_sampler(sampler_name, steps, discretization_config, guider_config, key=1):
360
+ if sampler_name == "EulerEDMSampler" or sampler_name == "HeunEDMSampler":
361
+ s_churn = st.sidebar.number_input(f"s_churn #{key}", value=0.0, min_value=0.0)
362
+ s_tmin = st.sidebar.number_input(f"s_tmin #{key}", value=0.0, min_value=0.0)
363
+ s_tmax = st.sidebar.number_input(f"s_tmax #{key}", value=999.0, min_value=0.0)
364
+ s_noise = st.sidebar.number_input(f"s_noise #{key}", value=1.0, min_value=0.0)
365
+
366
+ if sampler_name == "EulerEDMSampler":
367
+ sampler = EulerEDMSampler(
368
+ num_steps=steps,
369
+ discretization_config=discretization_config,
370
+ guider_config=guider_config,
371
+ s_churn=s_churn,
372
+ s_tmin=s_tmin,
373
+ s_tmax=s_tmax,
374
+ s_noise=s_noise,
375
+ verbose=True,
376
+ )
377
+ elif sampler_name == "HeunEDMSampler":
378
+ sampler = HeunEDMSampler(
379
+ num_steps=steps,
380
+ discretization_config=discretization_config,
381
+ guider_config=guider_config,
382
+ s_churn=s_churn,
383
+ s_tmin=s_tmin,
384
+ s_tmax=s_tmax,
385
+ s_noise=s_noise,
386
+ verbose=True,
387
+ )
388
+ elif (
389
+ sampler_name == "EulerAncestralSampler"
390
+ or sampler_name == "DPMPP2SAncestralSampler"
391
+ ):
392
+ s_noise = st.sidebar.number_input("s_noise", value=1.0, min_value=0.0)
393
+ eta = st.sidebar.number_input("eta", value=1.0, min_value=0.0)
394
+
395
+ if sampler_name == "EulerAncestralSampler":
396
+ sampler = EulerAncestralSampler(
397
+ num_steps=steps,
398
+ discretization_config=discretization_config,
399
+ guider_config=guider_config,
400
+ eta=eta,
401
+ s_noise=s_noise,
402
+ verbose=True,
403
+ )
404
+ elif sampler_name == "DPMPP2SAncestralSampler":
405
+ sampler = DPMPP2SAncestralSampler(
406
+ num_steps=steps,
407
+ discretization_config=discretization_config,
408
+ guider_config=guider_config,
409
+ eta=eta,
410
+ s_noise=s_noise,
411
+ verbose=True,
412
+ )
413
+ elif sampler_name == "DPMPP2MSampler":
414
+ sampler = DPMPP2MSampler(
415
+ num_steps=steps,
416
+ discretization_config=discretization_config,
417
+ guider_config=guider_config,
418
+ verbose=True,
419
+ )
420
+ elif sampler_name == "LinearMultistepSampler":
421
+ order = st.sidebar.number_input("order", value=4, min_value=1)
422
+ sampler = LinearMultistepSampler(
423
+ num_steps=steps,
424
+ discretization_config=discretization_config,
425
+ guider_config=guider_config,
426
+ order=order,
427
+ verbose=True,
428
+ )
429
+ else:
430
+ raise ValueError(f"unknown sampler {sampler_name}!")
431
+
432
+ return sampler
433
+
434
+
435
+ def get_interactive_image() -> Image.Image:
436
+ image = st.file_uploader("Input", type=["jpg", "JPEG", "png"])
437
+ if image is not None:
438
+ image = Image.open(image)
439
+ if not image.mode == "RGB":
440
+ image = image.convert("RGB")
441
+ return image
442
+
443
+
444
+ def load_img(
445
+ display: bool = True,
446
+ size: Union[None, int, Tuple[int, int]] = None,
447
+ center_crop: bool = False,
448
+ key=None,
+ ):
449
+ image = get_interactive_image(key=key)
450
+ if image is None:
451
+ return None
452
+ if display:
453
+ st.image(image)
454
+ w, h = image.size
455
+ print(f"loaded input image of size ({w}, {h})")
456
+
457
+ transform = []
458
+ if size is not None:
459
+ transform.append(transforms.Resize(size))
460
+ if center_crop:
461
+ transform.append(transforms.CenterCrop(size))
462
+ transform.append(transforms.ToTensor())
463
+ transform.append(transforms.Lambda(lambda x: 2.0 * x - 1.0))
464
+
465
+ transform = transforms.Compose(transform)
466
+ img = transform(image)[None, ...]
467
+ st.text(f"input min/max/mean: {img.min():.3f}/{img.max():.3f}/{img.mean():.3f}")
468
+ return img
469
+
470
+
471
+ def get_init_img(batch_size=1, key=None):
472
+ init_image = load_img(key=key).cuda()
473
+ init_image = repeat(init_image, "1 ... -> b ...", b=batch_size)
474
+ return init_image
475
+
476
+
477
+ def do_sample(
478
+ model,
479
+ sampler,
480
+ value_dict,
481
+ num_samples,
482
+ H,
483
+ W,
484
+ C,
485
+ F,
486
+ force_uc_zero_embeddings: Optional[List] = None,
487
+ force_cond_zero_embeddings: Optional[List] = None,
488
+ batch2model_input: List = None,
489
+ return_latents=False,
490
+ filter=None,
491
+ T=None,
492
+ additional_batch_uc_fields=None,
493
+ decoding_t=None,
494
+ ):
495
+ force_uc_zero_embeddings = default(force_uc_zero_embeddings, [])
496
+ batch2model_input = default(batch2model_input, [])
497
+ additional_batch_uc_fields = default(additional_batch_uc_fields, [])
498
+
499
+ st.text("Sampling")
500
+
501
+ outputs = st.empty()
502
+ precision_scope = autocast
503
+ with torch.no_grad():
504
+ with precision_scope("cuda"):
505
+ with model.ema_scope():
506
+ if T is not None:
507
+ num_samples = [num_samples, T]
508
+ else:
509
+ num_samples = [num_samples]
510
+
511
+ load_model(model.conditioner)
512
+ batch, batch_uc = get_batch(
513
+ get_unique_embedder_keys_from_conditioner(model.conditioner),
514
+ value_dict,
515
+ num_samples,
516
+ T=T,
517
+ additional_batch_uc_fields=additional_batch_uc_fields,
518
+ )
519
+
520
+ c, uc = model.conditioner.get_unconditional_conditioning(
521
+ batch,
522
+ batch_uc=batch_uc,
523
+ force_uc_zero_embeddings=force_uc_zero_embeddings,
524
+ force_cond_zero_embeddings=force_cond_zero_embeddings,
525
+ )
526
+ unload_model(model.conditioner)
527
+
528
+ for k in c:
529
+ if not k == "crossattn":
530
+ c[k], uc[k] = map(
531
+ lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc)
532
+ )
533
+ if k in ["crossattn", "concat"] and T is not None:
534
+ uc[k] = repeat(uc[k], "b ... -> b t ...", t=T)
535
+ uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=T)
536
+ c[k] = repeat(c[k], "b ... -> b t ...", t=T)
537
+ c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=T)
538
+
539
+ additional_model_inputs = {}
540
+ for k in batch2model_input:
541
+ if k == "image_only_indicator":
542
+ assert T is not None
543
+
544
+ if isinstance(
545
+ sampler.guider, (VanillaCFG, LinearPredictionGuider)
546
+ ):
547
+ additional_model_inputs[k] = torch.zeros(
548
+ num_samples[0] * 2, num_samples[1]
549
+ ).to("cuda")
550
+ else:
551
+ additional_model_inputs[k] = torch.zeros(num_samples).to(
552
+ "cuda"
553
+ )
554
+ else:
555
+ additional_model_inputs[k] = batch[k]
556
+
557
+ shape = (math.prod(num_samples), C, H // F, W // F)
558
+ randn = torch.randn(shape).to("cuda")
559
+
560
+ def denoiser(input, sigma, c):
561
+ return model.denoiser(
562
+ model.model, input, sigma, c, **additional_model_inputs
563
+ )
564
+
565
+ load_model(model.denoiser)
566
+ load_model(model.model)
567
+ samples_z = sampler(denoiser, randn, cond=c, uc=uc)
568
+ unload_model(model.model)
569
+ unload_model(model.denoiser)
570
+
571
+ load_model(model.first_stage_model)
572
+ model.en_and_decode_n_samples_a_time = (
573
+ decoding_t # Decode n frames at a time
574
+ )
575
+ samples_x = model.decode_first_stage(samples_z)
576
+ samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
577
+ unload_model(model.first_stage_model)
578
+
579
+ if filter is not None:
580
+ samples = filter(samples)
581
+
582
+ if T is None:
583
+ grid = torch.stack([samples])
584
+ grid = rearrange(grid, "n b c h w -> (n h) (b w) c")
585
+ outputs.image(grid.cpu().numpy())
586
+ else:
587
+ as_vids = rearrange(samples, "(b t) c h w -> b t c h w", t=T)
588
+ for i, vid in enumerate(as_vids):
589
+ grid = rearrange(make_grid(vid, nrow=4), "c h w -> h w c")
590
+ st.image(
591
+ grid.cpu().numpy(),
592
+ f"Sample #{i} as image",
593
+ )
594
+
595
+ if return_latents:
596
+ return samples, samples_z
597
+ return samples
598
+
599
+
600
+ def get_batch(
601
+ keys,
602
+ value_dict: dict,
603
+ N: Union[List, ListConfig],
604
+ device: str = "cuda",
605
+ T: int = None,
606
+ additional_batch_uc_fields: List[str] = [],
607
+ ):
608
+ # Hardcoded demo setups; might undergo some changes in the future
609
+
610
+ batch = {}
611
+ batch_uc = {}
612
+
613
+ for key in keys:
614
+ if key == "txt":
615
+ batch["txt"] = [value_dict["prompt"]] * math.prod(N)
616
+
617
+ batch_uc["txt"] = [value_dict["negative_prompt"]] * math.prod(N)
618
+
619
+ elif key == "original_size_as_tuple":
620
+ batch["original_size_as_tuple"] = (
621
+ torch.tensor([value_dict["orig_height"], value_dict["orig_width"]])
622
+ .to(device)
623
+ .repeat(math.prod(N), 1)
624
+ )
625
+ elif key == "crop_coords_top_left":
626
+ batch["crop_coords_top_left"] = (
627
+ torch.tensor(
628
+ [value_dict["crop_coords_top"], value_dict["crop_coords_left"]]
629
+ )
630
+ .to(device)
631
+ .repeat(math.prod(N), 1)
632
+ )
633
+ elif key == "aesthetic_score":
634
+ batch["aesthetic_score"] = (
635
+ torch.tensor([value_dict["aesthetic_score"]])
636
+ .to(device)
637
+ .repeat(math.prod(N), 1)
638
+ )
639
+ batch_uc["aesthetic_score"] = (
640
+ torch.tensor([value_dict["negative_aesthetic_score"]])
641
+ .to(device)
642
+ .repeat(math.prod(N), 1)
643
+ )
644
+
645
+ elif key == "target_size_as_tuple":
646
+ batch["target_size_as_tuple"] = (
647
+ torch.tensor([value_dict["target_height"], value_dict["target_width"]])
648
+ .to(device)
649
+ .repeat(math.prod(N), 1)
650
+ )
651
+ elif key == "fps":
652
+ batch[key] = (
653
+ torch.tensor([value_dict["fps"]]).to(device).repeat(math.prod(N))
654
+ )
655
+ elif key == "fps_id":
656
+ batch[key] = (
657
+ torch.tensor([value_dict["fps_id"]]).to(device).repeat(math.prod(N))
658
+ )
659
+ elif key == "motion_bucket_id":
660
+ batch[key] = (
661
+ torch.tensor([value_dict["motion_bucket_id"]])
662
+ .to(device)
663
+ .repeat(math.prod(N))
664
+ )
665
+ elif key == "pool_image":
666
+ batch[key] = repeat(value_dict[key], "1 ... -> b ...", b=math.prod(N)).to(
667
+ device, dtype=torch.half
668
+ )
669
+ elif key == "cond_aug":
670
+ batch[key] = repeat(
671
+ torch.tensor([value_dict["cond_aug"]]).to("cuda"),
672
+ "1 -> b",
673
+ b=math.prod(N),
674
+ )
675
+ elif key == "cond_frames":
676
+ batch[key] = repeat(value_dict["cond_frames"], "1 ... -> b ...", b=N[0])
677
+ elif key == "cond_frames_without_noise":
678
+ batch[key] = repeat(
679
+ value_dict["cond_frames_without_noise"], "1 ... -> b ...", b=N[0]
680
+ )
681
+ else:
682
+ batch[key] = value_dict[key]
683
+
684
+ if T is not None:
685
+ batch["num_video_frames"] = T
686
+
687
+ for key in batch.keys():
688
+ if key not in batch_uc and isinstance(batch[key], torch.Tensor):
689
+ batch_uc[key] = torch.clone(batch[key])
690
+ elif key in additional_batch_uc_fields and key not in batch_uc:
691
+ batch_uc[key] = copy.copy(batch[key])
692
+ return batch, batch_uc
693
+
694
+
695
+ @torch.no_grad()
696
+ def do_img2img(
697
+ img,
698
+ model,
699
+ sampler,
700
+ value_dict,
701
+ num_samples,
702
+ force_uc_zero_embeddings: Optional[List] = None,
703
+ force_cond_zero_embeddings: Optional[List] = None,
704
+ additional_kwargs={},
705
+ offset_noise_level: int = 0.0,
706
+ return_latents=False,
707
+ skip_encode=False,
708
+ filter=None,
709
+ add_noise=True,
710
+ ):
711
+ st.text("Sampling")
712
+
713
+ outputs = st.empty()
714
+ precision_scope = autocast
715
+ with torch.no_grad():
716
+ with precision_scope("cuda"):
717
+ with model.ema_scope():
718
+ load_model(model.conditioner)
719
+ batch, batch_uc = get_batch(
720
+ get_unique_embedder_keys_from_conditioner(model.conditioner),
721
+ value_dict,
722
+ [num_samples],
723
+ )
724
+ c, uc = model.conditioner.get_unconditional_conditioning(
725
+ batch,
726
+ batch_uc=batch_uc,
727
+ force_uc_zero_embeddings=force_uc_zero_embeddings,
728
+ force_cond_zero_embeddings=force_cond_zero_embeddings,
729
+ )
730
+ unload_model(model.conditioner)
731
+ for k in c:
732
+ c[k], uc[k] = map(lambda y: y[k][:num_samples].to("cuda"), (c, uc))
733
+
734
+ for k in additional_kwargs:
735
+ c[k] = uc[k] = additional_kwargs[k]
736
+ if skip_encode:
737
+ z = img
738
+ else:
739
+ load_model(model.first_stage_model)
740
+ z = model.encode_first_stage(img)
741
+ unload_model(model.first_stage_model)
742
+
743
+ noise = torch.randn_like(z)
744
+
745
+ sigmas = sampler.discretization(sampler.num_steps).cuda()
746
+ sigma = sigmas[0]
747
+
748
+ st.info(f"all sigmas: {sigmas}")
749
+ st.info(f"noising sigma: {sigma}")
750
+ if offset_noise_level > 0.0:
751
+ noise = noise + offset_noise_level * append_dims(
752
+ torch.randn(z.shape[0], device=z.device), z.ndim
753
+ )
754
+ if add_noise:
755
+ noised_z = z + noise * append_dims(sigma, z.ndim).cuda()
756
+ noised_z = noised_z / torch.sqrt(
757
+ 1.0 + sigmas[0] ** 2.0
758
+ ) # Note: hardcoded to DDPM-like scaling. need to generalize later.
759
+ else:
760
+ noised_z = z / torch.sqrt(1.0 + sigmas[0] ** 2.0)
761
+
762
+ def denoiser(x, sigma, c):
763
+ return model.denoiser(model.model, x, sigma, c)
764
+
765
+ load_model(model.denoiser)
766
+ load_model(model.model)
767
+ samples_z = sampler(denoiser, noised_z, cond=c, uc=uc)
768
+ unload_model(model.model)
769
+ unload_model(model.denoiser)
770
+
771
+ load_model(model.first_stage_model)
772
+ samples_x = model.decode_first_stage(samples_z)
773
+ unload_model(model.first_stage_model)
774
+ samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
775
+
776
+ if filter is not None:
777
+ samples = filter(samples)
778
+
779
+ grid = torch.stack([samples])
+ grid = rearrange(grid, "n b c h w -> (n h) (b w) c")
780
+ outputs.image(grid.cpu().numpy())
781
+ if return_latents:
782
+ return samples, samples_z
783
+ return samples
784
+
785
+
786
+ def get_resizing_factor(
787
+ desired_shape: Tuple[int, int], current_shape: Tuple[int, int]
788
+ ) -> float:
789
+ r_bound = desired_shape[1] / desired_shape[0]
790
+ aspect_r = current_shape[1] / current_shape[0]
791
+ if r_bound >= 1.0:
792
+ if aspect_r >= r_bound:
793
+ factor = min(desired_shape) / min(current_shape)
794
+ else:
795
+ if aspect_r < 1.0:
796
+ factor = max(desired_shape) / min(current_shape)
797
+ else:
798
+ factor = max(desired_shape) / max(current_shape)
799
+ else:
800
+ if aspect_r <= r_bound:
801
+ factor = min(desired_shape) / min(current_shape)
802
+ else:
803
+ if aspect_r > 1:
804
+ factor = max(desired_shape) / min(current_shape)
805
+ else:
806
+ factor = max(desired_shape) / max(current_shape)
807
+
808
+ return factor
809
+
810
+
811
+ def get_interactive_image(key=None) -> Image.Image:
812
+ image = st.file_uploader("Input", type=["jpg", "JPEG", "png"], key=key)
813
+ if image is not None:
814
+ image = Image.open(image)
815
+ if not image.mode == "RGB":
816
+ image = image.convert("RGB")
817
+ return image
818
+
819
+
820
+ def load_img_for_prediction(
821
+ W: int, H: int, display=True, key=None, device="cuda"
822
+ ) -> torch.Tensor:
823
+ image = get_interactive_image(key=key)
824
+ if image is None:
825
+ return None
826
+ if display:
827
+ st.image(image)
828
+ w, h = image.size
829
+
830
+ image = np.array(image).transpose(2, 0, 1)
831
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 255.0
832
+ image = image.unsqueeze(0)
833
+
834
+ rfs = get_resizing_factor((H, W), (h, w))
835
+ resize_size = [int(np.ceil(rfs * s)) for s in (h, w)]
836
+ top = (resize_size[0] - H) // 2
837
+ left = (resize_size[1] - W) // 2
838
+
839
+ image = torch.nn.functional.interpolate(
840
+ image, resize_size, mode="area", antialias=False
841
+ )
842
+ image = TT.functional.crop(image, top=top, left=left, height=H, width=W)
843
+
844
+ if display:
845
+ numpy_img = np.transpose(image[0].numpy(), (1, 2, 0))
846
+ pil_image = Image.fromarray((numpy_img * 255).astype(np.uint8))
847
+ st.image(pil_image)
848
+ return image.to(device) * 2.0 - 1.0
849
+
850
+
851
+ def save_video_as_grid_and_mp4(
852
+ video_batch: torch.Tensor, save_path: str, T: int, fps: int = 5
853
+ ):
854
+ os.makedirs(save_path, exist_ok=True)
855
+ base_count = len(glob(os.path.join(save_path, "*.mp4")))
856
+
857
+ video_batch = rearrange(video_batch, "(b t) c h w -> b t c h w", t=T)
858
+ video_batch = embed_watermark(video_batch)
859
+ for vid in video_batch:
860
+ save_image(vid, fp=os.path.join(save_path, f"{base_count:06d}.png"), nrow=4)
861
+
862
+ video_path = os.path.join(save_path, f"{base_count:06d}.mp4")
863
+
864
+ writer = cv2.VideoWriter(
865
+ video_path,
866
+ cv2.VideoWriter_fourcc(*"MP4V"),
867
+ fps,
868
+ (vid.shape[-1], vid.shape[-2]),
869
+ )
870
+
871
+ vid = (
872
+ (rearrange(vid, "t c h w -> t h w c") * 255).cpu().numpy().astype(np.uint8)
873
+ )
874
+ for frame in vid:
875
+ frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
876
+ writer.write(frame)
877
+
878
+ writer.release()
879
+
880
+ video_path_h264 = video_path[:-4] + "_h264.mp4"
881
+ os.system(f"ffmpeg -i {video_path} -c:v libx264 {video_path_h264}")
882
+
883
+ with open(video_path_h264, "rb") as f:
884
+ video_bytes = f.read()
885
+ st.video(video_bytes)
886
+
887
+ base_count += 1
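As a concrete check of `get_resizing_factor` above, assuming the repository's dependencies are installed: fitting a 720x1280 (h, w) frame into the 576x1024 shape used by the video demo scales by min(desired)/min(current):

from scripts.demo.streamlit_helpers import get_resizing_factor

# both shapes have a 16:9 aspect ratio, so factor = min(576, 1024) / min(720, 1280) = 0.8
factor = get_resizing_factor((576, 1024), (720, 1280))
assert abs(factor - 0.8) < 1e-6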
generative_models/scripts/demo/turbo.py ADDED
@@ -0,0 +1,223 @@
1
+ from streamlit_helpers import *
2
+ from st_keyup import st_keyup
3
+ from sgm.modules.diffusionmodules.sampling import EulerAncestralSampler
4
+
5
+ VERSION2SPECS = {
6
+ "SDXL-Turbo": {
7
+ "H": 512,
8
+ "W": 512,
9
+ "C": 4,
10
+ "f": 8,
11
+ "is_legacy": False,
12
+ "config": "configs/inference/sd_xl_base.yaml",
13
+ "ckpt": "checkpoints/sd_xl_turbo_1.0.safetensors",
14
+ },
15
+ "SD-Turbo": {
16
+ "H": 512,
17
+ "W": 512,
18
+ "C": 4,
19
+ "f": 8,
20
+ "is_legacy": False,
21
+ "config": "configs/inference/sd_2_1.yaml",
22
+ "ckpt": "checkpoints/sd_turbo.safetensors",
23
+ },
24
+ }
25
+
26
+
27
+ class SubstepSampler(EulerAncestralSampler):
28
+ def __init__(self, n_sample_steps=1, *args, **kwargs):
29
+ super().__init__(*args, **kwargs)
30
+ self.n_sample_steps = n_sample_steps
31
+ self.steps_subset = [0, 100, 200, 300, 1000]
32
+
33
+ def prepare_sampling_loop(self, x, cond, uc=None, num_steps=None):
34
+ sigmas = self.discretization(
35
+ self.num_steps if num_steps is None else num_steps, device=self.device
36
+ )
37
+ sigmas = sigmas[
38
+ self.steps_subset[: self.n_sample_steps] + self.steps_subset[-1:]
39
+ ]
40
+ uc = cond
41
+ x *= torch.sqrt(1.0 + sigmas[0] ** 2.0)
42
+ num_sigmas = len(sigmas)
43
+ s_in = x.new_ones([x.shape[0]])
44
+ return x, s_in, sigmas, num_sigmas, cond, uc
45
+
46
+
47
+ def seeded_randn(shape, seed):
48
+ randn = np.random.RandomState(seed).randn(*shape)
49
+ randn = torch.from_numpy(randn).to(device="cuda", dtype=torch.float32)
50
+ return randn
51
+
52
+
53
+ class SeededNoise:
54
+ def __init__(self, seed):
55
+ self.seed = seed
56
+
57
+ def __call__(self, x):
58
+ self.seed = self.seed + 1
59
+ return seeded_randn(x.shape, self.seed)
60
+
61
+
62
+ def init_embedder_options(keys, init_dict, prompt=None, negative_prompt=None):
63
+ value_dict = {}
64
+ for key in keys:
65
+ if key == "txt":
66
+ value_dict["prompt"] = prompt
67
+ value_dict["negative_prompt"] = ""
68
+
69
+ if key == "original_size_as_tuple":
70
+ orig_width = init_dict["orig_width"]
71
+ orig_height = init_dict["orig_height"]
72
+
73
+ value_dict["orig_width"] = orig_width
74
+ value_dict["orig_height"] = orig_height
75
+
76
+ if key == "crop_coords_top_left":
77
+ crop_coord_top = 0
78
+ crop_coord_left = 0
79
+
80
+ value_dict["crop_coords_top"] = crop_coord_top
81
+ value_dict["crop_coords_left"] = crop_coord_left
82
+
83
+ if key == "aesthetic_score":
84
+ value_dict["aesthetic_score"] = 6.0
85
+ value_dict["negative_aesthetic_score"] = 2.5
86
+
87
+ if key == "target_size_as_tuple":
88
+ value_dict["target_width"] = init_dict["target_width"]
89
+ value_dict["target_height"] = init_dict["target_height"]
90
+
91
+ return value_dict
92
+
93
+
94
+ def sample(
95
+ model,
96
+ sampler,
97
+ prompt="A lush garden with oversized flowers and vibrant colors, inhabited by miniature animals.",
98
+ H=1024,
99
+ W=1024,
100
+ seed=0,
101
+ filter=None,
102
+ ):
103
+ F = 8
104
+ C = 4
105
+ shape = (1, C, H // F, W // F)
106
+
107
+ value_dict = init_embedder_options(
108
+ keys=get_unique_embedder_keys_from_conditioner(model.conditioner),
109
+ init_dict={
110
+ "orig_width": W,
111
+ "orig_height": H,
112
+ "target_width": W,
113
+ "target_height": H,
114
+ },
115
+ prompt=prompt,
116
+ )
117
+
118
+ if seed is None:
119
+ seed = torch.seed()
120
+ precision_scope = autocast
121
+ with torch.no_grad():
122
+ with precision_scope("cuda"):
123
+ batch, batch_uc = get_batch(
124
+ get_unique_embedder_keys_from_conditioner(model.conditioner),
125
+ value_dict,
126
+ [1],
127
+ )
128
+ c = model.conditioner(batch)
129
+ uc = None
130
+ randn = seeded_randn(shape, seed)
131
+
132
+ def denoiser(input, sigma, c):
133
+ return model.denoiser(
134
+ model.model,
135
+ input,
136
+ sigma,
137
+ c,
138
+ )
139
+
140
+ samples_z = sampler(denoiser, randn, cond=c, uc=uc)
141
+ samples_x = model.decode_first_stage(samples_z)
142
+ samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
143
+ if filter is not None:
144
+ samples = filter(samples)
145
+ samples = (
146
+ (255 * samples)
147
+ .to(dtype=torch.uint8)
148
+ .permute(0, 2, 3, 1)
149
+ .detach()
150
+ .cpu()
151
+ .numpy()
152
+ )
153
+ return samples
154
+
155
+
156
+ def v_spacer(height) -> None:
157
+ for _ in range(height):
158
+ st.write("\n")
159
+
160
+
161
+ if __name__ == "__main__":
162
+ st.title("Turbo")
163
+
164
+ head_cols = st.columns([1, 1, 1])
165
+ with head_cols[0]:
166
+ version = st.selectbox("Model Version", list(VERSION2SPECS.keys()), 0)
167
+ version_dict = VERSION2SPECS[version]
168
+
169
+ with head_cols[1]:
170
+ v_spacer(2)
171
+ if st.checkbox("Load Model"):
172
+ mode = "txt2img"
173
+ else:
174
+ mode = "skip"
175
+
176
+ if mode != "skip":
177
+ state = init_st(version_dict, load_filter=True)
178
+ if state["msg"]:
179
+ st.info(state["msg"])
180
+ model = state["model"]
181
+ load_model(model)
182
+
183
+ # seed
184
+ if "seed" not in st.session_state:
185
+ st.session_state.seed = 0
186
+
187
+ def increment_counter():
188
+ st.session_state.seed += 1
189
+
190
+ def decrement_counter():
191
+ if st.session_state.seed > 0:
192
+ st.session_state.seed -= 1
193
+
194
+ with head_cols[2]:
195
+ n_steps = st.number_input(label="number of steps", min_value=1, max_value=4)
196
+
197
+ sampler = SubstepSampler(
198
+ n_sample_steps=1,
199
+ num_steps=1000,
200
+ eta=1.0,
201
+ discretization_config=dict(
202
+ target="sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization"
203
+ ),
204
+ )
205
+ sampler.n_sample_steps = n_steps
206
+ default_prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe."
207
+ prompt = st_keyup("Enter a value", value=default_prompt, debounce=300, key="interactive_text")
208
+
209
+ cols = st.columns([1, 5, 1])
210
+ if mode != "skip":
211
+ with cols[0]:
212
+ v_spacer(14)
213
+ st.button("↩", on_click=decrement_counter)
214
+ with cols[2]:
215
+ v_spacer(14)
216
+ st.button("↪", on_click=increment_counter)
217
+
218
+ sampler.noise_sampler = SeededNoise(seed=st.session_state.seed)
219
+ out = sample(
220
+ model, sampler, H=512, W=512, seed=st.session_state.seed, prompt=prompt, filter=state.get("filter")
221
+ )
222
+ with cols[1]:
223
+ st.image(out[0])
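Outside the Streamlit UI, the pieces above compose roughly as follows; a minimal sketch that assumes `scripts/demo` is on `sys.path` (turbo.py imports `streamlit_helpers` by its bare name) and that `model` was already loaded via `init_st` with one of the `VERSION2SPECS` entries:

from turbo import SeededNoise, SubstepSampler, sample  # run from scripts/demo

sampler = SubstepSampler(
    n_sample_steps=2,  # 1-4 steps, as exposed in the UI above
    num_steps=1000,
    eta=1.0,
    discretization_config=dict(
        target="sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization"
    ),
)
sampler.noise_sampler = SeededNoise(seed=0)
# images = sample(model, sampler, H=512, W=512, seed=0,
#                 prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.")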
generative_models/scripts/demo/video_sampling.py ADDED
@@ -0,0 +1,200 @@
1
+ import os
2
+
3
+ from pytorch_lightning import seed_everything
4
+
5
+ from scripts.demo.streamlit_helpers import *
6
+
7
+ SAVE_PATH = "outputs/demo/vid/"
8
+
9
+ VERSION2SPECS = {
10
+ "svd": {
11
+ "T": 14,
12
+ "H": 576,
13
+ "W": 1024,
14
+ "C": 4,
15
+ "f": 8,
16
+ "config": "configs/inference/svd.yaml",
17
+ "ckpt": "checkpoints/svd.safetensors",
18
+ "options": {
19
+ "discretization": 1,
20
+ "cfg": 2.5,
21
+ "sigma_min": 0.002,
22
+ "sigma_max": 700.0,
23
+ "rho": 7.0,
24
+ "guider": 2,
25
+ "force_uc_zero_embeddings": ["cond_frames", "cond_frames_without_noise"],
26
+ "num_steps": 25,
27
+ },
28
+ },
29
+ "svd_image_decoder": {
30
+ "T": 14,
31
+ "H": 576,
32
+ "W": 1024,
33
+ "C": 4,
34
+ "f": 8,
35
+ "config": "configs/inference/svd_image_decoder.yaml",
36
+ "ckpt": "checkpoints/svd_image_decoder.safetensors",
37
+ "options": {
38
+ "discretization": 1,
39
+ "cfg": 2.5,
40
+ "sigma_min": 0.002,
41
+ "sigma_max": 700.0,
42
+ "rho": 7.0,
43
+ "guider": 2,
44
+ "force_uc_zero_embeddings": ["cond_frames", "cond_frames_without_noise"],
45
+ "num_steps": 25,
46
+ },
47
+ },
48
+ "svd_xt": {
49
+ "T": 25,
50
+ "H": 576,
51
+ "W": 1024,
52
+ "C": 4,
53
+ "f": 8,
54
+ "config": "configs/inference/svd.yaml",
55
+ "ckpt": "checkpoints/svd_xt.safetensors",
56
+ "options": {
57
+ "discretization": 1,
58
+ "cfg": 3.0,
59
+ "min_cfg": 1.5,
60
+ "sigma_min": 0.002,
61
+ "sigma_max": 700.0,
62
+ "rho": 7.0,
63
+ "guider": 2,
64
+ "force_uc_zero_embeddings": ["cond_frames", "cond_frames_without_noise"],
65
+ "num_steps": 30,
66
+ "decoding_t": 14,
67
+ },
68
+ },
69
+ "svd_xt_image_decoder": {
70
+ "T": 25,
71
+ "H": 576,
72
+ "W": 1024,
73
+ "C": 4,
74
+ "f": 8,
75
+ "config": "configs/inference/svd_image_decoder.yaml",
76
+ "ckpt": "checkpoints/svd_xt_image_decoder.safetensors",
77
+ "options": {
78
+ "discretization": 1,
79
+ "cfg": 3.0,
80
+ "min_cfg": 1.5,
81
+ "sigma_min": 0.002,
82
+ "sigma_max": 700.0,
83
+ "rho": 7.0,
84
+ "guider": 2,
85
+ "force_uc_zero_embeddings": ["cond_frames", "cond_frames_without_noise"],
86
+ "num_steps": 30,
87
+ "decoding_t": 14,
88
+ },
89
+ },
90
+ }
91
+
92
+
93
+ if __name__ == "__main__":
94
+ st.title("Stable Video Diffusion")
95
+ version = st.selectbox(
96
+ "Model Version",
97
+ [k for k in VERSION2SPECS.keys()],
98
+ 0,
99
+ )
100
+ version_dict = VERSION2SPECS[version]
101
+ if st.checkbox("Load Model"):
102
+ mode = "img2vid"
103
+ else:
104
+ mode = "skip"
105
+
106
+ H = st.sidebar.number_input(
107
+ "H", value=version_dict["H"], min_value=64, max_value=2048
108
+ )
109
+ W = st.sidebar.number_input(
110
+ "W", value=version_dict["W"], min_value=64, max_value=2048
111
+ )
112
+ T = st.sidebar.number_input(
113
+ "T", value=version_dict["T"], min_value=0, max_value=128
114
+ )
115
+ C = version_dict["C"]
116
+ F = version_dict["f"]
117
+ options = version_dict["options"]
118
+
119
+ if mode != "skip":
120
+ state = init_st(version_dict, load_filter=True)
121
+ if state["msg"]:
122
+ st.info(state["msg"])
123
+ model = state["model"]
124
+
125
+ ukeys = set(
126
+ get_unique_embedder_keys_from_conditioner(state["model"].conditioner)
127
+ )
128
+
129
+ value_dict = init_embedder_options(
130
+ ukeys,
131
+ {},
132
+ )
133
+
134
+ value_dict["image_only_indicator"] = 0
135
+
136
+ if mode == "img2vid":
137
+ img = load_img_for_prediction(W, H)
138
+ cond_aug = st.number_input(
139
+ "Conditioning augmentation:", value=0.02, min_value=0.0
140
+ )
141
+ value_dict["cond_frames_without_noise"] = img
142
+ value_dict["cond_frames"] = img + cond_aug * torch.randn_like(img)
143
+ value_dict["cond_aug"] = cond_aug
144
+
145
+ seed = st.sidebar.number_input(
146
+ "seed", value=23, min_value=0, max_value=int(1e9)
147
+ )
148
+ seed_everything(seed)
149
+
150
+ save_locally, save_path = init_save_locally(
151
+ os.path.join(SAVE_PATH, version), init_value=True
152
+ )
153
+
154
+ options["num_frames"] = T
155
+
156
+ sampler, num_rows, num_cols = init_sampling(options=options)
157
+ num_samples = num_rows * num_cols
158
+
159
+ decoding_t = st.number_input(
160
+ "Decode t frames at a time (set small if you are low on VRAM)",
161
+ value=options.get("decoding_t", T),
162
+ min_value=1,
163
+ max_value=int(1e9),
164
+ )
165
+
166
+ if st.checkbox("Overwrite fps in mp4 generator", False):
167
+ saving_fps = st.number_input(
168
+ f"saving video at fps:", value=value_dict["fps"], min_value=1
169
+ )
170
+ else:
171
+ saving_fps = value_dict["fps"]
172
+
173
+ if st.button("Sample"):
174
+ out = do_sample(
175
+ model,
176
+ sampler,
177
+ value_dict,
178
+ num_samples,
179
+ H,
180
+ W,
181
+ C,
182
+ F,
183
+ T=T,
184
+ batch2model_input=["num_video_frames", "image_only_indicator"],
185
+ force_uc_zero_embeddings=options.get("force_uc_zero_embeddings", None),
186
+ force_cond_zero_embeddings=options.get(
187
+ "force_cond_zero_embeddings", None
188
+ ),
189
+ return_latents=False,
190
+ decoding_t=decoding_t,
191
+ )
192
+
193
+ if isinstance(out, (tuple, list)):
194
+ samples, samples_z = out
195
+ else:
196
+ samples = out
197
+ samples_z = None
198
+
199
+ if save_locally:
200
+ save_video_as_grid_and_mp4(samples, save_path, T, fps=saving_fps)
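The block above is the tail of scripts/demo/video_sampling.py, typically launched with `streamlit run scripts/demo/video_sampling.py` once the checkpoints referenced in VERSION2SPECS are present under checkpoints/. As a hedged, self-contained illustration (not part of the diff), the conditioning-augmentation step it performs on the input image amounts to the following:

```python
# Minimal sketch of the conditioning-augmentation step used in the demo above:
# the conditioning frame is passed to the model twice, once clean
# ("cond_frames_without_noise") and once perturbed by Gaussian noise scaled by
# cond_aug ("cond_frames"). The tensor here is a stand-in for the loaded image.
import torch

img = torch.zeros(1, 3, 576, 1024)  # placeholder for an image normalized to [-1, 1]
cond_aug = 0.02
value_dict = {
    "cond_frames_without_noise": img,
    "cond_frames": img + cond_aug * torch.randn_like(img),
    "cond_aug": cond_aug,
}
print(value_dict["cond_frames"].std())  # roughly cond_aug for a zero image
```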
generative_models/scripts/sampling/configs/svd.yaml ADDED
@@ -0,0 +1,146 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+ ckpt_path: checkpoints/svd.safetensors
7
+
8
+ denoiser_config:
9
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
10
+ params:
11
+ scaling_config:
12
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
13
+
14
+ network_config:
15
+ target: sgm.modules.diffusionmodules.video_model.VideoUNet
16
+ params:
17
+ adm_in_channels: 768
18
+ num_classes: sequential
19
+ use_checkpoint: True
20
+ in_channels: 8
21
+ out_channels: 4
22
+ model_channels: 320
23
+ attention_resolutions: [4, 2, 1]
24
+ num_res_blocks: 2
25
+ channel_mult: [1, 2, 4, 4]
26
+ num_head_channels: 64
27
+ use_linear_in_transformer: True
28
+ transformer_depth: 1
29
+ context_dim: 1024
30
+ spatial_transformer_attn_type: softmax-xformers
31
+ extra_ff_mix_layer: True
32
+ use_spatial_context: True
33
+ merge_strategy: learned_with_images
34
+ video_kernel_size: [3, 1, 1]
35
+
36
+ conditioner_config:
37
+ target: sgm.modules.GeneralConditioner
38
+ params:
39
+ emb_models:
40
+ - is_trainable: False
41
+ input_key: cond_frames_without_noise
42
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
43
+ params:
44
+ n_cond_frames: 1
45
+ n_copies: 1
46
+ open_clip_embedding_config:
47
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
48
+ params:
49
+ freeze: True
50
+
51
+ - input_key: fps_id
52
+ is_trainable: False
53
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
54
+ params:
55
+ outdim: 256
56
+
57
+ - input_key: motion_bucket_id
58
+ is_trainable: False
59
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
60
+ params:
61
+ outdim: 256
62
+
63
+ - input_key: cond_frames
64
+ is_trainable: False
65
+ target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
66
+ params:
67
+ disable_encoder_autocast: True
68
+ n_cond_frames: 1
69
+ n_copies: 1
70
+ is_ae: True
71
+ encoder_config:
72
+ target: sgm.models.autoencoder.AutoencoderKLModeOnly
73
+ params:
74
+ embed_dim: 4
75
+ monitor: val/rec_loss
76
+ ddconfig:
77
+ attn_type: vanilla-xformers
78
+ double_z: True
79
+ z_channels: 4
80
+ resolution: 256
81
+ in_channels: 3
82
+ out_ch: 3
83
+ ch: 128
84
+ ch_mult: [1, 2, 4, 4]
85
+ num_res_blocks: 2
86
+ attn_resolutions: []
87
+ dropout: 0.0
88
+ lossconfig:
89
+ target: torch.nn.Identity
90
+
91
+ - input_key: cond_aug
92
+ is_trainable: False
93
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
94
+ params:
95
+ outdim: 256
96
+
97
+ first_stage_config:
98
+ target: sgm.models.autoencoder.AutoencodingEngine
99
+ params:
100
+ loss_config:
101
+ target: torch.nn.Identity
102
+ regularizer_config:
103
+ target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
104
+ encoder_config:
105
+ target: sgm.modules.diffusionmodules.model.Encoder
106
+ params:
107
+ attn_type: vanilla
108
+ double_z: True
109
+ z_channels: 4
110
+ resolution: 256
111
+ in_channels: 3
112
+ out_ch: 3
113
+ ch: 128
114
+ ch_mult: [1, 2, 4, 4]
115
+ num_res_blocks: 2
116
+ attn_resolutions: []
117
+ dropout: 0.0
118
+ decoder_config:
119
+ target: sgm.modules.autoencoding.temporal_ae.VideoDecoder
120
+ params:
121
+ attn_type: vanilla
122
+ double_z: True
123
+ z_channels: 4
124
+ resolution: 256
125
+ in_channels: 3
126
+ out_ch: 3
127
+ ch: 128
128
+ ch_mult: [1, 2, 4, 4]
129
+ num_res_blocks: 2
130
+ attn_resolutions: []
131
+ dropout: 0.0
132
+ video_kernel_size: [3, 1, 1]
133
+
134
+ sampler_config:
135
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
136
+ params:
137
+ discretization_config:
138
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
139
+ params:
140
+ sigma_max: 700.0
141
+
142
+ guider_config:
143
+ target: sgm.modules.diffusionmodules.guiders.LinearPredictionGuider
144
+ params:
145
+ max_scale: 2.5
146
+ min_scale: 1.0
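This config (like the three variants that follow) is consumed via OmegaConf and instantiate_from_config; a minimal sketch of that path, mirroring load_model in scripts/sampling/simple_video_sample.py further below and assuming the checkpoint has been downloaded to checkpoints/svd.safetensors:

```python
# Hedged sketch: build the DiffusionEngine described by this YAML. The two runtime
# overrides (sampler steps and guider num_frames) are the same ones load_model applies.
import torch
from omegaconf import OmegaConf
from sgm.util import instantiate_from_config

config = OmegaConf.load("scripts/sampling/configs/svd.yaml")
config.model.params.sampler_config.params.num_steps = 25
config.model.params.sampler_config.params.guider_config.params.num_frames = 14  # svd generates 14 frames
model = instantiate_from_config(config.model).to("cuda").eval()
```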
generative_models/scripts/sampling/configs/svd_image_decoder.yaml ADDED
@@ -0,0 +1,129 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+ ckpt_path: checkpoints/svd_image_decoder.safetensors
7
+
8
+ denoiser_config:
9
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
10
+ params:
11
+ scaling_config:
12
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
13
+
14
+ network_config:
15
+ target: sgm.modules.diffusionmodules.video_model.VideoUNet
16
+ params:
17
+ adm_in_channels: 768
18
+ num_classes: sequential
19
+ use_checkpoint: True
20
+ in_channels: 8
21
+ out_channels: 4
22
+ model_channels: 320
23
+ attention_resolutions: [4, 2, 1]
24
+ num_res_blocks: 2
25
+ channel_mult: [1, 2, 4, 4]
26
+ num_head_channels: 64
27
+ use_linear_in_transformer: True
28
+ transformer_depth: 1
29
+ context_dim: 1024
30
+ spatial_transformer_attn_type: softmax-xformers
31
+ extra_ff_mix_layer: True
32
+ use_spatial_context: True
33
+ merge_strategy: learned_with_images
34
+ video_kernel_size: [3, 1, 1]
35
+
36
+ conditioner_config:
37
+ target: sgm.modules.GeneralConditioner
38
+ params:
39
+ emb_models:
40
+ - is_trainable: False
41
+ input_key: cond_frames_without_noise
42
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
43
+ params:
44
+ n_cond_frames: 1
45
+ n_copies: 1
46
+ open_clip_embedding_config:
47
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
48
+ params:
49
+ freeze: True
50
+
51
+ - input_key: fps_id
52
+ is_trainable: False
53
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
54
+ params:
55
+ outdim: 256
56
+
57
+ - input_key: motion_bucket_id
58
+ is_trainable: False
59
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
60
+ params:
61
+ outdim: 256
62
+
63
+ - input_key: cond_frames
64
+ is_trainable: False
65
+ target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
66
+ params:
67
+ disable_encoder_autocast: True
68
+ n_cond_frames: 1
69
+ n_copies: 1
70
+ is_ae: True
71
+ encoder_config:
72
+ target: sgm.models.autoencoder.AutoencoderKLModeOnly
73
+ params:
74
+ embed_dim: 4
75
+ monitor: val/rec_loss
76
+ ddconfig:
77
+ attn_type: vanilla-xformers
78
+ double_z: True
79
+ z_channels: 4
80
+ resolution: 256
81
+ in_channels: 3
82
+ out_ch: 3
83
+ ch: 128
84
+ ch_mult: [1, 2, 4, 4]
85
+ num_res_blocks: 2
86
+ attn_resolutions: []
87
+ dropout: 0.0
88
+ lossconfig:
89
+ target: torch.nn.Identity
90
+
91
+ - input_key: cond_aug
92
+ is_trainable: False
93
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
94
+ params:
95
+ outdim: 256
96
+
97
+ first_stage_config:
98
+ target: sgm.models.autoencoder.AutoencoderKL
99
+ params:
100
+ embed_dim: 4
101
+ monitor: val/rec_loss
102
+ ddconfig:
103
+ attn_type: vanilla-xformers
104
+ double_z: True
105
+ z_channels: 4
106
+ resolution: 256
107
+ in_channels: 3
108
+ out_ch: 3
109
+ ch: 128
110
+ ch_mult: [1, 2, 4, 4]
111
+ num_res_blocks: 2
112
+ attn_resolutions: []
113
+ dropout: 0.0
114
+ lossconfig:
115
+ target: torch.nn.Identity
116
+
117
+ sampler_config:
118
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
119
+ params:
120
+ discretization_config:
121
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
122
+ params:
123
+ sigma_max: 700.0
124
+
125
+ guider_config:
126
+ target: sgm.modules.diffusionmodules.guiders.LinearPredictionGuider
127
+ params:
128
+ max_scale: 2.5
129
+ min_scale: 1.0
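The only substantive difference from svd.yaml above is the first stage: this variant decodes latents frame by frame with the standard image AutoencoderKL instead of the AutoencodingEngine whose decoder is the temporal VideoDecoder. A small hedged sketch that makes the distinction visible (paths are the two config files in this commit):

```python
# Compare the first-stage targets of the two SVD configs.
from omegaconf import OmegaConf

svd = OmegaConf.load("scripts/sampling/configs/svd.yaml")
svd_img = OmegaConf.load("scripts/sampling/configs/svd_image_decoder.yaml")
print(svd.model.params.first_stage_config.target)      # sgm.models.autoencoder.AutoencodingEngine (temporal VideoDecoder)
print(svd_img.model.params.first_stage_config.target)  # sgm.models.autoencoder.AutoencoderKL (per-frame image decoder)
```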
generative_models/scripts/sampling/configs/svd_xt.yaml ADDED
@@ -0,0 +1,146 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+ ckpt_path: checkpoints/svd_xt.safetensors
7
+
8
+ denoiser_config:
9
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
10
+ params:
11
+ scaling_config:
12
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
13
+
14
+ network_config:
15
+ target: sgm.modules.diffusionmodules.video_model.VideoUNet
16
+ params:
17
+ adm_in_channels: 768
18
+ num_classes: sequential
19
+ use_checkpoint: True
20
+ in_channels: 8
21
+ out_channels: 4
22
+ model_channels: 320
23
+ attention_resolutions: [4, 2, 1]
24
+ num_res_blocks: 2
25
+ channel_mult: [1, 2, 4, 4]
26
+ num_head_channels: 64
27
+ use_linear_in_transformer: True
28
+ transformer_depth: 1
29
+ context_dim: 1024
30
+ spatial_transformer_attn_type: softmax-xformers
31
+ extra_ff_mix_layer: True
32
+ use_spatial_context: True
33
+ merge_strategy: learned_with_images
34
+ video_kernel_size: [3, 1, 1]
35
+
36
+ conditioner_config:
37
+ target: sgm.modules.GeneralConditioner
38
+ params:
39
+ emb_models:
40
+ - is_trainable: False
41
+ input_key: cond_frames_without_noise
42
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
43
+ params:
44
+ n_cond_frames: 1
45
+ n_copies: 1
46
+ open_clip_embedding_config:
47
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
48
+ params:
49
+ freeze: True
50
+
51
+ - input_key: fps_id
52
+ is_trainable: False
53
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
54
+ params:
55
+ outdim: 256
56
+
57
+ - input_key: motion_bucket_id
58
+ is_trainable: False
59
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
60
+ params:
61
+ outdim: 256
62
+
63
+ - input_key: cond_frames
64
+ is_trainable: False
65
+ target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
66
+ params:
67
+ disable_encoder_autocast: True
68
+ n_cond_frames: 1
69
+ n_copies: 1
70
+ is_ae: True
71
+ encoder_config:
72
+ target: sgm.models.autoencoder.AutoencoderKLModeOnly
73
+ params:
74
+ embed_dim: 4
75
+ monitor: val/rec_loss
76
+ ddconfig:
77
+ attn_type: vanilla-xformers
78
+ double_z: True
79
+ z_channels: 4
80
+ resolution: 256
81
+ in_channels: 3
82
+ out_ch: 3
83
+ ch: 128
84
+ ch_mult: [1, 2, 4, 4]
85
+ num_res_blocks: 2
86
+ attn_resolutions: []
87
+ dropout: 0.0
88
+ lossconfig:
89
+ target: torch.nn.Identity
90
+
91
+ - input_key: cond_aug
92
+ is_trainable: False
93
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
94
+ params:
95
+ outdim: 256
96
+
97
+ first_stage_config:
98
+ target: sgm.models.autoencoder.AutoencodingEngine
99
+ params:
100
+ loss_config:
101
+ target: torch.nn.Identity
102
+ regularizer_config:
103
+ target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
104
+ encoder_config:
105
+ target: sgm.modules.diffusionmodules.model.Encoder
106
+ params:
107
+ attn_type: vanilla
108
+ double_z: True
109
+ z_channels: 4
110
+ resolution: 256
111
+ in_channels: 3
112
+ out_ch: 3
113
+ ch: 128
114
+ ch_mult: [1, 2, 4, 4]
115
+ num_res_blocks: 2
116
+ attn_resolutions: []
117
+ dropout: 0.0
118
+ decoder_config:
119
+ target: sgm.modules.autoencoding.temporal_ae.VideoDecoder
120
+ params:
121
+ attn_type: vanilla
122
+ double_z: True
123
+ z_channels: 4
124
+ resolution: 256
125
+ in_channels: 3
126
+ out_ch: 3
127
+ ch: 128
128
+ ch_mult: [1, 2, 4, 4]
129
+ num_res_blocks: 2
130
+ attn_resolutions: []
131
+ dropout: 0.0
132
+ video_kernel_size: [3, 1, 1]
133
+
134
+ sampler_config:
135
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
136
+ params:
137
+ discretization_config:
138
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
139
+ params:
140
+ sigma_max: 700.0
141
+
142
+ guider_config:
143
+ target: sgm.modules.diffusionmodules.guiders.LinearPredictionGuider
144
+ params:
145
+ max_scale: 3.0
146
+ min_scale: 1.5
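svd_xt raises the guidance range to min_scale 1.5 / max_scale 3.0 (svd uses 1.0 / 2.5) and, per the demo's VERSION2SPECS above, samples 25 frames in 30 steps. The LinearPredictionGuider varies the classifier-free guidance scale across the frame axis; a hedged numeric illustration of that ramp (the exact broadcasting lives in sgm.modules.diffusionmodules.guiders):

```python
# Guidance scale ramps linearly from min_scale to max_scale over the frame axis
# instead of applying a single global cfg value to every frame.
import torch

min_scale, max_scale, num_frames = 1.5, 3.0, 25
scales = torch.linspace(min_scale, max_scale, num_frames)
print(scales[0].item(), scales[-1].item())  # 1.5 at the first frame, 3.0 at the last
```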
generative_models/scripts/sampling/configs/svd_xt_image_decoder.yaml ADDED
@@ -0,0 +1,129 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.18215
5
+ disable_first_stage_autocast: True
6
+ ckpt_path: checkpoints/svd_xt_image_decoder.safetensors
7
+
8
+ denoiser_config:
9
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
10
+ params:
11
+ scaling_config:
12
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
13
+
14
+ network_config:
15
+ target: sgm.modules.diffusionmodules.video_model.VideoUNet
16
+ params:
17
+ adm_in_channels: 768
18
+ num_classes: sequential
19
+ use_checkpoint: True
20
+ in_channels: 8
21
+ out_channels: 4
22
+ model_channels: 320
23
+ attention_resolutions: [4, 2, 1]
24
+ num_res_blocks: 2
25
+ channel_mult: [1, 2, 4, 4]
26
+ num_head_channels: 64
27
+ use_linear_in_transformer: True
28
+ transformer_depth: 1
29
+ context_dim: 1024
30
+ spatial_transformer_attn_type: softmax-xformers
31
+ extra_ff_mix_layer: True
32
+ use_spatial_context: True
33
+ merge_strategy: learned_with_images
34
+ video_kernel_size: [3, 1, 1]
35
+
36
+ conditioner_config:
37
+ target: sgm.modules.GeneralConditioner
38
+ params:
39
+ emb_models:
40
+ - is_trainable: False
41
+ input_key: cond_frames_without_noise
42
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
43
+ params:
44
+ n_cond_frames: 1
45
+ n_copies: 1
46
+ open_clip_embedding_config:
47
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
48
+ params:
49
+ freeze: True
50
+
51
+ - input_key: fps_id
52
+ is_trainable: False
53
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
54
+ params:
55
+ outdim: 256
56
+
57
+ - input_key: motion_bucket_id
58
+ is_trainable: False
59
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
60
+ params:
61
+ outdim: 256
62
+
63
+ - input_key: cond_frames
64
+ is_trainable: False
65
+ target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
66
+ params:
67
+ disable_encoder_autocast: True
68
+ n_cond_frames: 1
69
+ n_copies: 1
70
+ is_ae: True
71
+ encoder_config:
72
+ target: sgm.models.autoencoder.AutoencoderKLModeOnly
73
+ params:
74
+ embed_dim: 4
75
+ monitor: val/rec_loss
76
+ ddconfig:
77
+ attn_type: vanilla-xformers
78
+ double_z: True
79
+ z_channels: 4
80
+ resolution: 256
81
+ in_channels: 3
82
+ out_ch: 3
83
+ ch: 128
84
+ ch_mult: [1, 2, 4, 4]
85
+ num_res_blocks: 2
86
+ attn_resolutions: []
87
+ dropout: 0.0
88
+ lossconfig:
89
+ target: torch.nn.Identity
90
+
91
+ - input_key: cond_aug
92
+ is_trainable: False
93
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
94
+ params:
95
+ outdim: 256
96
+
97
+ first_stage_config:
98
+ target: sgm.models.autoencoder.AutoencoderKL
99
+ params:
100
+ embed_dim: 4
101
+ monitor: val/rec_loss
102
+ ddconfig:
103
+ attn_type: vanilla-xformers
104
+ double_z: True
105
+ z_channels: 4
106
+ resolution: 256
107
+ in_channels: 3
108
+ out_ch: 3
109
+ ch: 128
110
+ ch_mult: [1, 2, 4, 4]
111
+ num_res_blocks: 2
112
+ attn_resolutions: []
113
+ dropout: 0.0
114
+ lossconfig:
115
+ target: torch.nn.Identity
116
+
117
+ sampler_config:
118
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
119
+ params:
120
+ discretization_config:
121
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
122
+ params:
123
+ sigma_max: 700.0
124
+
125
+ guider_config:
126
+ target: sgm.modules.diffusionmodules.guiders.LinearPredictionGuider
127
+ params:
128
+ max_scale: 3.0
129
+ min_scale: 1.5
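All four configs override only sigma_max (700.0) in the EDMDiscretization; sigma_min 0.002 and rho 7.0 match the values echoed in the demo's options dictionaries above. A hedged sketch of the resulting Karras-style noise schedule, shown as an illustration of the formula rather than the repository's exact implementation:

```python
# EDM/Karras sigma schedule with the values selected by these configs.
import torch

def edm_sigmas(n: int, sigma_min: float = 0.002, sigma_max: float = 700.0, rho: float = 7.0) -> torch.Tensor:
    ramp = torch.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(edm_sigmas(30)[[0, -1]])  # starts at ~700 and decays to 0.002
```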
generative_models/scripts/sampling/simple_video_sample.py ADDED
@@ -0,0 +1,278 @@
1
+ import math
2
+ import os
3
+ from glob import glob
4
+ from pathlib import Path
5
+ from typing import Optional
6
+
7
+ import cv2
8
+ import numpy as np
9
+ import torch
10
+ from einops import rearrange, repeat
11
+ from fire import Fire
12
+ from omegaconf import OmegaConf
13
+ from PIL import Image
14
+ from torchvision.transforms import ToTensor
15
+
16
+ from scripts.util.detection.nsfw_and_watermark_dectection import \
17
+ DeepFloydDataFiltering
18
+ from sgm.inference.helpers import embed_watermark
19
+ from sgm.util import default, instantiate_from_config
20
+
21
+
22
+ def sample(
23
+ input_path: str = "assets/test_image.png", # Can either be an image file or a folder with image files
24
+ num_frames: Optional[int] = None,
25
+ num_steps: Optional[int] = None,
26
+ version: str = "svd",
27
+ fps_id: int = 6,
28
+ motion_bucket_id: int = 127,
29
+ cond_aug: float = 0.02,
30
+ seed: int = 23,
31
+ decoding_t: int = 14, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
32
+ device: str = "cuda",
33
+ output_folder: Optional[str] = None,
34
+ ):
35
+ """
36
+ Simple script to generate a single sample conditioned on an image `input_path` or multiple images, one for each
37
+ image file in folder `input_path`. If you run out of VRAM, try decreasing `decoding_t`.
38
+ """
39
+
40
+ if version == "svd":
41
+ num_frames = default(num_frames, 14)
42
+ num_steps = default(num_steps, 25)
43
+ output_folder = default(output_folder, "outputs/simple_video_sample/svd/")
44
+ model_config = "scripts/sampling/configs/svd.yaml"
45
+ elif version == "svd_xt":
46
+ num_frames = default(num_frames, 25)
47
+ num_steps = default(num_steps, 30)
48
+ output_folder = default(output_folder, "outputs/simple_video_sample/svd_xt/")
49
+ model_config = "scripts/sampling/configs/svd_xt.yaml"
50
+ elif version == "svd_image_decoder":
51
+ num_frames = default(num_frames, 14)
52
+ num_steps = default(num_steps, 25)
53
+ output_folder = default(
54
+ output_folder, "outputs/simple_video_sample/svd_image_decoder/"
55
+ )
56
+ model_config = "scripts/sampling/configs/svd_image_decoder.yaml"
57
+ elif version == "svd_xt_image_decoder":
58
+ num_frames = default(num_frames, 25)
59
+ num_steps = default(num_steps, 30)
60
+ output_folder = default(
61
+ output_folder, "outputs/simple_video_sample/svd_xt_image_decoder/"
62
+ )
63
+ model_config = "scripts/sampling/configs/svd_xt_image_decoder.yaml"
64
+ else:
65
+ raise ValueError(f"Version {version} does not exist.")
66
+
67
+ model, filter = load_model(
68
+ model_config,
69
+ device,
70
+ num_frames,
71
+ num_steps,
72
+ )
73
+ torch.manual_seed(seed)
74
+
75
+ path = Path(input_path)
76
+ all_img_paths = []
77
+ if path.is_file():
78
+ if any([input_path.endswith(x) for x in ["jpg", "jpeg", "png"]]):
79
+ all_img_paths = [input_path]
80
+ else:
81
+ raise ValueError("Path is not valid image file.")
82
+ elif path.is_dir():
83
+ all_img_paths = sorted(
84
+ [
85
+ f
86
+ for f in path.iterdir()
87
+ if f.is_file() and f.suffix.lower() in [".jpg", ".jpeg", ".png"]
88
+ ]
89
+ )
90
+ if len(all_img_paths) == 0:
91
+ raise ValueError("Folder does not contain any images.")
92
+ else:
93
+ raise ValueError
94
+
95
+ for input_img_path in all_img_paths:
96
+ with Image.open(input_img_path) as image:
97
+ if image.mode == "RGBA":
98
+ image = image.convert("RGB")
99
+ w, h = image.size
100
+
101
+ if h % 64 != 0 or w % 64 != 0:
102
+ width, height = map(lambda x: x - x % 64, (w, h))
103
+ image = image.resize((width, height))
104
+ print(
105
+ f"WARNING: Your image is of size {h}x{w} which is not divisible by 64. We are resizing to {height}x{width}!"
106
+ )
107
+
108
+ image = ToTensor()(image)
109
+ image = image * 2.0 - 1.0
110
+
111
+ image = image.unsqueeze(0).to(device)
112
+ H, W = image.shape[2:]
113
+ assert image.shape[1] == 3
114
+ F = 8
115
+ C = 4
116
+ shape = (num_frames, C, H // F, W // F)
117
+ if (H, W) != (576, 1024):
118
+ print(
119
+ "WARNING: The conditioning frame you provided is not 576x1024. This leads to suboptimal performance as model was only trained on 576x1024. Consider increasing `cond_aug`."
120
+ )
121
+ if motion_bucket_id > 255:
122
+ print(
123
+ "WARNING: High motion bucket! This may lead to suboptimal performance."
124
+ )
125
+
126
+ if fps_id < 5:
127
+ print("WARNING: Small fps value! This may lead to suboptimal performance.")
128
+
129
+ if fps_id > 30:
130
+ print("WARNING: Large fps value! This may lead to suboptimal performance.")
131
+
132
+ value_dict = {}
133
+ value_dict["motion_bucket_id"] = motion_bucket_id
134
+ value_dict["fps_id"] = fps_id
135
+ value_dict["cond_aug"] = cond_aug
136
+ value_dict["cond_frames_without_noise"] = image
137
+ value_dict["cond_frames"] = image + cond_aug * torch.randn_like(image)
138
+ value_dict["cond_aug"] = cond_aug
139
+
140
+ with torch.no_grad():
141
+ with torch.autocast(device):
142
+ batch, batch_uc = get_batch(
143
+ get_unique_embedder_keys_from_conditioner(model.conditioner),
144
+ value_dict,
145
+ [1, num_frames],
146
+ T=num_frames,
147
+ device=device,
148
+ )
149
+ c, uc = model.conditioner.get_unconditional_conditioning(
150
+ batch,
151
+ batch_uc=batch_uc,
152
+ force_uc_zero_embeddings=[
153
+ "cond_frames",
154
+ "cond_frames_without_noise",
155
+ ],
156
+ )
157
+
158
+ for k in ["crossattn", "concat"]:
159
+ uc[k] = repeat(uc[k], "b ... -> b t ...", t=num_frames)
160
+ uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=num_frames)
161
+ c[k] = repeat(c[k], "b ... -> b t ...", t=num_frames)
162
+ c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=num_frames)
163
+
164
+ randn = torch.randn(shape, device=device)
165
+
166
+ additional_model_inputs = {}
167
+ additional_model_inputs["image_only_indicator"] = torch.zeros(
168
+ 2, num_frames
169
+ ).to(device)
170
+ additional_model_inputs["num_video_frames"] = batch["num_video_frames"]
171
+
172
+ def denoiser(input, sigma, c):
173
+ return model.denoiser(
174
+ model.model, input, sigma, c, **additional_model_inputs
175
+ )
176
+
177
+ samples_z = model.sampler(denoiser, randn, cond=c, uc=uc)
178
+ model.en_and_decode_n_samples_a_time = decoding_t
179
+ samples_x = model.decode_first_stage(samples_z)
180
+ samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
181
+
182
+ os.makedirs(output_folder, exist_ok=True)
183
+ base_count = len(glob(os.path.join(output_folder, "*.mp4")))
184
+ video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
185
+ writer = cv2.VideoWriter(
186
+ video_path,
187
+ cv2.VideoWriter_fourcc(*"MP4V"),
188
+ fps_id + 1,
189
+ (samples.shape[-1], samples.shape[-2]),
190
+ )
191
+
192
+ samples = embed_watermark(samples)
193
+ samples = filter(samples)
194
+ vid = (
195
+ (rearrange(samples, "t c h w -> t h w c") * 255)
196
+ .cpu()
197
+ .numpy()
198
+ .astype(np.uint8)
199
+ )
200
+ for frame in vid:
201
+ frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
202
+ writer.write(frame)
203
+ writer.release()
204
+
205
+
206
+ def get_unique_embedder_keys_from_conditioner(conditioner):
207
+ return list(set([x.input_key for x in conditioner.embedders]))
208
+
209
+
210
+ def get_batch(keys, value_dict, N, T, device):
211
+ batch = {}
212
+ batch_uc = {}
213
+
214
+ for key in keys:
215
+ if key == "fps_id":
216
+ batch[key] = (
217
+ torch.tensor([value_dict["fps_id"]])
218
+ .to(device)
219
+ .repeat(int(math.prod(N)))
220
+ )
221
+ elif key == "motion_bucket_id":
222
+ batch[key] = (
223
+ torch.tensor([value_dict["motion_bucket_id"]])
224
+ .to(device)
225
+ .repeat(int(math.prod(N)))
226
+ )
227
+ elif key == "cond_aug":
228
+ batch[key] = repeat(
229
+ torch.tensor([value_dict["cond_aug"]]).to(device),
230
+ "1 -> b",
231
+ b=math.prod(N),
232
+ )
233
+ elif key == "cond_frames":
234
+ batch[key] = repeat(value_dict["cond_frames"], "1 ... -> b ...", b=N[0])
235
+ elif key == "cond_frames_without_noise":
236
+ batch[key] = repeat(
237
+ value_dict["cond_frames_without_noise"], "1 ... -> b ...", b=N[0]
238
+ )
239
+ else:
240
+ batch[key] = value_dict[key]
241
+
242
+ if T is not None:
243
+ batch["num_video_frames"] = T
244
+
245
+ for key in batch.keys():
246
+ if key not in batch_uc and isinstance(batch[key], torch.Tensor):
247
+ batch_uc[key] = torch.clone(batch[key])
248
+ return batch, batch_uc
249
+
250
+
251
+ def load_model(
252
+ config: str,
253
+ device: str,
254
+ num_frames: int,
255
+ num_steps: int,
256
+ ):
257
+ config = OmegaConf.load(config)
258
+ if device == "cuda":
259
+ config.model.params.conditioner_config.params.emb_models[
260
+ 0
261
+ ].params.open_clip_embedding_config.params.init_device = device
262
+
263
+ config.model.params.sampler_config.params.num_steps = num_steps
264
+ config.model.params.sampler_config.params.guider_config.params.num_frames = (
265
+ num_frames
266
+ )
267
+ if device == "cuda":
268
+ with torch.device(device):
269
+ model = instantiate_from_config(config.model).to(device).eval()
270
+ else:
271
+ model = instantiate_from_config(config.model).to(device).eval()
272
+
273
+ filter = DeepFloydDataFiltering(verbose=False, device=device)
274
+ return model, filter
275
+
276
+
277
+ if __name__ == "__main__":
278
+ Fire(sample)
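Because the script wraps sample() with fire.Fire, it can be driven from the shell (e.g. `python scripts/sampling/simple_video_sample.py --input_path assets/test_image.png --version svd_xt`) or imported. A hedged programmatic sketch, assuming it is run from the repository root with the corresponding checkpoint under checkpoints/:

```python
# Usage sketch for the script above; argument names match the sample() signature.
from scripts.sampling.simple_video_sample import sample

sample(
    input_path="assets/test_image.png",  # conditioning image shipped in this commit
    version="svd_xt",                    # picks scripts/sampling/configs/svd_xt.yaml
    decoding_t=5,                        # decode fewer frames at once if VRAM is tight
)
```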