Warlord-K committed
Commit c945057
1 Parent(s): 12a07c8

Initial Commit

Files changed (11)
  1. .flake8 +3 -0
  2. .gitignore +10 -0
  3. DEVELOPMENT.md +7 -0
  4. LICENSE +21 -0
  5. Makefile +22 -0
  6. README.md +60 -0
  7. cog.yaml.in +25 -0
  8. common.py +41 -0
  9. predict_advanced.py +238 -0
  10. predict_basic.py +118 -0
  11. script/download-weights +34 -0
.flake8 ADDED
@@ -0,0 +1,3 @@
+ [flake8]
+ exclude = .git,__pycache__,docs/source/conf.py,old,build,dist
+ extend-ignore = E501
.gitignore ADDED
@@ -0,0 +1,10 @@
+ cog_class_data
+ cog_instance_data
+ *-cache
+ __debug*
+ __pycache__
+ checkpoints
+ .cog
+ *.safetensors
+ output.zip
+ cog.yaml
DEVELOPMENT.md ADDED
@@ -0,0 +1,7 @@
+ # Push to Replicate
+
+ This repo is responsible for two models on Replicate: replicate/lora-training and replicate/lora-training-advanced. To push both models (assuming you have access to push to the replicate org), run:
+
+ ```
+ make push
+ ```
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 Replicate
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
Makefile ADDED
@@ -0,0 +1,22 @@
+ .PHONY: cog-yaml-basic
+ cog-yaml-basic:
+ 	PREDICT_FILE=predict_basic.py envsubst < cog.yaml.in > cog.yaml
+
+ .PHONY: cog-yaml-advanced
+ cog-yaml-advanced:
+ 	PREDICT_FILE=predict_advanced.py envsubst < cog.yaml.in > cog.yaml
+
+ .PHONY: push-basic
+ push-basic: cog-yaml-basic
+ 	cog push r8.im/cloneofsimo/lora-training
+
+ .PHONY: push-advanced
+ push-advanced: cog-yaml-advanced
+ 	cog push r8.im/cloneofsimo/lora-advanced-training
+
+ .PHONY: push
+ push: lint push-basic push-advanced
+
+ .PHONY: lint
+ lint: cog-yaml-basic
+ 	cog run flake8 *.py
README.md ADDED
@@ -0,0 +1,60 @@
+ # LoRA training Cog model
+
+ ## Use on Replicate
+
+ Easy-to-use model pre-configured for faces, objects, and styles:
+
+ [![Replicate](https://replicate.com/replicate/lora-training/badge)](https://replicate.com/replicate/lora-training)
+
+ Advanced model with all the parameters:
+
+ [![Replicate](https://replicate.com/replicate/lora-advanced-training/badge)](https://replicate.com/replicate/lora-advanced-training)
+
+ Feed the trained model into this inference model to run predictions:
+
+ [![Replicate](https://replicate.com/replicate/lora/badge)](https://replicate.com/replicate/lora)
+
+ If you want to share your trained LoRAs, please join the `#lora` channel in the [Replicate Discord](https://discord.gg/replicate).
+
+ ## Use locally
+
+ First, download the pre-trained weights [with your Hugging Face auth token](https://huggingface.co/settings/tokens):
+
+ ```
+ cog run script/download-weights <your-hugging-face-auth-token>
+ ```
+
+ Then, you can train your dreambooth:
+
+ ```
+ cog predict -i instance_data=@my-images.zip
+ ```
+
+ The resulting LoRA weights file can be used with the `patch_pipe` function:
+
+ ```python
+ from diffusers import StableDiffusionPipeline
+ from lora_diffusion import patch_pipe, tune_lora_scale, image_grid
+ import torch
+
+ model_id = "runwayml/stable-diffusion-v1-5"
+
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(
+     "cuda:1"
+ )
+
+ patch_pipe(pipe, "./my-images.safetensors")
+ prompt = "detailed photo of <s1><s2>, detailed face, a brown cloak, brown steampunk corset, belt, virtual youtuber, cowboy shot, feathers in hair, feather hair ornament, white shirt, brown gloves, shooting arrows"
+
+ tune_lora_scale(pipe.unet, 0.8)
+ tune_lora_scale(pipe.text_encoder, 0.8)
+
+ imgs = pipe(
+     [prompt],
+     num_inference_steps=50,
+     guidance_scale=4.5,
+     height=640,
+     width=512,
+ ).images
+ ...
+ ```
cog.yaml.in ADDED
@@ -0,0 +1,25 @@
+ # Configuration for Cog ⚙️
+ # Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md
+
+ build:
+   gpu: true
+   cuda: "11.4"
+   python_version: "3.10"
+   system_packages:
+     - "ffmpeg"
+     - "libsm6"
+     - "libxext6"
+   python_packages:
+     - "diffusers==0.11.1"
+     - "torch==1.13.0"
+     - "ftfy==6.1.1"
+     - "scipy==1.9.3"
+     - "transformers==4.25.1"
+     - "accelerate==0.15.0"
+     - "git+https://github.com/cloneofsimo/lora.git@799c17aef2a475641fb70d68a6992de4fc325ce4"
+     - "opencv-python==4.7.0.68"
+     - "torchvision==0.14.0"
+     - "mediapipe==0.9.0.1"
+     - "flake8==5.0.4"
+
+ predict: "$PREDICT_FILE:Predictor"
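The `cog.yaml` that Cog actually reads is generated from this template by the Makefile targets above, which substitute `PREDICT_FILE` via `envsubst`. A rough Python sketch of the same substitution (illustrative only; the Makefile shells out to the `envsubst` binary rather than using Python):

```python
import os
from string import Template

# Pick the predictor module the same way `make cog-yaml-basic` does.
os.environ["PREDICT_FILE"] = "predict_basic.py"

# Expand $PREDICT_FILE in the template and write the resulting cog.yaml.
with open("cog.yaml.in") as src, open("cog.yaml", "w") as dst:
    dst.write(Template(src.read()).substitute(os.environ))
```

With `PREDICT_FILE=predict_basic.py` the generated file ends in `predict: "predict_basic.py:Predictor"`, which is how `cog push` locates the predictor class; the advanced target only swaps in `predict_advanced.py`.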
common.py ADDED
@@ -0,0 +1,41 @@
+ import os
+ import shutil
+ import mimetypes
+ import re
+ from zipfile import ZipFile
+
+ from cog import Path
+
+
+ def clean_directory(path):
+     if os.path.exists(path):
+         shutil.rmtree(path)
+     os.makedirs(path)
+
+
+ def clean_directories(paths):
+     for path in paths:
+         clean_directory(path)
+
+
+ def random_seed():
+     return int.from_bytes(os.urandom(2), "big")
+
+
+ def extract_zip_and_flatten(zip_path, output_path):
+     # extract zip contents, flattening any paths present within it
+     with ZipFile(str(zip_path), "r") as zip_ref:
+         for zip_info in zip_ref.infolist():
+             if zip_info.filename[-1] == "/" or zip_info.filename.startswith(
+                 "__MACOSX"
+             ):
+                 continue
+             mt = mimetypes.guess_type(zip_info.filename)
+             if mt and mt[0] and mt[0].startswith("image/"):
+                 zip_info.filename = os.path.basename(zip_info.filename)
+                 zip_ref.extract(zip_info, output_path)
+
+
+ def get_output_filename(input_filename):
+     temp_name = Path(input_filename).name
+     return Path(re.sub("[^-a-zA-Z0-9_]", "", temp_name)).with_suffix(".safetensors")
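A quick illustration of how these helpers behave (a sketch only; the filenames are made up, and it assumes the `cog` package is installed since `common.py` imports `cog.Path`):

```python
from common import get_output_filename, random_seed

# Only [-a-zA-Z0-9_] characters survive the regex, so spaces and the "." of the
# original extension are stripped before ".safetensors" is appended.
print(get_output_filename("my images.zip"))  # myimageszip.safetensors

# Two random bytes interpreted big-endian: an integer in the range 0..65535.
print(random_seed())
```

Both predictors below use `get_output_filename` to name the final weights file after the uploaded ZIP.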
predict_advanced.py ADDED
@@ -0,0 +1,238 @@
+ import gc
+ import torch
+ from cog import BasePredictor, Input, Path
+ from lora_diffusion.cli_lora_pti import train as lora_train
+ from lora_diffusion import (
+     UNET_DEFAULT_TARGET_REPLACE,
+     TEXT_ENCODER_DEFAULT_TARGET_REPLACE,
+ )
+
+ from common import (
+     random_seed,
+     clean_directories,
+     extract_zip_and_flatten,
+     get_output_filename,
+ )
+
+
+ class Predictor(BasePredictor):
+     def predict(
+         self,
+         instance_data: Path = Input(
+             description="A ZIP file containing your training images (JPG, PNG, etc.; size not restricted). These images contain your 'subject' that you want the trained model to embed in the output domain for later generating customized scenes beyond the training images. For best results, use images without noise or unrelated objects in the background.",
+         ),
+         seed: int = Input(description="A seed for reproducible training", default=1337),
+         resolution: int = Input(
+             description="The resolution for input images. All the images in the train/validation dataset will be resized to this"
+             " resolution.",
+             default=512,
+         ),
+         train_text_encoder: bool = Input(
+             description="Whether to train the text encoder",
+             default=True,
+         ),
+         train_batch_size: int = Input(
+             description="Batch size (per device) for the training dataloader.",
+             default=1,
+         ),
+         gradient_accumulation_steps: int = Input(
+             description="Number of update steps to accumulate before performing a backward/update pass.",
+             default=4,
+         ),
+         gradient_checkpointing: bool = Input(
+             description="Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass.",
+             default=False,
+         ),
+         scale_lr: bool = Input(
+             description="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
+             default=True,
+         ),
+         lr_scheduler: str = Input(
+             description="The scheduler type to use",
+             choices=[
+                 "linear",
+                 "cosine",
+                 "cosine_with_restarts",
+                 "polynomial",
+                 "constant",
+                 "constant_with_warmup",
+             ],
+             default="constant",
+         ),
+         lr_warmup_steps: int = Input(
+             description="Number of steps for the warmup in the lr scheduler.",
+             default=0,
+         ),
+         clip_ti_decay: bool = Input(
+             default=True,
+             description="Whether or not to perform the Bayesian Learning Rule on the norm of the CLIP latent.",
+         ),
+         color_jitter: bool = Input(
+             default=True,
+             description="Whether or not to use color jitter during augmentation.",
+         ),
+         continue_inversion: bool = Input(
+             default=False,
+             description="Whether or not to continue inversion.",
+         ),
+         continue_inversion_lr: float = Input(
+             default=1e-4,
+             description="The learning rate for continuing an inversion.",
+         ),
+         initializer_tokens: str = Input(
+             default=None,
+             description="The tokens to use for the initializer. If not provided, will randomly initialize from gaussian N(0,0.017^2).",
+         ),
+         learning_rate_text: float = Input(
+             default=1e-5,
+             description="The learning rate for the text encoder.",
+         ),
+         learning_rate_ti: float = Input(
+             default=5e-4,
+             description="The learning rate for the TI.",
+         ),
+         learning_rate_unet: float = Input(
+             default=1e-4,
+             description="The learning rate for the UNet.",
+         ),
+         lora_rank: int = Input(
+             default=4,
+             description="Rank of the LoRA. The larger it is, the more likely it is to capture fidelity, but the less editable it will be. A larger rank also increases the size of the output file.",
+         ),
+         lora_dropout_p: float = Input(
+             default=0.1,
+             description="Dropout for the LoRA layer. Reference the LoRA paper for more details.",
+         ),
+         lora_scale: float = Input(
+             default=1.0,
+             description="Scaling parameter at the end of the LoRA layer.",
+         ),
+         lr_scheduler_lora: str = Input(
+             description="The scheduler type to use",
+             choices=[
+                 "linear",
+                 "cosine",
+                 "cosine_with_restarts",
+                 "polynomial",
+                 "constant",
+                 "constant_with_warmup",
+             ],
+             default="constant",
+         ),
+         lr_warmup_steps_lora: int = Input(
+             description="Number of steps for the warmup in the lr scheduler.",
+             default=0,
+         ),
+         max_train_steps_ti: int = Input(
+             default=500,
+             description="The maximum number of training steps for the TI.",
+         ),
+         max_train_steps_tuning: int = Input(
+             default=1000,
+             description="The maximum number of training steps for the tuning.",
+         ),
+         placeholder_token_at_data: str = Input(
+             default=None,
+             description="If this value is provided as 'X|Y', the target word X will be transformed into Y in the caption. You are required to provide the caption as the filename (ignoring the extension), and Y has to contain the placeholder token below. You are also required to set `use_template` to `None` to use this feature.",
+         ),
+         placeholder_tokens: str = Input(
+             default="<s1>|<s2>",
+             description="The placeholder tokens to use for the initializer. If not provided, will use the first tokens of the data.",
+         ),
+         use_face_segmentation_condition: bool = Input(
+             default=False,
+             description="Whether or not to use the face segmentation condition.",
+         ),
+         use_template: str = Input(
+             default="object",
+             description="The template to use for the inversion.",
+             choices=[
+                 "object",
+                 "style",
+                 "none",
+             ],
+         ),
+         weight_decay_lora: float = Input(
+             default=0.001,
+             description="The weight decay for the LoRA loss.",
+         ),
+         weight_decay_ti: float = Input(
+             default=0.00,
+             description="The weight decay for the TI.",
+         ),
+     ) -> Path:
+         if seed is None:
+             seed = random_seed()
+         print(f"Using seed: {seed}")
+
+         assert (
+             train_text_encoder
+         ), "train_text_encoder must be True. This will be updated in the future."
+
+         # set up fresh working directories for the training run
+         cog_instance_data = "cog_instance_data"
+         cog_class_data = "cog_class_data"
+         cog_output_dir = "checkpoints"
+         clean_directories([cog_instance_data, cog_output_dir, cog_class_data])
+
+         extract_zip_and_flatten(instance_data, cog_instance_data)
+
+         if use_template == "none":
+             use_template = "null"
+
+         # some settings are fixed for the replicate model
+         lora_train(
+             pretrained_model_name_or_path="./stable-diffusion-v1-5-cache",
+             pretrained_vae_name_or_path=None,
+             revision=None,
+             instance_data_dir=cog_instance_data,
+             seed=seed,
+             resolution=resolution,
+             train_text_encoder=train_text_encoder,
+             train_batch_size=train_batch_size,
+             gradient_accumulation_steps=gradient_accumulation_steps,
+             gradient_checkpointing=gradient_checkpointing,
+             scale_lr=scale_lr,
+             lr_scheduler=lr_scheduler,
+             lr_warmup_steps=lr_warmup_steps,
+             use_8bit_adam=False,
+             mixed_precision="fp16",
+             output_dir=cog_output_dir,
+             clip_ti_decay=clip_ti_decay,
+             color_jitter=color_jitter,
+             continue_inversion=continue_inversion,
+             continue_inversion_lr=continue_inversion_lr,
+             device="cuda:0",
+             initializer_tokens=initializer_tokens,
+             learning_rate_text=learning_rate_text,
+             learning_rate_ti=learning_rate_ti,
+             learning_rate_unet=learning_rate_unet,
+             lora_clip_target_modules=TEXT_ENCODER_DEFAULT_TARGET_REPLACE,
+             lora_rank=lora_rank,
+             lora_dropout_p=lora_dropout_p,
+             lora_scale=lora_scale,
+             lora_unet_target_modules=UNET_DEFAULT_TARGET_REPLACE,
+             lr_scheduler_lora=lr_scheduler_lora,
+             lr_warmup_steps_lora=lr_warmup_steps_lora,
+             max_train_steps_ti=max_train_steps_ti,
+             max_train_steps_tuning=max_train_steps_tuning,
+             perform_inversion=True,
+             placeholder_token_at_data=placeholder_token_at_data,
+             placeholder_tokens=placeholder_tokens,
+             save_steps=max_train_steps_tuning,
+             use_face_segmentation_condition=use_face_segmentation_condition,
+             use_template=use_template,
+             weight_decay_lora=weight_decay_lora,
+             weight_decay_ti=weight_decay_ti,
+         )
+
+         gc.collect()
+         torch.cuda.empty_cache()
+
+         weights_path = (
+             Path(cog_output_dir) / f"step_{max_train_steps_tuning}.safetensors"
+         )
+         output_path = Path(cog_output_dir) / get_output_filename(instance_data)
+         weights_path.rename(output_path)
+
+         return output_path
predict_basic.py ADDED
@@ -0,0 +1,118 @@
+ import gc
+ import torch
+ from cog import BasePredictor, Input, Path
+ from lora_diffusion.cli_lora_pti import train as lora_train
+
+ from common import (
+     random_seed,
+     clean_directories,
+     extract_zip_and_flatten,
+     get_output_filename,
+ )
+
+
+ COMMON_PARAMETERS = {
+     "train_text_encoder": True,
+     "train_batch_size": 1,
+     "gradient_accumulation_steps": 2,
+     "gradient_checkpointing": False,
+     "lr_scheduler": "constant",
+     "scale_lr": True,
+     "lr_warmup_steps": 0,
+     "clip_ti_decay": True,
+     "color_jitter": True,
+     "continue_inversion": False,
+     "continue_inversion_lr": 1e-4,
+     "initializer_tokens": None,
+     "learning_rate_text": 1e-5,
+     "learning_rate_ti": 5e-4,
+     "learning_rate_unet": 2e-4,
+     "lr_scheduler_lora": "constant",
+     "lr_warmup_steps_lora": 0,
+     "max_train_steps_ti": 700,
+     "max_train_steps_tuning": 700,
+     "placeholder_token_at_data": None,
+     "placeholder_tokens": "<s1>|<s2>",
+     "weight_decay_lora": 0.001,
+     "weight_decay_ti": 0,
+ }
+
+
+ FACE_PARAMETERS = {
+     "use_face_segmentation_condition": True,
+     "use_template": "object",
+     "placeholder_tokens": "<s1>|<s2>",
+     "lora_rank": 16,
+ }
+
+ OBJECT_PARAMETERS = {
+     "use_face_segmentation_condition": False,
+     "use_template": "object",
+     "placeholder_tokens": "<s1>|<s2>",
+     "lora_rank": 8,
+ }
+
+ STYLE_PARAMETERS = {
+     "use_face_segmentation_condition": False,
+     "use_template": "style",
+     "placeholder_tokens": "<s1>|<s2>",
+     "lora_rank": 16,
+ }
+
+ TASK_PARAMETERS = {
+     "face": FACE_PARAMETERS,
+     "object": OBJECT_PARAMETERS,
+     "style": STYLE_PARAMETERS,
+ }
+
+
+ class Predictor(BasePredictor):
+     def predict(
+         self,
+         instance_data: Path = Input(
+             description="A ZIP file containing your training images (JPG, PNG, etc.; size not restricted). These images contain your 'subject' that you want the trained model to embed in the output domain for later generating customized scenes beyond the training images. For best results, use images without noise or unrelated objects in the background.",
+         ),
+         task: str = Input(
+             default="face",
+             choices=["face", "object", "style"],
+             description="Type of LoRA model you want to train",
+         ),
+         seed: int = Input(description="A seed for reproducible training", default=None),
+         resolution: int = Input(
+             description="The resolution for input images. All the images in the train/validation dataset will be resized to this"
+             " resolution.",
+             default=512,
+         ),
+     ) -> Path:
+         if seed is None:
+             seed = random_seed()
+         print(f"Using seed: {seed}")
+
+         cog_instance_data = "cog_instance_data"
+         cog_output_dir = "checkpoints"
+         clean_directories([cog_instance_data, cog_output_dir])
+
+         params = {k: v for k, v in TASK_PARAMETERS[task].items()}
+         params.update(COMMON_PARAMETERS)
+         params.update(
+             {
+                 "pretrained_model_name_or_path": "./stable-diffusion-v1-5-cache",
+                 "instance_data_dir": cog_instance_data,
+                 "output_dir": cog_output_dir,
+                 "resolution": resolution,
+                 "seed": seed,
+             }
+         )
+
+         extract_zip_and_flatten(instance_data, cog_instance_data)
+
+         lora_train(**params)
+         gc.collect()
+         torch.cuda.empty_cache()
+
+         num_steps = COMMON_PARAMETERS["max_train_steps_tuning"]
+         weights_path = Path(cog_output_dir) / f"step_{num_steps}.safetensors"
+         output_path = Path(cog_output_dir) / get_output_filename(instance_data)
+         weights_path.rename(output_path)
+
+         return output_path
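One detail of `predict_basic.py` worth noting: the task-specific dict is copied first and `COMMON_PARAMETERS` is applied on top, so any key present in both (here only `placeholder_tokens`, with identical values) resolves to the common value. A minimal sketch of that precedence with made-up values:

```python
# Merge order used in predict_basic.py: task defaults first, then COMMON_PARAMETERS.
task_params = {"lora_rank": 16, "placeholder_tokens": "<s1>|<s2>"}
common_params = {"train_batch_size": 1, "placeholder_tokens": "<s1>|<s2>"}

params = dict(task_params)
params.update(common_params)  # keys present in both dicts take the common value

print(params["lora_rank"], params["train_batch_size"])  # 16 1
```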
script/download-weights ADDED
@@ -0,0 +1,34 @@
+ #!/usr/bin/env python
+
+
+ import os
+ import sys
+ import torch
+ from diffusers import StableDiffusionPipeline
+ from diffusers import (
+     AutoencoderKL,
+     DDIMScheduler,
+     DDPMScheduler,
+     StableDiffusionPipeline,
+     UNet2DConditionModel,
+ )
+
+ from transformers import CLIPTextModel, CLIPTokenizer
+
+
+ cache_dir = "ssd-cache"
+ vae_cache_dir = "sd-vae-ft-mse-cache"
+ os.makedirs(cache_dir, exist_ok=True)
+ os.makedirs(vae_cache_dir, exist_ok=True)
+
+ pipe = StableDiffusionPipeline.from_pretrained(
+     "segmind/SSD-1B-fp32",
+ )
+
+ pipe.save_pretrained(cache_dir)
+
+
+ pretrained_vae = AutoencoderKL.from_pretrained(
+     "madebyollin/sdxl-vae-fp16-fix", subfolder=None, revision=None
+ )
+ pretrained_vae.save_pretrained(vae_cache_dir)