feat: upload kedama lora model
Browse files
kedama_config/config_file.toml
ADDED
@@ -0,0 +1,58 @@
[model_arguments]
v2 = false
v_parameterization = false
pretrained_model_name_or_path = "/content/pretrained_model/Animefull-final-pruned.ckpt"

[additional_network_arguments]
no_metadata = false
unet_lr = 1.0
text_encoder_lr = 1.0
network_module = "networks.lora"
network_dim = 64
network_alpha = 1
network_train_unet_only = false
network_train_text_encoder_only = false

[optimizer_arguments]
optimizer_type = "Prodigy"
learning_rate = 1.0
max_grad_norm = 1.0
optimizer_args = [ "decouple=True", "weight_decay=0.01", "d_coef=2", "use_bias_correction=True", "safeguard_warmup=True", "betas=0.9,0.99",]
lr_scheduler = "constant_with_warmup"
lr_warmup_steps = 100

[dataset_arguments]
cache_latents = true
debug_dataset = false
vae_batch_size = 4

[training_arguments]
output_dir = "/content/LoRA/output"
output_name = "kedama"
save_precision = "fp16"
save_every_n_epochs = 10
train_batch_size = 6
max_token_length = 225
mem_eff_attn = false
xformers = true
max_train_epochs = 20
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
seed = 31337
gradient_checkpointing = false
gradient_accumulation_steps = 1
mixed_precision = "fp16"
clip_skip = 2
logging_dir = "/content/LoRA/logs"
log_prefix = "kedama"
lowram = true

[sample_prompt_arguments]
sample_every_n_epochs = 5
sample_sampler = "ddim"

[dreambooth_arguments]
prior_loss_weight = 1.0

[saving_arguments]
save_model_as = "safetensors"
kedama_config/dataset_config.toml
ADDED
@@ -0,0 +1,22 @@
[[datasets]]
resolution = 576
min_bucket_reso = 256
max_bucket_reso = 1024
caption_dropout_rate = 0
caption_tag_dropout_rate = 0
caption_dropout_every_n_epochs = 0
flip_aug = false
color_aug = false
[[datasets.subsets]]
image_dir = "/content/LoRA/train_data"
class_tokens = "mksks"
num_repeats = 1

[general]
enable_bucket = true
caption_extension = ".txt"
shuffle_caption = true
keep_tokens = 1
bucket_reso_steps = 64
bucket_no_upscale = false
kedama_config/sample_prompt.txt
ADDED
@@ -0,0 +1,2 @@

masterpiece, best quality,kedama, 1girl, :o, belt collar, bikini, bow, breasts, cameltoe, collar, demon girl, demon tail, falling petals, female focus, heterochromia, horns, loli, long hair, looking at viewer, navel, nipples, panties, panty pull, petals, pointy ears, red eyes, seiza, shrug (clothing), side-tie panties, sidelocks, sitting, small breasts, solo, swimsuit, tail, thighhighs, two side up, underwear, untied bikini, untied panties, yellow eyes, --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry --w 512 --h 768 --l 7 --s 28