feat: upload ishikei lora model
Browse files
ishikei_config/config_file.toml
ADDED
@@ -0,0 +1,58 @@
[model_arguments]
v2 = false
v_parameterization = false
pretrained_model_name_or_path = "/content/pretrained_model/AnyLoRA.safetensors"

[additional_network_arguments]
no_metadata = false
unet_lr = 1.0
text_encoder_lr = 0.5
network_module = "lycoris.kohya"
network_dim = 128
network_alpha = 128
network_args = [ "conv_dim=128", "conv_alpha=128", "algo=lora",]
network_train_unet_only = false
network_train_text_encoder_only = false

[optimizer_arguments]
optimizer_type = "DAdaptation"
learning_rate = 1.0
max_grad_norm = 1.0
optimizer_args = [ "decouple=True", "weight_decay=0.01", "betas=0.9,0.99",]
lr_scheduler = "constant"
lr_warmup_steps = 0

[dataset_arguments]
cache_latents = true
debug_dataset = false
vae_batch_size = 4

[training_arguments]
output_dir = "/content/LoRA/output"
output_name = "ishikei"
save_precision = "fp16"
save_every_n_epochs = 2
train_batch_size = 3
max_token_length = 225
mem_eff_attn = false
xformers = true
max_train_epochs = 10
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
gradient_checkpointing = false
gradient_accumulation_steps = 1
mixed_precision = "fp16"
clip_skip = 2
logging_dir = "/content/LoRA/logs"
log_prefix = "ishikei"
lowram = true

[sample_prompt_arguments]
sample_every_n_epochs = 1
sample_sampler = "ddim"

[dreambooth_arguments]
prior_loss_weight = 1.0

[saving_arguments]
save_model_as = "safetensors"
ishikei_config/dataset_config.toml
ADDED
@@ -0,0 +1,22 @@
[[datasets]]
resolution = 512
min_bucket_reso = 256
max_bucket_reso = 1024
caption_dropout_rate = 0
caption_tag_dropout_rate = 0
caption_dropout_every_n_epochs = 0
flip_aug = false
color_aug = false
[[datasets.subsets]]
image_dir = "/content/LoRA/train_data"
class_tokens = "mksks"
num_repeats = 10


[general]
enable_bucket = true
caption_extension = ".txt"
shuffle_caption = true
keep_tokens = 1
bucket_reso_steps = 64
bucket_no_upscale = false
ishikei_config/sample_prompt.txt
ADDED
@@ -0,0 +1,2 @@

masterpiece, best quality,ishikei, 1girl, --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry --w 512 --h 768 --l 7 --s 28