Upload lora-scripts/sd-scripts/library/config_util.py with huggingface_hub
lora-scripts/sd-scripts/library/config_util.py
ADDED
@@ -0,0 +1,714 @@
import argparse
from dataclasses import (
    asdict,
    dataclass,
)
import functools
import random
from textwrap import dedent, indent
import json
from pathlib import Path

# from toolz import curry
from typing import (
    List,
    Optional,
    Sequence,
    Tuple,
    Union,
)

import toml
import voluptuous
from voluptuous import (
    Any,
    ExactSequence,
    MultipleInvalid,
    Object,
    Required,
    Schema,
)
from transformers import CLIPTokenizer

from . import train_util
from .train_util import (
    DreamBoothSubset,
    FineTuningSubset,
    ControlNetSubset,
    DreamBoothDataset,
    FineTuningDataset,
    ControlNetDataset,
    DatasetGroup,
)
from .utils import setup_logging

setup_logging()
import logging

logger = logging.getLogger(__name__)


def add_config_arguments(parser: argparse.ArgumentParser):
    parser.add_argument(
        "--dataset_config", type=Path, default=None, help="config file for detail settings / 詳細な設定用の設定ファイル"
    )


# TODO: inherit Params class in Subset, Dataset


@dataclass
class BaseSubsetParams:
    image_dir: Optional[str] = None
    num_repeats: int = 1
    shuffle_caption: bool = False
    caption_separator: str = ","
    keep_tokens: int = 0
    keep_tokens_separator: Optional[str] = None
    secondary_separator: Optional[str] = None
    enable_wildcard: bool = False
    color_aug: bool = False
    flip_aug: bool = False
    face_crop_aug_range: Optional[Tuple[float, float]] = None
    random_crop: bool = False
    caption_prefix: Optional[str] = None
    caption_suffix: Optional[str] = None
    caption_dropout_rate: float = 0.0
    caption_dropout_every_n_epochs: int = 0
    caption_tag_dropout_rate: float = 0.0
    token_warmup_min: int = 1
    token_warmup_step: float = 0


@dataclass
class DreamBoothSubsetParams(BaseSubsetParams):
    is_reg: bool = False
    class_tokens: Optional[str] = None
    caption_extension: str = ".caption"
    cache_info: bool = False


@dataclass
class FineTuningSubsetParams(BaseSubsetParams):
    metadata_file: Optional[str] = None


@dataclass
class ControlNetSubsetParams(BaseSubsetParams):
    conditioning_data_dir: Optional[str] = None
    caption_extension: str = ".caption"
    cache_info: bool = False


@dataclass
class BaseDatasetParams:
    tokenizer: Optional[Union[CLIPTokenizer, List[CLIPTokenizer]]] = None
    max_token_length: Optional[int] = None
    resolution: Optional[Tuple[int, int]] = None
    network_multiplier: float = 1.0
    debug_dataset: bool = False


@dataclass
class DreamBoothDatasetParams(BaseDatasetParams):
    batch_size: int = 1
    enable_bucket: bool = False
    min_bucket_reso: int = 256
    max_bucket_reso: int = 1024
    bucket_reso_steps: int = 64
    bucket_no_upscale: bool = False
    prior_loss_weight: float = 1.0


@dataclass
class FineTuningDatasetParams(BaseDatasetParams):
    batch_size: int = 1
    enable_bucket: bool = False
    min_bucket_reso: int = 256
    max_bucket_reso: int = 1024
    bucket_reso_steps: int = 64
    bucket_no_upscale: bool = False


@dataclass
class ControlNetDatasetParams(BaseDatasetParams):
    batch_size: int = 1
    enable_bucket: bool = False
    min_bucket_reso: int = 256
    max_bucket_reso: int = 1024
    bucket_reso_steps: int = 64
    bucket_no_upscale: bool = False


@dataclass
class SubsetBlueprint:
    params: Union[DreamBoothSubsetParams, FineTuningSubsetParams]


@dataclass
class DatasetBlueprint:
    is_dreambooth: bool
    is_controlnet: bool
    params: Union[DreamBoothDatasetParams, FineTuningDatasetParams]
    subsets: Sequence[SubsetBlueprint]


@dataclass
class DatasetGroupBlueprint:
    datasets: Sequence[DatasetBlueprint]


@dataclass
class Blueprint:
    dataset_group: DatasetGroupBlueprint


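# The dataclasses above form the tree that a validated config is lowered into
# before any dataset object is built:
#
#   Blueprint
#   └── DatasetGroupBlueprint
#       └── DatasetBlueprint   (one per entry under "datasets")
#           └── SubsetBlueprint  (one per entry under "subsets")
#
# The blueprint layer carries only plain parameter dataclasses; the concrete
# Dataset/Subset instances are built later by generate_dataset_group_by_blueprint.

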
class ConfigSanitizer:
    # @curry
    @staticmethod
    def __validate_and_convert_twodim(klass, value: Sequence) -> Tuple:
        Schema(ExactSequence([klass, klass]))(value)
        return tuple(value)

    # @curry
    @staticmethod
    def __validate_and_convert_scalar_or_twodim(klass, value: Union[float, Sequence]) -> Tuple:
        Schema(Any(klass, ExactSequence([klass, klass])))(value)
        try:
            Schema(klass)(value)
            return (value, value)
        except voluptuous.Invalid:
            return ConfigSanitizer.__validate_and_convert_twodim(klass, value)

    # subset schema
    SUBSET_ASCENDABLE_SCHEMA = {
        "color_aug": bool,
        "face_crop_aug_range": functools.partial(__validate_and_convert_twodim.__func__, float),
        "flip_aug": bool,
        "num_repeats": int,
        "random_crop": bool,
        "shuffle_caption": bool,
        "keep_tokens": int,
        "keep_tokens_separator": str,
        "secondary_separator": str,
        "enable_wildcard": bool,
        "token_warmup_min": int,
        "token_warmup_step": Any(float, int),
        "caption_prefix": str,
        "caption_suffix": str,
    }
    # DO means DropOut
    DO_SUBSET_ASCENDABLE_SCHEMA = {
        "caption_dropout_every_n_epochs": int,
        "caption_dropout_rate": Any(float, int),
        "caption_tag_dropout_rate": Any(float, int),
    }
    # DB means DreamBooth
    DB_SUBSET_ASCENDABLE_SCHEMA = {
        "caption_extension": str,
        "class_tokens": str,
        "cache_info": bool,
    }
    DB_SUBSET_DISTINCT_SCHEMA = {
        Required("image_dir"): str,
        "is_reg": bool,
    }
    # FT means FineTuning
    FT_SUBSET_DISTINCT_SCHEMA = {
        Required("metadata_file"): str,
        "image_dir": str,
    }
    CN_SUBSET_ASCENDABLE_SCHEMA = {
        "caption_extension": str,
        "cache_info": bool,
    }
    CN_SUBSET_DISTINCT_SCHEMA = {
        Required("image_dir"): str,
        Required("conditioning_data_dir"): str,
    }

    # datasets schema
    DATASET_ASCENDABLE_SCHEMA = {
        "batch_size": int,
        "bucket_no_upscale": bool,
        "bucket_reso_steps": int,
        "enable_bucket": bool,
        "max_bucket_reso": int,
        "min_bucket_reso": int,
        "resolution": functools.partial(__validate_and_convert_scalar_or_twodim.__func__, int),
        "network_multiplier": float,
    }

    # options handled by argparse but not handled by user config
    ARGPARSE_SPECIFIC_SCHEMA = {
        "debug_dataset": bool,
        "max_token_length": Any(None, int),
        "prior_loss_weight": Any(float, int),
    }
    # for handling the default None value of argparse
    ARGPARSE_NULLABLE_OPTNAMES = [
        "face_crop_aug_range",
        "resolution",
    ]
    # prepare a map because option names may differ between argparse and user config
    ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME = {
        "train_batch_size": "batch_size",
        "dataset_repeats": "num_repeats",
    }

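    # How these schemas compose (a summary, based on the merging in __init__ below
    # and on BlueprintGenerator.generate_params_by_fallbacks): "ASCENDABLE" keys may
    # also be written at a higher level (dataset or general) and cascade down to
    # every subset, while "DISTINCT" keys are only valid at the level that owns
    # them. Values are resolved in the order
    #   subset -> dataset -> general -> argparse -> runtime params,
    # with the first non-None hit winning.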
    def __init__(self, support_dreambooth: bool, support_finetuning: bool, support_controlnet: bool, support_dropout: bool) -> None:
        assert support_dreambooth or support_finetuning or support_controlnet, (
            "Neither DreamBooth mode nor fine tuning mode nor controlnet mode specified. Please specify one mode or more."
            + " / DreamBooth モードか fine tuning モードか controlnet モードのどれも指定されていません。1つ以上指定してください。"
        )

        self.db_subset_schema = self.__merge_dict(
            self.SUBSET_ASCENDABLE_SCHEMA,
            self.DB_SUBSET_DISTINCT_SCHEMA,
            self.DB_SUBSET_ASCENDABLE_SCHEMA,
            self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},
        )

        self.ft_subset_schema = self.__merge_dict(
            self.SUBSET_ASCENDABLE_SCHEMA,
            self.FT_SUBSET_DISTINCT_SCHEMA,
            self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},
        )

        self.cn_subset_schema = self.__merge_dict(
            self.SUBSET_ASCENDABLE_SCHEMA,
            self.CN_SUBSET_DISTINCT_SCHEMA,
            self.CN_SUBSET_ASCENDABLE_SCHEMA,
            self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},
        )

        self.db_dataset_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
            self.SUBSET_ASCENDABLE_SCHEMA,
            self.DB_SUBSET_ASCENDABLE_SCHEMA,
            self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},
            {"subsets": [self.db_subset_schema]},
        )

        self.ft_dataset_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
            self.SUBSET_ASCENDABLE_SCHEMA,
            self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},
            {"subsets": [self.ft_subset_schema]},
        )

        self.cn_dataset_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
            self.SUBSET_ASCENDABLE_SCHEMA,
            self.CN_SUBSET_ASCENDABLE_SCHEMA,
            self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},
            {"subsets": [self.cn_subset_schema]},
        )

        if support_dreambooth and support_finetuning:

            def validate_flex_dataset(dataset_config: dict):
                subsets_config = dataset_config.get("subsets", [])

                if support_controlnet and all(["conditioning_data_dir" in subset for subset in subsets_config]):
                    return Schema(self.cn_dataset_schema)(dataset_config)
                # check dataset meets FT style
                # NOTE: all FT subsets should have "metadata_file"
                elif all(["metadata_file" in subset for subset in subsets_config]):
                    return Schema(self.ft_dataset_schema)(dataset_config)
                # check dataset meets DB style
                # NOTE: all DB subsets should have no "metadata_file"
                elif all(["metadata_file" not in subset for subset in subsets_config]):
                    return Schema(self.db_dataset_schema)(dataset_config)
                else:
                    raise voluptuous.Invalid(
                        "DreamBooth subset and fine tuning subset cannot be mixed in the same dataset. Please split them into separate datasets. / DreamBoothのサブセットとfine tuningのサブセットを同一のデータセットに混在させることはできません。別々のデータセットに分割してください。"
                    )

            self.dataset_schema = validate_flex_dataset
        elif support_dreambooth:
            if support_controlnet:
                self.dataset_schema = self.cn_dataset_schema
            else:
                self.dataset_schema = self.db_dataset_schema
        elif support_finetuning:
            self.dataset_schema = self.ft_dataset_schema
        elif support_controlnet:
            self.dataset_schema = self.cn_dataset_schema

        self.general_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
            self.SUBSET_ASCENDABLE_SCHEMA,
            self.DB_SUBSET_ASCENDABLE_SCHEMA if support_dreambooth else {},
            self.CN_SUBSET_ASCENDABLE_SCHEMA if support_controlnet else {},
            self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},
        )

        self.user_config_validator = Schema(
            {
                "general": self.general_schema,
                "datasets": [self.dataset_schema],
            }
        )

        self.argparse_schema = self.__merge_dict(
            self.general_schema,
            self.ARGPARSE_SPECIFIC_SCHEMA,
            {optname: Any(None, self.general_schema[optname]) for optname in self.ARGPARSE_NULLABLE_OPTNAMES},
            {a_name: self.general_schema[c_name] for a_name, c_name in self.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME.items()},
        )

        self.argparse_config_validator = Schema(Object(self.argparse_schema), extra=voluptuous.ALLOW_EXTRA)

    def sanitize_user_config(self, user_config: dict) -> dict:
        try:
            return self.user_config_validator(user_config)
        except MultipleInvalid:
            # TODO: make the error message easier to understand
            logger.error("Invalid user config / ユーザ設定の形式が正しくないようです")
            raise

    # NOTE: by nature, the argument parser result does not need to be sanitized,
    # but doing so helps us detect program bugs
    def sanitize_argparse_namespace(self, argparse_namespace: argparse.Namespace) -> argparse.Namespace:
        try:
            return self.argparse_config_validator(argparse_namespace)
        except MultipleInvalid:
            # XXX: this should be a bug
            logger.error(
                "Invalid cmdline parsed arguments. This should be a bug. / コマンドラインのパース結果が正しくないようです。プログラムのバグの可能性が高いです。"
            )
            raise

    # NOTE: a value is overwritten by a later dict if the key already exists
    @staticmethod
    def __merge_dict(*dict_list: dict) -> dict:
        merged = {}
        for schema in dict_list:
            # merged |= schema
            for k, v in schema.items():
                merged[k] = v
        return merged


class BlueprintGenerator:
    BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME = {}

    def __init__(self, sanitizer: ConfigSanitizer):
        self.sanitizer = sanitizer

    # runtime_params is for parameters that are only configurable at runtime, such as tokenizer
    def generate(self, user_config: dict, argparse_namespace: argparse.Namespace, **runtime_params) -> Blueprint:
        sanitized_user_config = self.sanitizer.sanitize_user_config(user_config)
        sanitized_argparse_namespace = self.sanitizer.sanitize_argparse_namespace(argparse_namespace)

        # convert the argparse namespace to a dict-like config
        # NOTE: it is ok to have extra entries in the dict
        optname_map = self.sanitizer.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME
        argparse_config = {
            optname_map.get(optname, optname): value for optname, value in vars(sanitized_argparse_namespace).items()
        }

        general_config = sanitized_user_config.get("general", {})

        dataset_blueprints = []
        for dataset_config in sanitized_user_config.get("datasets", []):
            # NOTE: if subsets have no "metadata_file", these are DreamBooth datasets/subsets
            subsets = dataset_config.get("subsets", [])
            is_dreambooth = all(["metadata_file" not in subset for subset in subsets])
            is_controlnet = all(["conditioning_data_dir" in subset for subset in subsets])
            if is_controlnet:
                subset_params_klass = ControlNetSubsetParams
                dataset_params_klass = ControlNetDatasetParams
            elif is_dreambooth:
                subset_params_klass = DreamBoothSubsetParams
                dataset_params_klass = DreamBoothDatasetParams
            else:
                subset_params_klass = FineTuningSubsetParams
                dataset_params_klass = FineTuningDatasetParams

            subset_blueprints = []
            for subset_config in subsets:
                params = self.generate_params_by_fallbacks(
                    subset_params_klass, [subset_config, dataset_config, general_config, argparse_config, runtime_params]
                )
                subset_blueprints.append(SubsetBlueprint(params))

            params = self.generate_params_by_fallbacks(
                dataset_params_klass, [dataset_config, general_config, argparse_config, runtime_params]
            )
            dataset_blueprints.append(DatasetBlueprint(is_dreambooth, is_controlnet, params, subset_blueprints))

        dataset_group_blueprint = DatasetGroupBlueprint(dataset_blueprints)

        return Blueprint(dataset_group_blueprint)

    @staticmethod
    def generate_params_by_fallbacks(param_klass, fallbacks: Sequence[dict]):
        name_map = BlueprintGenerator.BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME
        search_value = BlueprintGenerator.search_value
        default_params = asdict(param_klass())
        param_names = default_params.keys()

        params = {name: search_value(name_map.get(name, name), fallbacks, default_params.get(name)) for name in param_names}

        return param_klass(**params)

    @staticmethod
    def search_value(key: str, fallbacks: Sequence[dict], default_value=None):
        for cand in fallbacks:
            value = cand.get(key)
            if value is not None:
                return value

        return default_value


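# A minimal sketch of how BlueprintGenerator resolves a value through its
# fallbacks (hypothetical inputs, not part of this module): given
#   subset_config  = {"num_repeats": 10}
#   dataset_config = {"resolution": 512}
#   general_config = {"flip_aug": True}
# generate_params_by_fallbacks(DreamBoothSubsetParams, [subset_config,
# dataset_config, general_config, argparse_config, runtime_params]) takes
# num_repeats from the subset and flip_aug from general; everything else falls
# through to the argparse config or the dataclass defaults.

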
def generate_dataset_group_by_blueprint(dataset_group_blueprint: DatasetGroupBlueprint):
    datasets: List[Union[DreamBoothDataset, FineTuningDataset, ControlNetDataset]] = []

    for dataset_blueprint in dataset_group_blueprint.datasets:
        if dataset_blueprint.is_controlnet:
            subset_klass = ControlNetSubset
            dataset_klass = ControlNetDataset
        elif dataset_blueprint.is_dreambooth:
            subset_klass = DreamBoothSubset
            dataset_klass = DreamBoothDataset
        else:
            subset_klass = FineTuningSubset
            dataset_klass = FineTuningDataset

        subsets = [subset_klass(**asdict(subset_blueprint.params)) for subset_blueprint in dataset_blueprint.subsets]
        dataset = dataset_klass(subsets=subsets, **asdict(dataset_blueprint.params))
        datasets.append(dataset)

    # print info
    info = ""
    for i, dataset in enumerate(datasets):
        is_dreambooth = isinstance(dataset, DreamBoothDataset)
        is_controlnet = isinstance(dataset, ControlNetDataset)
        info += dedent(
            f"""\
            [Dataset {i}]
              batch_size: {dataset.batch_size}
              resolution: {(dataset.width, dataset.height)}
              enable_bucket: {dataset.enable_bucket}
              network_multiplier: {dataset.network_multiplier}
            """
        )

        if dataset.enable_bucket:
            info += indent(
                dedent(
                    f"""\
                    min_bucket_reso: {dataset.min_bucket_reso}
                    max_bucket_reso: {dataset.max_bucket_reso}
                    bucket_reso_steps: {dataset.bucket_reso_steps}
                    bucket_no_upscale: {dataset.bucket_no_upscale}
                    \n"""
                ),
                "  ",
            )
        else:
            info += "\n"

        for j, subset in enumerate(dataset.subsets):
            info += indent(
                dedent(
                    f"""\
                    [Subset {j} of Dataset {i}]
                      image_dir: "{subset.image_dir}"
                      image_count: {subset.img_count}
                      num_repeats: {subset.num_repeats}
                      shuffle_caption: {subset.shuffle_caption}
                      keep_tokens: {subset.keep_tokens}
                      keep_tokens_separator: {subset.keep_tokens_separator}
                      secondary_separator: {subset.secondary_separator}
                      enable_wildcard: {subset.enable_wildcard}
                      caption_dropout_rate: {subset.caption_dropout_rate}
                      caption_dropout_every_n_epochs: {subset.caption_dropout_every_n_epochs}
                      caption_tag_dropout_rate: {subset.caption_tag_dropout_rate}
                      caption_prefix: {subset.caption_prefix}
                      caption_suffix: {subset.caption_suffix}
                      color_aug: {subset.color_aug}
                      flip_aug: {subset.flip_aug}
                      face_crop_aug_range: {subset.face_crop_aug_range}
                      random_crop: {subset.random_crop}
                      token_warmup_min: {subset.token_warmup_min}
                      token_warmup_step: {subset.token_warmup_step}
                    """
                ),
                "  ",
            )

            if is_dreambooth:
                info += indent(
                    dedent(
                        f"""\
                        is_reg: {subset.is_reg}
                        class_tokens: {subset.class_tokens}
                        caption_extension: {subset.caption_extension}
                        \n"""
                    ),
                    "    ",
                )
            elif not is_controlnet:
                info += indent(
                    dedent(
                        f"""\
                        metadata_file: {subset.metadata_file}
                        \n"""
                    ),
                    "    ",
                )

    logger.info(f"{info}")

    # make buckets first because it determines the length of the dataset,
    # and set the same seed for all datasets
    seed = random.randint(0, 2**31)  # actual seed is seed + epoch_no
    for i, dataset in enumerate(datasets):
        logger.info(f"[Dataset {i}]")
        dataset.make_buckets()
        dataset.set_seed(seed)

    return DatasetGroup(datasets)


def generate_dreambooth_subsets_config_by_subdirs(train_data_dir: Optional[str] = None, reg_data_dir: Optional[str] = None):
    def extract_dreambooth_params(name: str) -> Tuple[int, str]:
        tokens = name.split("_")
        try:
            n_repeats = int(tokens[0])
        except ValueError:
            logger.warning(f"ignore directory without repeats / 繰り返し回数のないディレクトリを無視します: {name}")
            return 0, ""
        caption_by_folder = "_".join(tokens[1:])
        return n_repeats, caption_by_folder

    def generate(base_dir: Optional[str], is_reg: bool):
        if base_dir is None:
            return []

        base_dir: Path = Path(base_dir)
        if not base_dir.is_dir():
            return []

        subsets_config = []
        for subdir in base_dir.iterdir():
            if not subdir.is_dir():
                continue

            num_repeats, class_tokens = extract_dreambooth_params(subdir.name)
            if num_repeats < 1:
                continue

            subset_config = {"image_dir": str(subdir), "num_repeats": num_repeats, "is_reg": is_reg, "class_tokens": class_tokens}
            subsets_config.append(subset_config)

        return subsets_config

    subsets_config = []
    subsets_config += generate(train_data_dir, False)
    subsets_config += generate(reg_data_dir, True)

    return subsets_config


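# For example (hypothetical layout), generate_dreambooth_subsets_config_by_subdirs
# maps subdirectories named "<repeats>_<class tokens>" to subsets:
#   train_data/10_sks dog/ -> {"image_dir": ..., "num_repeats": 10, "class_tokens": "sks dog", "is_reg": False}
#   train_data/5_style/    -> {"image_dir": ..., "num_repeats": 5, "class_tokens": "style", "is_reg": False}
# Directories without the leading repeat count are skipped with a warning.

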
def generate_controlnet_subsets_config_by_subdirs(
    train_data_dir: Optional[str] = None, conditioning_data_dir: Optional[str] = None, caption_extension: str = ".txt"
):
    def generate(base_dir: Optional[str]):
        if base_dir is None:
            return []

        base_dir: Path = Path(base_dir)
        if not base_dir.is_dir():
            return []

        subsets_config = []
        subset_config = {
            "image_dir": train_data_dir,
            "conditioning_data_dir": conditioning_data_dir,
            "caption_extension": caption_extension,
            "num_repeats": 1,
        }
        subsets_config.append(subset_config)

        return subsets_config

    subsets_config = []
    subsets_config += generate(train_data_dir)

    return subsets_config


def load_user_config(file: str) -> dict:
    file: Path = Path(file)
    if not file.is_file():
        raise ValueError(f"file not found / ファイルが見つかりません: {file}")

    if file.name.lower().endswith(".json"):
        try:
            with open(file, "r") as f:
                config = json.load(f)
        except Exception:
            logger.error(
                f"Error on parsing JSON config file. Please check the format. / JSON 形式の設定ファイルの読み込みに失敗しました。文法が正しいか確認してください。: {file}"
            )
            raise
    elif file.name.lower().endswith(".toml"):
        try:
            config = toml.load(file)
        except Exception:
            logger.error(
                f"Error on parsing TOML config file. Please check the format. / TOML 形式の設定ファイルの読み込みに失敗しました。文法が正しいか確認してください。: {file}"
            )
            raise
    else:
        raise ValueError(f"not supported config file format / 対応していない設定ファイルの形式です: {file}")

    return config


# for config test
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--support_dreambooth", action="store_true")
    parser.add_argument("--support_finetuning", action="store_true")
    parser.add_argument("--support_controlnet", action="store_true")
    parser.add_argument("--support_dropout", action="store_true")
    parser.add_argument("dataset_config")
    config_args, remain = parser.parse_known_args()

    parser = argparse.ArgumentParser()
    train_util.add_dataset_arguments(
        parser, config_args.support_dreambooth, config_args.support_finetuning, config_args.support_dropout
    )
    train_util.add_training_arguments(parser, config_args.support_dreambooth)
    argparse_namespace = parser.parse_args(remain)
    train_util.prepare_dataset_args(argparse_namespace, config_args.support_finetuning)

    logger.info("[argparse_namespace]")
    logger.info(f"{vars(argparse_namespace)}")

    user_config = load_user_config(config_args.dataset_config)

    logger.info("")
    logger.info("[user_config]")
    logger.info(f"{user_config}")

    sanitizer = ConfigSanitizer(
        config_args.support_dreambooth, config_args.support_finetuning, config_args.support_controlnet, config_args.support_dropout
    )
    sanitized_user_config = sanitizer.sanitize_user_config(user_config)

    logger.info("")
    logger.info("[sanitized_user_config]")
    logger.info(f"{sanitized_user_config}")

    blueprint = BlueprintGenerator(sanitizer).generate(user_config, argparse_namespace)

    logger.info("")
    logger.info("[blueprint]")
    logger.info(f"{blueprint}")
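
# A minimal example of the TOML this module consumes (paths and values are
# illustrative only, not part of this file):
#
#   [general]
#   enable_bucket = true
#   resolution = 512
#
#   [[datasets]]
#   batch_size = 2
#
#   [[datasets.subsets]]
#   image_dir = "train_data/10_sks dog"
#   num_repeats = 10
#
# Such a file is read with load_user_config(path), validated by ConfigSanitizer,
# lowered to a Blueprint by BlueprintGenerator.generate(user_config,
# argparse_namespace, tokenizer=tokenizer), and finally turned into concrete
# datasets by generate_dataset_group_by_blueprint(blueprint.dataset_group).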