import gc
import random
from collections import OrderedDict

import torch
from torch.utils.data import DataLoader

from toolkit.prompt_utils import concat_prompt_embeds, split_prompt_embeds
from toolkit.stable_diffusion_model import StableDiffusion, BlankNetwork
from toolkit.train_tools import get_torch_dtype, apply_snr_weight
from jobs.process import BaseSDTrainProcess
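
# Overview: this process trains the attached network (e.g. a LoRA) to swap one
# concept for another. Each step builds a "concept" prompt and a matching
# "replacement" prompt, computes the noise prediction for the replacement
# prompt with the network disabled, and trains the network so that the concept
# prompt reproduces that prediction.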


def flush():
    # free cached GPU memory and collect unreferenced objects between the
    # memory-heavy steps below
    torch.cuda.empty_cache()
    gc.collect()


class ConceptReplacementConfig:
    def __init__(self, **kwargs):
        # comma-separated terms describing the concept to be replaced
        self.concept: str = kwargs.get('concept', '')
        # comma-separated terms describing the concept to replace it with
        self.replacement: str = kwargs.get('replacement', '')
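
# A hypothetical "replacements" section feeding the class above might look
# like this in the process config (values are illustrative only):
#
#   replacements:
#     - concept: "dog, puppy"
#       replacement: "cat, kitten"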


class ConceptReplacer(BaseSDTrainProcess):

    def __init__(self, process_id: int, job, config: OrderedDict, **kwargs):
        super().__init__(process_id, job, config, **kwargs)
        # parse the raw "replacements" config entries into typed objects
        replacement_list = self.config.get('replacements', [])
        self.replacement_list = [ConceptReplacementConfig(**x) for x in replacement_list]

    def before_model_load(self):
        pass

    def hook_before_train_loop(self):
        # the VAE is only used for encoding here; keep it frozen in eval mode
        self.sd.vae.eval()
        self.sd.vae.to(self.device_torch)

        if self.embedding is not None:
            # a trainable embedding needs the text encoder in train mode
            self.sd.text_encoder.train()

    def hook_train_loop(self, batch):
        with torch.no_grad():
            dtype = get_torch_dtype(self.train_config.dtype)
            noisy_latents, noise, timesteps, conditioned_prompts, imgs = self.process_general_training_batch(batch)
            network_weight_list = batch.get_network_weight_list()

            # fall back to a no-op network when none is attached
            network = self.network if self.network is not None else BlankNetwork()

            # pick a random concept/replacement pair for each prompt in the batch
            batch_replacement_list = [random.choice(self.replacement_list) for _ in conditioned_prompts]

            concept_prompts = []
            replacement_prompts = []
            for idx, replacement in enumerate(batch_replacement_list):
                prompt = conditioned_prompts[idx]

                # shuffle the comma-separated concept terms and sandwich the
                # prompt between them (see the worked example below)
                shuffled_concept = [x.strip() for x in replacement.concept.split(',')]
                random.shuffle(shuffled_concept)
                shuffled_concept = ', '.join(shuffled_concept)
                concept_prompts.append(f"{shuffled_concept}, {prompt}, {shuffled_concept}")

                # same treatment for the replacement terms
                shuffled_replacement = [x.strip() for x in replacement.replacement.split(',')]
                random.shuffle(shuffled_replacement)
                shuffled_replacement = ', '.join(shuffled_replacement)
                replacement_prompts.append(f"{shuffled_replacement}, {prompt}, {shuffled_replacement}")
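
            # Worked example with hypothetical values: for the prompt
            # "a photo in a park" and the pair concept="dog, puppy" /
            # replacement="cat, kitten", one possible shuffle yields:
            #   concept_prompts[i]     -> "puppy, dog, a photo in a park, puppy, dog"
            #   replacement_prompts[i] -> "cat, kitten, a photo in a park, cat, kitten"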

            # teacher pass: predict noise for the replacement prompts with the
            # network inactive; the detached result becomes the regression target
            conditional_embeds = self.sd.encode_prompt(replacement_prompts).to(self.device_torch, dtype=dtype)

            replacement_pred = self.sd.predict_noise(
                latents=noisy_latents.to(self.device_torch, dtype=dtype),
                conditional_embeddings=conditional_embeds.to(self.device_torch, dtype=dtype),
                timestep=timesteps,
                guidance_scale=1.0,
            )

            del conditional_embeds
            replacement_pred = replacement_pred.detach()

        self.optimizer.zero_grad()
        flush()
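
        # In effect, the rest of the step minimizes (informally) per batch item i:
        #   L_i = || eps_net(x_t, c_concept_i) - eps(x_t, c_replacement_i) ||^2
        # where eps_net is the prediction with the network active, so the network
        # learns to make concept prompts behave like replacement prompts.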

        # the text encoder needs gradients when it is trained directly or when
        # a trainable embedding flows through it
        grad_on_text_encoder = self.train_config.train_text_encoder or self.embedding is not None

        # apply the per-item network weights for this batch
        network.multiplier = network_weight_list

        # student pass: run with the trainable network active
        with network:
            with torch.set_grad_enabled(grad_on_text_encoder):
                conditional_embeds = self.sd.encode_prompt(concept_prompts).to(self.device_torch, dtype=dtype)
            if not grad_on_text_encoder:
                conditional_embeds = conditional_embeds.detach()
                self.optimizer.zero_grad()
                flush()

            noise_pred = self.sd.predict_noise(
                latents=noisy_latents.to(self.device_torch, dtype=dtype),
                conditional_embeddings=conditional_embeds.to(self.device_torch, dtype=dtype),
                timestep=timesteps,
                guidance_scale=1.0,
            )

            # regress the concept prediction onto the replacement prediction,
            # averaging per sample over channel and spatial dimensions
            loss = torch.nn.functional.mse_loss(noise_pred.float(), replacement_pred.float(), reduction="none")
            loss = loss.mean([1, 2, 3])

            if self.train_config.min_snr_gamma is not None and self.train_config.min_snr_gamma > 0.000001:
                # rebalance the loss across timesteps with min-SNR weighting
                loss = apply_snr_weight(loss, timesteps, self.sd.noise_scheduler, self.train_config.min_snr_gamma)

            loss = loss.mean()

            # backpropagate while the network is still active
            loss.backward()
            flush()

        # apply gradients and advance the learning-rate schedule
        self.optimizer.step()
        self.optimizer.zero_grad()
        self.lr_scheduler.step()

        if self.embedding is not None:
            # restore untrained token embeddings to their original values
            self.embedding.restore_embeddings()

        loss_dict = OrderedDict(
            {'loss': loss.item()}
        )

        # reset the multiplier so the network behaves normally outside this step
        network.multiplier = 1.0

        return loss_dict
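
# A minimal (hypothetical) sketch of how this process might be wired into a
# job config, assuming the usual BaseSDTrainProcess YAML layout. Only
# "replacements", "concept", and "replacement" are read by the code above;
# the surrounding keys and the "type" value are illustrative:
#
#   process:
#     - type: concept_replacer
#       replacements:
#         - concept: "dog, puppy"
#           replacement: "cat, kitten"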