|
import copy
import gc
import os
import random
from collections import OrderedDict
from contextlib import nullcontext
from typing import List, Optional, Union

import torch
from torch.utils.data import ConcatDataset, DataLoader
from tqdm import tqdm

from jobs.process import BaseSDTrainProcess
from toolkit import train_tools
from toolkit.config_modules import ReferenceDatasetConfig, SliderConfig
from toolkit.data_loader import PairedImageDataset
from toolkit.prompt_utils import (
    ACTION_TYPES_SLIDER,
    EncodedAnchor,
    EncodedPromptPair,
    PromptEmbedsCache,
    build_latent_image_batch_for_prompt_pair,
    build_prompt_pair_batch_from_cache,
    concat_anchors,
    concat_prompt_embeds,
    concat_prompt_pairs,
    encode_prompts_to_cache,
    split_anchors,
    split_prompt_embeds,
    split_prompt_pairs,
)
from toolkit.stable_diffusion_model import PromptEmbeds, StableDiffusion
from toolkit.train_tools import apply_snr_weight, get_torch_dtype
|
|
|
|
|
def flush(): |
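    """Release cached CUDA memory and run garbage collection."""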
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
|
|
|
|
class UltimateSliderConfig(SliderConfig): |
|
def __init__(self, **kwargs): |
|
super().__init__(**kwargs) |
|
self.additional_losses: List[str] = kwargs.get('additional_losses', []) |
|
self.weight_jitter: float = kwargs.get('weight_jitter', 0.0) |
|
self.img_loss_weight: float = kwargs.get('img_loss_weight', 1.0) |
|
self.cfg_loss_weight: float = kwargs.get('cfg_loss_weight', 1.0) |
|
self.datasets: List[ReferenceDatasetConfig] = [ReferenceDatasetConfig(**d) for d in kwargs.get('datasets', [])] |
|
|
|
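# A hypothetical sketch of the `slider` config section this class consumes
# (values are illustrative; field names mirror the attributes read above and
# the keys used in load_datasets below):
#
#   slider:
#     weight_jitter: 0.02
#     img_loss_weight: 1.0
#     cfg_loss_weight: 1.0
#     datasets:
#       - pair_folder: "/path/to/pairs"
#         size: 512
#         target_class: "person"
#         network_weight: 1.0
#         pos_weight: 1.0
#         neg_weight: 1.0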
|
|
class UltimateSliderTrainerProcess(BaseSDTrainProcess): |
|
sd: StableDiffusion |
|
data_loader: DataLoader = None |
|
|
|
def __init__(self, process_id: int, job, config: OrderedDict, **kwargs): |
|
super().__init__(process_id, job, config, **kwargs) |
|
self.prompt_txt_list = None |
|
self.step_num = 0 |
|
self.start_step = 0 |
|
self.device = self.get_conf('device', self.job.device) |
|
self.device_torch = torch.device(self.device) |
|
self.slider_config = UltimateSliderConfig(**self.get_conf('slider', {})) |
|
|
|
self.prompt_cache = PromptEmbedsCache() |
|
        self.prompt_pairs: List[EncodedPromptPair] = []

        self.anchor_pairs: List[EncodedAnchor] = []
|
|
|
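        # how many prompt pairs go through the UNet per forward pass;
        # set for real in hook_before_train_loop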
self.prompt_chunk_size = 1 |
|
|
|
|
|
self.dataset_prompts = [] |
|
self.train_with_dataset = self.slider_config.datasets is not None and len(self.slider_config.datasets) > 0 |
|
|
|
def load_datasets(self): |
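        """Build a single DataLoader over all configured paired-image datasets
        and collect their prompts for the embedding cache."""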
|
        if self.data_loader is None and \
                self.slider_config.datasets is not None and len(self.slider_config.datasets) > 0:
            print("Loading datasets")
|
datasets = [] |
|
for dataset in self.slider_config.datasets: |
|
print(f" - Dataset: {dataset.pair_folder}") |
|
config = { |
|
'path': dataset.pair_folder, |
|
'size': dataset.size, |
|
'default_prompt': dataset.target_class, |
|
'network_weight': dataset.network_weight, |
|
'pos_weight': dataset.pos_weight, |
|
'neg_weight': dataset.neg_weight, |
|
'pos_folder': dataset.pos_folder, |
|
'neg_folder': dataset.neg_folder, |
|
} |
|
image_dataset = PairedImageDataset(config) |
|
datasets.append(image_dataset) |
|
|
|
|
|
self.dataset_prompts += image_dataset.get_all_prompts() |
|
|
|
concatenated_dataset = ConcatDataset(datasets) |
|
self.data_loader = DataLoader( |
|
concatenated_dataset, |
|
batch_size=self.train_config.batch_size, |
|
shuffle=True, |
|
num_workers=2 |
|
) |
|
|
|
def before_model_load(self): |
|
pass |
|
|
|
def hook_before_train_loop(self): |
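        """Load datasets, encode and cache every prompt combination the slider
        needs, build the prompt pair batches, then offload the text encoder(s)
        to CPU and freeze the VAE."""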
|
|
|
self.load_datasets() |
|
|
|
|
|
if self.slider_config.prompt_file: |
|
self.print(f"Loading prompt file from {self.slider_config.prompt_file}") |
|
with open(self.slider_config.prompt_file, 'r', encoding='utf-8') as f: |
|
self.prompt_txt_list = f.readlines() |
|
|
|
self.prompt_txt_list = [line.strip() for line in self.prompt_txt_list if len(line.strip()) > 0] |
|
|
|
self.print(f"Found {len(self.prompt_txt_list)} prompts.") |
|
|
|
if not self.slider_config.prompt_tensors: |
|
print(f"Prompt tensors not found. Building prompt tensors for {self.train_config.steps} steps.") |
|
|
|
random.shuffle(self.prompt_txt_list) |
|
|
|
self.prompt_txt_list = self.prompt_txt_list[:self.train_config.steps] |
|
|
|
|
|
cache = PromptEmbedsCache() |
|
|
|
|
|
with torch.no_grad(): |
|
|
|
neutral_list = self.prompt_txt_list if self.prompt_txt_list is not None else [""] |
|
|
|
|
|
prompts_to_cache = [] |
|
for neutral in neutral_list: |
|
for target in self.slider_config.targets: |
|
prompt_list = [ |
|
f"{target.target_class}", |
|
f"{target.target_class} {neutral}", |
|
f"{target.positive}", |
|
f"{target.positive} {neutral}", |
|
f"{target.negative}", |
|
f"{target.negative} {neutral}", |
|
f"{neutral}", |
|
f"{target.positive} {target.negative}", |
|
f"{target.negative} {target.positive}", |
|
] |
|
prompts_to_cache += prompt_list |
|
|
|
|
|
                # de-duplicate while preserving insertion order
                prompts_to_cache = list(dict.fromkeys(prompts_to_cache))
|
|
|
|
|
prompts_to_cache = prompts_to_cache[:self.train_config.steps] |
|
|
|
if len(self.dataset_prompts) > 0: |
|
|
|
prompts_to_cache += self.dataset_prompts |
|
|
|
|
|
cache = encode_prompts_to_cache( |
|
prompt_list=prompts_to_cache, |
|
sd=self.sd, |
|
cache=cache, |
|
prompt_tensor_file=self.slider_config.prompt_tensors |
|
) |
|
|
|
prompt_pairs = [] |
|
prompt_batches = [] |
|
for neutral in tqdm(neutral_list, desc="Building Prompt Pairs", leave=False): |
|
for target in self.slider_config.targets: |
|
                    prompt_pair_batch = build_prompt_pair_batch_from_cache(
                        cache=cache,
                        target=target,
                        neutral=neutral,
                    )
                    if self.slider_config.batch_full_slide:
                        # run the whole slide batch through the UNet in one pass
                        # (assumes build_prompt_pair_batch_from_cache yields 4 pairs)
                        self.prompt_chunk_size = 4
                        concat_prompt_pair_batch = concat_prompt_pairs(prompt_pair_batch).to('cpu')
                        prompt_pairs += [concat_prompt_pair_batch]
                    else:
                        # one pair at a time
                        self.prompt_chunk_size = 1
                        prompt_pairs += [x.to('cpu') for x in prompt_pair_batch]
|
|
|
|
|
|
|
|
|
            # every prompt is cached now, so move the text encoder(s) to CPU to free VRAM
            if isinstance(self.sd.text_encoder, list):
                # SDXL has more than one text encoder
                for encoder in self.sd.text_encoder:
                    encoder.to("cpu")
            else:
                self.sd.text_encoder.to("cpu")
|
self.prompt_cache = cache |
|
self.prompt_pairs = prompt_pairs |
|
|
|
|
|
|
|
|
|
        # the VAE only encodes reference images; keep it frozen in eval mode
        self.sd.vae.to(self.device_torch)
        self.sd.vae.eval()
        self.sd.vae.requires_grad_(False)
|
|
|
if self.train_config.gradient_checkpointing: |
|
|
|
self.sd.unet.enable_gradient_checkpointing() |
|
|
|
flush() |
|
|
|
|
|
def hook_train_loop(self, batch): |
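        """Run one training step: an MSE reconstruction loss against the paired
        reference images, followed by a prompt-guided (CFG-style) slider loss.
        Returns an OrderedDict with both loss values."""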
|
dtype = get_torch_dtype(self.train_config.dtype) |
|
|
|
with torch.no_grad(): |
|
|
|
noise_scheduler = self.sd.noise_scheduler |
|
optimizer = self.optimizer |
|
lr_scheduler = self.lr_scheduler |
|
|
|
|
|
|
|
            # pick a random encoded prompt pair for this step
            prompt_pair: EncodedPromptPair = self.prompt_pairs[
                torch.randint(0, len(self.prompt_pairs), (1,)).item()
            ]
|
|
|
prompt_pair.to(self.device_torch, dtype=dtype) |
|
|
|
|
|
|
|
            # a PairedImageDataset batch: images, prompts, and (pos, neg) network weights
            imgs, prompts, network_weights = batch
            network_pos_weight, network_neg_weight = network_weights
|
|
|
if isinstance(network_pos_weight, torch.Tensor): |
|
network_pos_weight = network_pos_weight.item() |
|
if isinstance(network_neg_weight, torch.Tensor): |
|
network_neg_weight = network_neg_weight.item() |
|
|
|
|
|
            # randomly jitter the network weights, mirrored across the pos/neg sides
            weight_jitter = self.slider_config.weight_jitter
            if weight_jitter > 0.0:
                jitter = random.uniform(-weight_jitter, weight_jitter)
                network_pos_weight += jitter
                network_neg_weight -= jitter
|
|
|
|
|
imgs: torch.Tensor = imgs.to(self.device_torch, dtype=dtype) |
|
|
|
            # paired images are concatenated along width: negative left, positive right
            negative_images, positive_images = torch.chunk(imgs, 2, dim=3)
|
|
|
height = positive_images.shape[2] |
|
width = positive_images.shape[3] |
|
batch_size = positive_images.shape[0] |
|
|
|
positive_latents = self.sd.encode_images(positive_images) |
|
negative_latents = self.sd.encode_images(negative_images) |
|
|
|
self.sd.noise_scheduler.set_timesteps( |
|
self.train_config.max_denoising_steps, device=self.device_torch |
|
) |
|
|
|
            # sample a random timestep index and map it to the scheduler's timestep value
            timesteps = torch.randint(0, self.train_config.max_denoising_steps, (1,), device=self.device_torch)
            current_timestep_index = timesteps.item()
            current_timestep = noise_scheduler.timesteps[current_timestep_index]
            timesteps = timesteps.long()
|
|
|
|
|
noise_positive = self.sd.get_latent_noise( |
|
pixel_height=height, |
|
pixel_width=width, |
|
batch_size=batch_size, |
|
noise_offset=self.train_config.noise_offset, |
|
).to(self.device_torch, dtype=dtype) |
|
|
|
            # identical noise for both halves so the pair differs only in image content
            noise_negative = noise_positive.clone()
|
|
|
|
|
|
|
            noisy_positive_latents = noise_scheduler.add_noise(positive_latents, noise_positive, timesteps)
            noisy_negative_latents = noise_scheduler.add_noise(negative_latents, noise_negative, timesteps)

            # keep the clean image latents around: the positive_latents / negative_latents
            # names are reused below for noise predictions, and the v-prediction target
            # needs the clean (un-noised) latents
            clean_latents = torch.cat([positive_latents, negative_latents], dim=0)
|
|
|
|
|
|
|
|
|
            # lay the pos/neg noisy latents out to match the prompt pair batch
            noisy_cfg_latents = build_latent_image_batch_for_prompt_pair(
                pos_latent=noisy_positive_latents,
                neg_latent=noisy_negative_latents,
                prompt_pair=prompt_pair,
                prompt_chunk_size=self.prompt_chunk_size,
            )
            noisy_cfg_latents.requires_grad = False
|
|
|
            # the predictions below are frozen-model targets, so the LoRA must be inactive
            assert not self.network.is_active
|
|
|
|
|
            # noise predictions from the frozen model (network inactive); these are
            # the targets the guided slider loss is computed against
            positive_latents = self.sd.predict_noise(
                latents=noisy_cfg_latents,
                text_embeddings=train_tools.concat_prompt_embeddings(
                    prompt_pair.positive_target,  # unconditional (negative prompt)
                    prompt_pair.negative_target,  # conditional (positive prompt)
                    self.train_config.batch_size,
                ),
                timestep=current_timestep,
                guidance_scale=1.0
            )
            positive_latents.requires_grad = False

            neutral_latents = self.sd.predict_noise(
                latents=noisy_cfg_latents,
                text_embeddings=train_tools.concat_prompt_embeddings(
                    prompt_pair.positive_target,  # unconditional (negative prompt)
                    prompt_pair.empty_prompt,  # conditional (positive prompt)
                    self.train_config.batch_size,
                ),
                timestep=current_timestep,
                guidance_scale=1.0
            )
            neutral_latents.requires_grad = False

            unconditional_latents = self.sd.predict_noise(
                latents=noisy_cfg_latents,
                text_embeddings=train_tools.concat_prompt_embeddings(
                    prompt_pair.positive_target,  # unconditional (negative prompt)
                    prompt_pair.positive_target,  # conditional (positive prompt)
                    self.train_config.batch_size,
                ),
                timestep=current_timestep,
                guidance_scale=1.0
            )
            unconditional_latents.requires_grad = False
|
|
|
            # chunk everything to match how the prompt pairs were batched
            positive_latents_chunks = torch.chunk(positive_latents, self.prompt_chunk_size, dim=0)
            neutral_latents_chunks = torch.chunk(neutral_latents, self.prompt_chunk_size, dim=0)
            unconditional_latents_chunks = torch.chunk(unconditional_latents, self.prompt_chunk_size, dim=0)
            prompt_pair_chunks = split_prompt_pairs(prompt_pair, self.prompt_chunk_size)
            noisy_cfg_latents_chunks = torch.chunk(noisy_cfg_latents, self.prompt_chunk_size, dim=0)
            assert len(prompt_pair_chunks) == len(noisy_cfg_latents_chunks)
|
|
|
            # stack the positive and negative sides into one batch for the image loss;
            # the positive half trains at +weight, the negative half at -weight
            noisy_latents = torch.cat([noisy_positive_latents, noisy_negative_latents], dim=0)
            noise = torch.cat([noise_positive, noise_negative], dim=0)
            timesteps = torch.cat([timesteps, timesteps], dim=0)
            network_multiplier = [network_pos_weight * 1.0, network_neg_weight * -1.0]
|
|
|
flush() |
|
|
|
        loss_float = None

        self.optimizer.zero_grad()
        noisy_latents.requires_grad = False

        # look up the cached embeddings for this batch's dataset prompts
        if self.train_with_dataset:
|
            embedding_list = []
            with torch.set_grad_enabled(self.train_config.train_text_encoder):
                # these embeddings were precomputed into the cache in hook_before_train_loop
                for prompt in prompts:
                    embedding = self.prompt_cache[prompt].to(self.device_torch, dtype=dtype)
                    embedding_list.append(embedding)
                conditional_embeds = concat_prompt_embeds(embedding_list)
                # double the embeddings so they cover the pos/neg doubled latent batch
                conditional_embeds = concat_prompt_embeds([conditional_embeds, conditional_embeds])
        else:
            raise Exception("Datasets and targets required for ultimate slider")
|
|
|
        if self.model_config.is_xl:
            # split the doubled batch and run the positive and negative halves
            # through the UNet separately
            network_multiplier_list = network_multiplier
            noisy_latent_list = torch.chunk(noisy_latents, 2, dim=0)
            clean_latent_list = torch.chunk(clean_latents, 2, dim=0)
            noise_list = torch.chunk(noise, 2, dim=0)
            timesteps_list = torch.chunk(timesteps, 2, dim=0)
            conditional_embeds_list = split_prompt_embeds(conditional_embeds)
        else:
            network_multiplier_list = [network_multiplier]
            noisy_latent_list = [noisy_latents]
            clean_latent_list = [clean_latents]
            noise_list = [noise]
            timesteps_list = [timesteps]
            conditional_embeds_list = [conditional_embeds]
|
|
|
|
|
|
|
        # reference-image (paired dataset) loss
        reference_image_losses = []
|
|
|
        for network_multiplier, noisy_latents, clean_latents, noise, timesteps, conditional_embeds in zip(
                network_multiplier_list, noisy_latent_list, clean_latent_list, noise_list, timesteps_list,
                conditional_embeds_list
        ):
|
            with self.network:
                # LoRA is active inside this context; positive side trains at
                # +weight, negative side at -weight
                assert self.network.is_active

                self.network.multiplier = network_multiplier
|
|
|
noise_pred = self.sd.predict_noise( |
|
latents=noisy_latents.to(self.device_torch, dtype=dtype), |
|
conditional_embeddings=conditional_embeds.to(self.device_torch, dtype=dtype), |
|
timestep=timesteps, |
|
) |
|
noise = noise.to(self.device_torch, dtype=dtype) |
|
|
|
                if self.sd.prediction_type == 'v_prediction':
                    # v-parameterization targets velocity; get_velocity expects the
                    # clean (un-noised) latents rather than the noisy ones
                    target = noise_scheduler.get_velocity(clean_latents, noise, timesteps)
                else:
                    target = noise
|
|
|
loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="none") |
|
loss = loss.mean([1, 2, 3]) |
|
|
|
|
|
if self.train_config.min_snr_gamma is not None and self.train_config.min_snr_gamma > 0.000001: |
|
|
|
loss = apply_snr_weight(loss, timesteps, noise_scheduler, self.train_config.min_snr_gamma) |
|
|
|
                loss = loss.mean()
                loss = loss * self.slider_config.img_loss_weight

                loss_float = loss.item()
                reference_image_losses.append(loss_float)
|
|
|
|
|
loss.backward() |
|
flush() |
|
|
|
|
|
|
|
        # prompt-guided slider (CFG) loss, computed per prompt-pair chunk
        cfg_loss_list = []
|
|
|
with self.network: |
|
assert self.network.is_active |
|
            for (
                    prompt_pair_chunk,
                    noisy_cfg_latent_chunk,
                    positive_latents_chunk,
                    neutral_latents_chunk,
                    unconditional_latents_chunk,
            ) in zip(
                    prompt_pair_chunks,
                    noisy_cfg_latents_chunks,
                    positive_latents_chunks,
                    neutral_latents_chunks,
                    unconditional_latents_chunks,
            ):
|
                # each pair in the chunk carries its own +/- multiplier
                self.network.multiplier = prompt_pair_chunk.multiplier_list
|
|
|
target_latents = self.sd.predict_noise( |
|
latents=noisy_cfg_latent_chunk, |
|
text_embeddings=train_tools.concat_prompt_embeddings( |
|
prompt_pair_chunk.positive_target, |
|
prompt_pair_chunk.target_class, |
|
self.train_config.batch_size, |
|
), |
|
timestep=current_timestep, |
|
guidance_scale=1.0 |
|
) |
|
|
|
                guidance_scale = 1.0

                # CFG-style offset between the concept and unconditional predictions
                offset = guidance_scale * (positive_latents_chunk - unconditional_latents_chunk)
|
|
|
|
|
                # flip the offset sign depending on whether this pair erases or
                # enhances the concept
                offset_multiplier_list = []
                for action in prompt_pair_chunk.action_list:
                    if action == ACTION_TYPES_SLIDER.ERASE_NEGATIVE:
                        offset_multiplier_list += [-1.0]
                    elif action == ACTION_TYPES_SLIDER.ENHANCE_NEGATIVE:
                        offset_multiplier_list += [1.0]
|
|
|
offset_multiplier = torch.tensor(offset_multiplier_list).to(offset.device, dtype=offset.dtype) |
|
|
|
offset_multiplier = offset_multiplier.view(offset.shape[0], 1, 1, 1) |
|
offset *= offset_multiplier |
|
|
|
                # the training target is the neutral prediction shifted by the offset
                # (out-of-place add so the chunk view is not mutated)
                offset_neutral = neutral_latents_chunk + offset
|
|
|
|
|
loss = torch.nn.functional.mse_loss(target_latents.float(), offset_neutral.float(), reduction="none") |
|
loss = loss.mean([1, 2, 3]) |
|
|
|
if self.train_config.min_snr_gamma is not None and self.train_config.min_snr_gamma > 0.000001: |
|
|
|
timesteps_index_list = [current_timestep_index for _ in range(target_latents.shape[0])] |
|
|
|
loss = apply_snr_weight(loss, timesteps_index_list, noise_scheduler, |
|
self.train_config.min_snr_gamma) |
|
|
|
loss = loss.mean() * prompt_pair_chunk.weight * self.slider_config.cfg_loss_weight |
|
|
|
loss.backward() |
|
cfg_loss_list.append(loss.item()) |
|
del target_latents |
|
del offset_neutral |
|
del loss |
|
flush() |
|
|
|
|
|
optimizer.step() |
|
lr_scheduler.step() |
|
|
|
|
|
        # reset the network multiplier after the step
        self.network.multiplier = 1.0
|
|
|
        reference_image_loss = (
            sum(reference_image_losses) / len(reference_image_losses) if len(reference_image_losses) > 0 else 0.0
        )
        cfg_loss = sum(cfg_loss_list) / len(cfg_loss_list) if len(cfg_loss_list) > 0 else 0.0
|
|
|
loss_dict = OrderedDict({ |
|
'loss/img': reference_image_loss, |
|
'loss/cfg': cfg_loss, |
|
}) |
|
|
|
return loss_dict |
|
|
|
|