import gc
from collections import OrderedDict
from typing import TYPE_CHECKING

import torch
from tqdm import tqdm

from jobs.process import BaseExtensionProcess
from toolkit.config_modules import ModelConfig
from toolkit.stable_diffusion_model import StableDiffusion
from toolkit.train_tools import get_torch_dtype
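
# imported for type checking only; importing ExtensionJob at runtime
# would create a circular import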
if TYPE_CHECKING:
    from jobs import ExtensionJob


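# extend the standard ModelConfig to add a per-model merge weight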
class ModelInputConfig(ModelConfig):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.weight = kwargs.get('weight', 1.0)
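        # default to float32 unless the config says otherwise; merging in
        # float32 keeps more precision than float16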
        self.dtype: str = kwargs.get('dtype', 'float32')


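# release cached GPU memory and run the garbage collector between model loads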
def flush():
    torch.cuda.empty_cache()
    gc.collect()


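# the main process class: merges the listed models into a weighted average.
# a hypothetical config entry for this process might look like the following
# (key names come from the get_conf() calls below; the paths, the 'type'
# value, and the 'name_or_path' field are illustrative assumptions):
#
#   - type: 'example_merge_models'
#     save_path: 'output/merged_model.safetensors'
#     save_dtype: 'float16'
#     device: 'cpu'
#     models_to_merge:
#       - name_or_path: 'model_a.safetensors'
#         weight: 1.0
#       - name_or_path: 'model_b.safetensors'
#         weight: 1.0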
class ExampleMergeModels(BaseExtensionProcess):
    def __init__(
            self,
            process_id: int,
            job: 'ExtensionJob',
            config: OrderedDict
    ):
        super().__init__(process_id, job, config)
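        # keep __init__ lightweight: just read and validate config values
        # here; model loading and the merge itself happen in run().
        # get_conf() pulls a value from this process's section of the config
        # file, with optional default, required, and as_type casting.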
        self.save_path = self.get_conf('save_path', required=True)
        self.save_dtype = self.get_conf('save_dtype', default='float16', as_type=get_torch_dtype)
        self.device = self.get_conf('device', default='cpu', as_type=torch.device)

        models_to_merge = self.get_conf('models_to_merge', required=True, as_type=list)
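        # wrap each raw dict in a ModelInputConfig so every entry gets a
        # weight and a dtype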
        self.models_to_merge = [ModelInputConfig(**model) for model in models_to_merge]

    def run(self):
        # always call super().run() first
        super().run()
        print(f"Running process: {self.__class__.__name__}")
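        # normalize the weights so they sum to 1.0; for example, weights of
        # [2.0, 1.0] become [2/3, 1/3]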
        total_weight = sum([model.weight for model in self.models_to_merge])
        weight_adjust = 1.0 / total_weight
        for model in self.models_to_merge:
            model.weight *= weight_adjust

        output_model: StableDiffusion = None

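        # merge loop: scale each model by its normalized weight, then sum,
        # so the result is merged = sum(weight_i * model_i)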
        for model_config in tqdm(self.models_to_merge, desc="Merging models"):
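            # load in float32 for precision while merging; the configured
            # save_dtype is only applied at save time below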
            sd_model = StableDiffusion(
                device=self.device,
                model_config=model_config,
                dtype="float32"
            )
            sd_model.load_model()
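            # scale the weights in place; state_dict() tensors share storage
            # with the module parameters, so in-place ops modify the model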
            if isinstance(sd_model.text_encoder, list):
                # sdxl models have a list of text encoders
                for text_encoder in sd_model.text_encoder:
                    for key, value in text_encoder.state_dict().items():
                        value *= model_config.weight
            else:
                # normal models have a single text encoder
                for key, value in sd_model.text_encoder.state_dict().items():
                    value *= model_config.weight
            # scale the unet the same way
            for key, value in sd_model.unet.state_dict().items():
                value *= model_config.weight
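            # the first (already scaled) model becomes the accumulation base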
            if output_model is None:
                output_model = sd_model
            else:
                # add this model's scaled weights into the accumulator
                if isinstance(output_model.text_encoder, list):
                    # sdxl model
                    for i, text_encoder in enumerate(output_model.text_encoder):
                        for key, value in text_encoder.state_dict().items():
                            value += sd_model.text_encoder[i].state_dict()[key]
                else:
                    # normal model
                    for key, value in output_model.text_encoder.state_dict().items():
                        value += sd_model.text_encoder.state_dict()[key]
                # unet
                for key, value in output_model.unet.state_dict().items():
                    value += sd_model.unet.state_dict()[key]

                # drop the local reference to the merged-in model and free
                # memory before loading the next one (the base model stays
                # alive through output_model, so this belongs in the else)
                del sd_model
                flush()
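        # save with the job's metadata, casting weights to the configured
        # save dtype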
print(f"Saving merged model to {self.save_path}") |
|
output_model.save(self.save_path, meta=self.meta, save_dtype=self.save_dtype) |
|
print(f"Saved merged model to {self.save_path}") |

        del output_model
        flush()