# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable, Dict, List, Optional, Union

import PIL
import torch
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.models import AutoencoderKL, T2IAdapter, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_output import (
    StableDiffusionPipelineOutput,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
    StableDiffusionPipeline,
    rescale_noise_cfg,
    retrieve_timesteps,
)
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import deprecate, is_torch_xla_available, logging
from diffusers.utils.torch_utils import randn_tensor
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from ..loaders import CustomAdapterMixin
from ..models.attention_processor import (
    DecoupledMVRowSelfAttnProcessor2_0,
    set_unet_2d_condition_attn_processor,
)

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def retrieve_latents(
    encoder_output: torch.Tensor,
    generator: Optional[torch.Generator] = None,
    sample_mode: str = "sample",
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


class MVAdapterI2MVSDPipeline(StableDiffusionPipeline, CustomAdapterMixin):
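    """Stable Diffusion pipeline for image-to-multi-view (I2MV) generation with
    MV-Adapter. Multi-view consistency is handled by decoupled row-wise
    self-attention processors, camera/geometry conditioning by a `T2IAdapter`
    condition encoder, and image conditioning by hidden states cached from a
    UNet pass over the reference image.
    """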
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        image_encoder: CLIPVisionModelWithProjection = None,
        requires_safety_checker: bool = False,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
            requires_safety_checker=requires_safety_checker,
        )

        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor,
            do_convert_rgb=True,
            do_normalize=False,
        )

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.prepare_latents
    def prepare_image_latents(
        self,
        image,
        timestep,
        batch_size,
        num_images_per_prompt,
        dtype,
        device,
        generator=None,
        add_noise=True,
    ):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    retrieve_latents(
                        self.vae.encode(image[i : i + 1]), generator=generator[i]
                    )
                    for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = retrieve_latents(
                    self.vae.encode(image), generator=generator
                )

            init_latents = self.vae.config.scaling_factor * init_latents

        if (
            batch_size > init_latents.shape[0]
            and batch_size % init_latents.shape[0] == 0
        ):
            # expand init_latents for batch_size
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat(
                [init_latents] * additional_image_per_prompt, dim=0
            )
        elif (
            batch_size > init_latents.shape[0]
            and batch_size % init_latents.shape[0] != 0
        ):
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        if add_noise:
            shape = init_latents.shape
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # get latents
            init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    def prepare_control_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        num_empty_images=0,  # for concat in batch like ImageDream
    ):
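        """Preprocess `image` into control features: optionally append `num_empty_images`
        blank frames (ImageDream-style batch concatenation), tile the result to the
        requested batch size, and duplicate it when classifier-free guidance is used."""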
        assert hasattr(
            self, "control_image_processor"
        ), "control_image_processor is not initialized"

        image = self.control_image_processor.preprocess(
            image, height=height, width=width
        ).to(dtype=torch.float32)

        if num_empty_images > 0:
            image = torch.cat(
                [image, torch.zeros_like(image[:num_empty_images])], dim=0
            )

        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt  # always 1 for control image

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        if do_classifier_free_guidance:
            image = torch.cat([image] * 2)

        return image

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[
            Union[
                Callable[[int, int, Dict], None],
                PipelineCallback,
                MultiPipelineCallbacks,
            ]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        # NEW
        mv_scale: float = 1.0,
        # Camera or geometry condition
        control_image: Optional[PipelineImageInput] = None,
        control_conditioning_scale: Optional[float] = 1.0,
        control_conditioning_factor: float = 1.0,
        # Image condition
        reference_image: Optional[PipelineImageInput] = None,
        reference_conditioning_scale: Optional[float] = 1.0,
        # Optional. controlnet
        controlnet_image: Optional[PipelineImageInput] = None,
        controlnet_conditioning_scale: Optional[float] = 1.0,
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
                using zero terminal SNR.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
                each denoising step during inference with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
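            mv_scale (`float`, *optional*, defaults to 1.0):
                Scale of the decoupled multi-view attention; larger values enforce stronger cross-view consistency.
            control_image (`PipelineImageInput`, *optional*):
                Camera or geometry conditioning image(s), encoded by the condition encoder (`cond_encoder`) and
                injected into the UNet as intra-block residuals.
            control_conditioning_scale (`float`, *optional*, defaults to 1.0):
                Multiplier applied to the condition encoder residuals.
            control_conditioning_factor (`float`, *optional*, defaults to 1.0):
                Fraction of the denoising steps during which the condition residuals are applied.
            reference_image (`PipelineImageInput`, *optional*):
                Reference image for image conditioning; its UNet hidden states are cached and injected through the
                decoupled reference attention.
            reference_conditioning_scale (`float`, *optional*, defaults to 1.0):
                Scale of the decoupled reference attention.
            controlnet_image (`PipelineImageInput`, *optional*):
                Conditioning image for an optional ControlNet; used only if a `controlnet` module is attached to the
                pipeline.
            controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
                Multiplier applied to the ControlNet residuals.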

        Examples:
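            A minimal usage sketch. The checkpoint path, adapter weight file, and the
            `init_custom_adapter`/`load_custom_adapter` helpers (assumed to be exposed by
            `CustomAdapterMixin`) are illustrative placeholders, not pinned names:

            ```py
            >>> import torch
            >>> from PIL import Image

            >>> pipe = MVAdapterI2MVSDPipeline.from_pretrained(
            ...     "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
            ... )
            >>> pipe.init_custom_adapter(num_views=6)
            >>> pipe.load_custom_adapter("path/to/mvadapter.safetensors")
            >>> pipe.to("cuda")

            >>> images = pipe(
            ...     prompt="a photo of an astronaut",
            ...     reference_image=Image.open("reference.png"),  # image condition
            ...     control_image=camera_maps,  # per-view camera/geometry maps
            ...     num_images_per_prompt=6,  # number of generated views
            ...     guidance_scale=3.0,
            ...     num_inference_steps=50,
            ... ).images
            ```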

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """

        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor
        # to deal with lora scaling and other possible forward hooks

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            ip_adapter_image,
            ip_adapter_image_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._guidance_rescale = guidance_rescale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        lora_scale = (
            self.cross_attention_kwargs.get("scale", None)
            if self.cross_attention_kwargs is not None
            else None
        )

        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            clip_skip=self.clip_skip,
        )

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
            image_embeds = self.prepare_ip_adapter_image_embeds(
                ip_adapter_image,
                ip_adapter_image_embeds,
                device,
                batch_size * num_images_per_prompt,
                self.do_classifier_free_guidance,
            )

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6.1 Add image embeds for IP-Adapter
        added_cond_kwargs = (
            {"image_embeds": image_embeds}
            if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
            else None
        )

        # 6.2 Optionally get Guidance Scale Embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(
                batch_size * num_images_per_prompt
            )
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)
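        # Reference conditioning: encode the reference image into clean latents
        # (add_noise=False), then run one UNet pass at timestep 0 purely to cache
        # per-layer hidden states; the denoising loop injects these cached states
        # through the decoupled reference attention (`ref_hidden_states`).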
        # Preprocess reference image
        reference_image = self.image_processor.preprocess(reference_image)
        reference_latents = self.prepare_image_latents(
            reference_image,
            timesteps[:1].repeat(batch_size * num_images_per_prompt),  # unused when add_noise=False
            batch_size,
            1,
            prompt_embeds.dtype,
            device,
            generator,
            add_noise=False,
        )

        ref_timesteps = torch.zeros_like(timesteps[0])
        ref_hidden_states = {}

        with torch.no_grad():
            self.unet(
                reference_latents,
                ref_timesteps,
                encoder_hidden_states=prompt_embeds[-1:],
                cross_attention_kwargs={
                    "cache_hidden_states": ref_hidden_states,
                    "use_mv": False,
                    "use_ref": False,
                },
                return_dict=False,
            )
        ref_hidden_states = {
            k: v.repeat_interleave(num_images_per_prompt, dim=0)
            for k, v in ref_hidden_states.items()
        }
        if self.do_classifier_free_guidance:
            ref_hidden_states = {
                k: torch.cat([torch.zeros_like(v), v], dim=0)
                for k, v in ref_hidden_states.items()
            }

        cross_attention_kwargs = {
            "num_views": num_images_per_prompt,
            "mv_scale": mv_scale,
            "ref_hidden_states": {k: v.clone() for k, v in ref_hidden_states.items()},
            "ref_scale": reference_conditioning_scale,
            **(self.cross_attention_kwargs or {}),
        }

        # Preprocess control image
        control_image_feature = self.prepare_control_image(
            image=control_image,
            width=width,
            height=height,
            batch_size=batch_size * num_images_per_prompt,
            num_images_per_prompt=1,  # NOTE: always 1 for control images
            device=device,
            dtype=latents.dtype,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
        )
        control_image_feature = control_image_feature.to(
            device=device, dtype=latents.dtype
        )

        adapter_state = self.cond_encoder(control_image_feature)
        for i, state in enumerate(adapter_state):
            adapter_state[i] = state * control_conditioning_scale

        # Preprocess controlnet image if provided
        do_controlnet = controlnet_image is not None and hasattr(self, "controlnet")
        if do_controlnet:
            controlnet_image = self.prepare_control_image(
                image=controlnet_image,
                width=width,
                height=height,
                batch_size=batch_size * num_images_per_prompt,
                num_images_per_prompt=1,  # NOTE: always 1 for control images
                device=device,
                dtype=latents.dtype,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
            )
            controlnet_image = controlnet_image.to(device=device, dtype=latents.dtype)
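        # NOTE: the adapter residuals in `adapter_state` are injected only during the
        # first `control_conditioning_factor` fraction of the denoising steps, while
        # multi-view and reference attention are driven by `cross_attention_kwargs`.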
        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # expand the latents if we are doing classifier free guidance
                latent_model_input = (
                    torch.cat([latents] * 2)
                    if self.do_classifier_free_guidance
                    else latents
                )
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )

                if i < int(num_inference_steps * control_conditioning_factor):
                    down_intrablock_additional_residuals = [
                        state.clone() for state in adapter_state
                    ]
                else:
                    down_intrablock_additional_residuals = None

                unet_add_kwargs = {}

                # Do controlnet if provided
                if do_controlnet:
                    down_block_res_samples, mid_block_res_sample = self.controlnet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=prompt_embeds,
                        controlnet_cond=controlnet_image,
                        conditioning_scale=controlnet_conditioning_scale,
                        guess_mode=False,
                        added_cond_kwargs=added_cond_kwargs,
                        return_dict=False,
                    )
                    unet_add_kwargs.update(
                        {
                            "down_block_additional_residuals": down_block_res_samples,
                            "mid_block_additional_residual": mid_block_res_sample,
                        }
                    )

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    timestep_cond=timestep_cond,
                    cross_attention_kwargs=cross_attention_kwargs,
                    down_intrablock_additional_residuals=down_intrablock_additional_residuals,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                    **unet_add_kwargs,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )

                if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(
                        noise_pred,
                        noise_pred_text,
                        guidance_rescale=self.guidance_rescale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs, return_dict=False
                )[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop(
                        "negative_prompt_embeds", negative_prompt_embeds
                    )

                # call the callback, if provided
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

                if XLA_AVAILABLE:
                    xm.mark_step()
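        # 8. Post-processing: decode latents unless latent output is requested,
        # run the safety checker, and denormalize only images flagged as safe.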
        if not output_type == "latent":
            image = self.vae.decode(
                latents / self.vae.config.scaling_factor,
                return_dict=False,
                generator=generator,
            )[0]
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, prompt_embeds.dtype
            )
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(
            image, output_type=output_type, do_denormalize=do_denormalize
        )

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(
            images=image, nsfw_content_detected=has_nsfw_concept
        )

    ### NEW: adapters ###
    def _init_custom_adapter(
        self,
        # Multi-view adapter
        num_views: int = 1,
        self_attn_processor: Any = DecoupledMVRowSelfAttnProcessor2_0,
        # Condition encoder
        cond_in_channels: int = 6,
        # For training
        copy_attn_weights: bool = True,
        zero_init_module_keys: List[str] = [],
    ):
        # Condition encoder
        self.cond_encoder = T2IAdapter(
            in_channels=cond_in_channels,
            channels=self.unet.config.block_out_channels,
            num_res_blocks=self.unet.config.layers_per_block,
            downscale_factor=8,
        )

        # set custom attn processor for multi-view attention
        self.unet: UNet2DConditionModel
        set_unet_2d_condition_attn_processor(
            self.unet,
            set_self_attn_proc_func=lambda name, hs, cad, ap: self_attn_processor(
                query_dim=hs,
                inner_dim=hs,
                num_views=num_views,
                name=name,
                use_mv=True,
                use_ref=True,
            ),
            set_cross_attn_proc_func=lambda name, hs, cad, ap: self_attn_processor(
                query_dim=hs,
                inner_dim=hs,
                num_views=num_views,
                name=name,
                use_mv=False,
                use_ref=False,
            ),
        )

        # copy decoupled attention weights from original unet
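        # e.g. "...processor.to_q_mv.weight" is initialized from the base
        # "...to_q.weight" by stripping the "_mv"/"_ref" suffix and the
        # "processor." prefix; any key matching `zero_init_module_keys`
        # is zero-initialized instead.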
        if copy_attn_weights:
            state_dict = self.unet.state_dict()
            for key in state_dict.keys():
                if "_mv" in key:
                    compatible_key = key.replace("_mv", "").replace("processor.", "")
                elif "_ref" in key:
                    compatible_key = key.replace("_ref", "").replace("processor.", "")
                else:
                    compatible_key = key

                is_zero_init_key = any([k in key for k in zero_init_module_keys])
                if is_zero_init_key:
                    state_dict[key] = torch.zeros_like(state_dict[compatible_key])
                else:
                    state_dict[key] = state_dict[compatible_key].clone()
            self.unet.load_state_dict(state_dict)

    def _load_custom_adapter(self, state_dict):
        self.unet.load_state_dict(state_dict, strict=False)
        self.cond_encoder.load_state_dict(state_dict, strict=False)

    def _save_custom_adapter(
        self,
        include_keys: Optional[List[str]] = None,
        exclude_keys: Optional[List[str]] = None,
    ):
        def include_fn(k):
            is_included = False

            if include_keys is not None:
                is_included = is_included or any([key in k for key in include_keys])
            if exclude_keys is not None:
                is_included = is_included and not any(
                    [key in k for key in exclude_keys]
                )

            return is_included

        state_dict = {k: v for k, v in self.unet.state_dict().items() if include_fn(k)}
        state_dict.update(self.cond_encoder.state_dict())

        return state_dict