# Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput
class DDPMParallelSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.

    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.
        alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
                     Choose from `cosine` or `exp`

    Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
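
    Example (a minimal sketch; it assumes nothing beyond this function and `torch`):

    ```py
    >>> betas = betas_for_alpha_bar(10)  # 10-step cosine schedule
    >>> betas.shape
    torch.Size([10])
    >>> bool((betas <= 0.999).all())  # every beta is capped at `max_beta`
    True
    ```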
| """ | |
| if alpha_transform_type == "cosine": | |
| def alpha_bar_fn(t): | |
| return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 | |
| elif alpha_transform_type == "exp": | |
| def alpha_bar_fn(t): | |
| return math.exp(t * -12.0) | |
| else: | |
| raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") | |
| betas = [] | |
| for i in range(num_diffusion_timesteps): | |
| t1 = i / num_diffusion_timesteps | |
| t2 = (i + 1) / num_diffusion_timesteps | |
| betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) | |
| return torch.tensor(betas, dtype=torch.float32) | |
class DDPMParallelScheduler(SchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPMs) explore the connections between denoising score matching and
    Langevin dynamics sampling.

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`~SchedulerMixin.save_pretrained`]
    and [`~SchedulerMixin.from_pretrained`] functions.

    For more details, see the original paper: https://arxiv.org/abs/2006.11239

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
        beta_start (`float`): the starting `beta` value of inference.
        beta_end (`float`): the final `beta` value.
        beta_schedule (`str`):
            the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, `squaredcos_cap_v2` or `sigmoid`.
        trained_betas (`np.ndarray`, optional):
            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
        variance_type (`str`):
            options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
            `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
        clip_sample (`bool`, default `True`):
            option to clip predicted sample for numerical stability.
        clip_sample_range (`float`, default `1.0`):
            the maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
        prediction_type (`str`, default `epsilon`, optional):
            prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
            process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4 of
            https://imagen.research.google/video/paper.pdf)
        thresholding (`bool`, default `False`):
            whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
            Note that the thresholding method is unsuitable for latent-space diffusion models (such as
            stable-diffusion).
        dynamic_thresholding_ratio (`float`, default `0.995`):
            the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
            (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`.
        sample_max_value (`float`, default `1.0`):
            the threshold value for dynamic thresholding. Valid only when `thresholding=True`.
        timestep_spacing (`str`, default `"leading"`):
            The way the timesteps should be scaled. Refer to Table 2 of [Common Diffusion Noise Schedules and Sample
            Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
        steps_offset (`int`, default `0`):
            an offset added to the inference steps. You can use a combination of `offset=1` and
            `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product, as done in
            stable diffusion.
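
    Example (a minimal sketch of standalone use with the default config; no pipeline or model is assumed):

    ```py
    >>> scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
    >>> scheduler.set_timesteps(num_inference_steps=50)
    >>> scheduler.timesteps.shape
    torch.Size([50])
    >>> scheduler.init_noise_sigma
    1.0
    ```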
| """ | |
| _compatibles = [e.name for e in KarrasDiffusionSchedulers] | |
| order = 1 | |
| _is_ode_scheduler = False | |
| # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.__init__ | |
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        thresholding: bool = False,
        dynamic_thresholding_ratio: float = 0.995,
        clip_sample_range: float = 1.0,
        sample_max_value: float = 1.0,
        timestep_spacing: str = "leading",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        elif beta_schedule == "sigmoid":
            # GeoDiff sigmoid schedule
            betas = torch.linspace(-6, 6, num_train_timesteps)
            self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.custom_timesteps = False
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`torch.FloatTensor`): input sample
            timestep (`int`, optional): current timestep

        Returns:
            `torch.FloatTensor`: scaled input sample
        """
        return sample
    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.set_timesteps
    def set_timesteps(
        self,
        num_inference_steps: Optional[int] = None,
        device: Union[str, torch.device] = None,
        timesteps: Optional[List[int]] = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            num_inference_steps (`Optional[int]`):
                the number of diffusion steps used when generating samples with a pre-trained model. If passed, then
                `timesteps` must be `None`.
            device (`str` or `torch.device`, optional):
                the device to which the timesteps are moved.
            timesteps (`List[int]`, optional):
                custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
                timestep spacing strategy of equal spacing between timesteps is used. If passed, `num_inference_steps`
                must be `None`.
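
        Example (a minimal sketch showing the default spacing and custom timesteps; only this scheduler is assumed):

        ```py
        >>> scheduler = DDPMParallelScheduler()
        >>> scheduler.set_timesteps(num_inference_steps=4)  # default "leading" spacing
        >>> scheduler.timesteps
        tensor([750, 500, 250,   0])
        >>> scheduler.set_timesteps(timesteps=[900, 600, 300, 0])  # arbitrary descending spacing
        >>> scheduler.timesteps
        tensor([900, 600, 300,   0])
        ```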
| """ | |
| if num_inference_steps is not None and timesteps is not None: | |
| raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") | |
| if timesteps is not None: | |
| for i in range(1, len(timesteps)): | |
| if timesteps[i] >= timesteps[i - 1]: | |
| raise ValueError("`custom_timesteps` must be in descending order.") | |
| if timesteps[0] >= self.config.num_train_timesteps: | |
| raise ValueError( | |
| f"`timesteps` must start before `self.config.train_timesteps`:" | |
| f" {self.config.num_train_timesteps}." | |
| ) | |
| timesteps = np.array(timesteps, dtype=np.int64) | |
| self.custom_timesteps = True | |
| else: | |
| if num_inference_steps > self.config.num_train_timesteps: | |
| raise ValueError( | |
| f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" | |
| f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" | |
| f" maximal {self.config.num_train_timesteps} timesteps." | |
| ) | |
| self.num_inference_steps = num_inference_steps | |
| self.custom_timesteps = False | |
| # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 | |
| if self.config.timestep_spacing == "linspace": | |
| timesteps = ( | |
| np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) | |
| .round()[::-1] | |
| .copy() | |
| .astype(np.int64) | |
| ) | |
| elif self.config.timestep_spacing == "leading": | |
| step_ratio = self.config.num_train_timesteps // self.num_inference_steps | |
| # creates integer timesteps by multiplying by ratio | |
| # casting to int to avoid issues when num_inference_step is power of 3 | |
| timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) | |
| timesteps += self.config.steps_offset | |
| elif self.config.timestep_spacing == "trailing": | |
| step_ratio = self.config.num_train_timesteps / self.num_inference_steps | |
| # creates integer timesteps by multiplying by ratio | |
| # casting to int to avoid issues when num_inference_step is power of 3 | |
| timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) | |
| timesteps -= 1 | |
| else: | |
| raise ValueError( | |
| f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." | |
| ) | |
| self.timesteps = torch.from_numpy(timesteps).to(device) | |
    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._get_variance
    def _get_variance(self, t, predicted_variance=None, variance_type=None):
        prev_t = self.previous_timestep(t)

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one
        current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t

        # we always take the log of variance, so clamp it to ensure it's not 0
        variance = torch.clamp(variance, min=1e-20)

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = variance
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = torch.log(variance)
            variance = torch.exp(0.5 * variance)
        elif variance_type == "fixed_large":
            variance = current_beta_t
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = torch.log(current_beta_t)
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = torch.log(variance)
            max_log = torch.log(current_beta_t)
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
    def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
        """
        "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
        prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
        s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
        pixels from saturation at each step. We find that dynamic thresholding results in significantly better
        photorealism as well as better image-text alignment, especially when using very large guidance weights."

        https://arxiv.org/abs/2205.11487
        """
        dtype = sample.dtype
        batch_size, channels, height, width = sample.shape

        if dtype not in (torch.float32, torch.float64):
            sample = sample.float()  # upcast for quantile calculation, and clamp not implemented for cpu half

        # Flatten sample for doing quantile calculation along each image
        sample = sample.reshape(batch_size, channels * height * width)

        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"

        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
        s = torch.clamp(
            s, min=1, max=self.config.sample_max_value
        )  # When clamped to min=1, equivalent to standard clipping to [-1, 1]

        s = s.unsqueeze(1)  # (batch_size, 1) because clamp will broadcast along dim=0
        sample = torch.clamp(sample, -s, s) / s  # "we threshold xt0 to the range [-s, s] and then divide by s"

        sample = sample.reshape(batch_size, channels, height, width)
        sample = sample.to(dtype)

        return sample
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[DDPMParallelSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                current instance of sample being created by diffusion process.
            generator: random number generator.
            return_dict (`bool`): option for returning tuple rather than DDPMParallelSchedulerOutput class

        Returns:
            [`~schedulers.scheduling_ddpm_parallel.DDPMParallelSchedulerOutput`] or `tuple`:
            [`~schedulers.scheduling_ddpm_parallel.DDPMParallelSchedulerOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is the sample tensor.
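
        Example (a minimal sketch of one reverse step; the random tensors are placeholders standing in for a real
        denoising model's input and output):

        ```py
        >>> scheduler = DDPMParallelScheduler()
        >>> scheduler.set_timesteps(num_inference_steps=50)
        >>> sample = torch.randn(1, 4, 8, 8)
        >>> model_output = torch.randn(1, 4, 8, 8)  # stand-in for e.g. unet(sample, t).sample
        >>> out = scheduler.step(model_output, scheduler.timesteps[0], sample)
        >>> out.prev_sample.shape
        torch.Size([1, 4, 8, 8])
        ```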
| """ | |
| t = timestep | |
| prev_t = self.previous_timestep(t) | |
| if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: | |
| model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) | |
| else: | |
| predicted_variance = None | |
| # 1. compute alphas, betas | |
| alpha_prod_t = self.alphas_cumprod[t] | |
| alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one | |
| beta_prod_t = 1 - alpha_prod_t | |
| beta_prod_t_prev = 1 - alpha_prod_t_prev | |
| current_alpha_t = alpha_prod_t / alpha_prod_t_prev | |
| current_beta_t = 1 - current_alpha_t | |
| # 2. compute predicted original sample from predicted noise also called | |
| # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf | |
| if self.config.prediction_type == "epsilon": | |
| pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) | |
| elif self.config.prediction_type == "sample": | |
| pred_original_sample = model_output | |
| elif self.config.prediction_type == "v_prediction": | |
| pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output | |
| else: | |
| raise ValueError( | |
| f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" | |
| " `v_prediction` for the DDPMScheduler." | |
| ) | |
| # 3. Clip or threshold "predicted x_0" | |
| if self.config.thresholding: | |
| pred_original_sample = self._threshold_sample(pred_original_sample) | |
| elif self.config.clip_sample: | |
| pred_original_sample = pred_original_sample.clamp( | |
| -self.config.clip_sample_range, self.config.clip_sample_range | |
| ) | |
| # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t | |
| # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf | |
| pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t | |
| current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t | |
| # 5. Compute predicted previous sample µ_t | |
| # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf | |
| pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample | |
| # 6. Add noise | |
| variance = 0 | |
| if t > 0: | |
| device = model_output.device | |
| variance_noise = randn_tensor( | |
| model_output.shape, generator=generator, device=device, dtype=model_output.dtype | |
| ) | |
| if self.variance_type == "fixed_small_log": | |
| variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise | |
| elif self.variance_type == "learned_range": | |
| variance = self._get_variance(t, predicted_variance=predicted_variance) | |
| variance = torch.exp(0.5 * variance) * variance_noise | |
| else: | |
| variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise | |
| pred_prev_sample = pred_prev_sample + variance | |
| if not return_dict: | |
| return (pred_prev_sample,) | |
| return DDPMParallelSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) | |
    def batch_step_no_noise(
        self,
        model_output: torch.FloatTensor,
        timesteps: List[int],
        sample: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once.
        Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise
        is pre-sampled by the pipeline.

        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`): direct output from learned diffusion model.
            timesteps (`List[int]`):
                current discrete timesteps in the diffusion chain. In contrast to `step`, this is a list of integers.
            sample (`torch.FloatTensor`):
                current instance of sample being created by diffusion process.

        Returns:
            `torch.FloatTensor`: sample tensor at previous timestep.
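
        Example (a minimal sketch stepping two timesteps in parallel; the random tensors are placeholders, and the
        noise normally added by `step` is assumed to be pre-sampled by the caller):

        ```py
        >>> scheduler = DDPMParallelScheduler()
        >>> scheduler.set_timesteps(num_inference_steps=50)
        >>> t = scheduler.timesteps[:2]  # two adjacent timesteps, handled at once
        >>> sample = torch.randn(2, 4, 8, 8)
        >>> model_output = torch.randn(2, 4, 8, 8)
        >>> scheduler.batch_step_no_noise(model_output, t, sample).shape
        torch.Size([2, 4, 8, 8])
        ```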
| """ | |
| t = timesteps | |
| num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps | |
| prev_t = t - self.config.num_train_timesteps // num_inference_steps | |
| t = t.view(-1, *([1] * (model_output.ndim - 1))) | |
| prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1))) | |
| if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: | |
| model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) | |
| else: | |
| pass | |
| # 1. compute alphas, betas | |
| self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) | |
| alpha_prod_t = self.alphas_cumprod[t] | |
| alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] | |
| alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) | |
| beta_prod_t = 1 - alpha_prod_t | |
| beta_prod_t_prev = 1 - alpha_prod_t_prev | |
| current_alpha_t = alpha_prod_t / alpha_prod_t_prev | |
| current_beta_t = 1 - current_alpha_t | |
| # 2. compute predicted original sample from predicted noise also called | |
| # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf | |
| if self.config.prediction_type == "epsilon": | |
| pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) | |
| elif self.config.prediction_type == "sample": | |
| pred_original_sample = model_output | |
| elif self.config.prediction_type == "v_prediction": | |
| pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output | |
| else: | |
| raise ValueError( | |
| f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" | |
| " `v_prediction` for the DDPMParallelScheduler." | |
| ) | |
| # 3. Clip or threshold "predicted x_0" | |
| if self.config.thresholding: | |
| pred_original_sample = self._threshold_sample(pred_original_sample) | |
| elif self.config.clip_sample: | |
| pred_original_sample = pred_original_sample.clamp( | |
| -self.config.clip_sample_range, self.config.clip_sample_range | |
| ) | |
| # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t | |
| # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf | |
| pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t | |
| current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t | |
| # 5. Compute predicted previous sample µ_t | |
| # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf | |
| pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample | |
| return pred_prev_sample | |
    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
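
    # Usage sketch (illustrative only): `add_noise` implements the forward process
    # q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I), so a training step would typically do:
    #
    #     noise = torch.randn_like(clean_sample)
    #     t = torch.randint(0, len(scheduler), (clean_sample.shape[0],))
    #     noisy_sample = scheduler.add_noise(clean_sample, noise, t)
    #
    # where `clean_sample` and `scheduler` are assumed to be supplied by the caller.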
    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
    def get_velocity(
        self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as sample
        alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
        timesteps = timesteps.to(sample.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(sample.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
        return velocity
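
    # Usage sketch (illustrative only): `get_velocity` computes the v-prediction target
    # v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0, used when training a model with
    # `prediction_type="v_prediction"`:
    #
    #     target = scheduler.get_velocity(clean_sample, noise, t)
    #
    # with `clean_sample`, `noise`, and integer timesteps `t` assumed to be supplied by the caller.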
    def __len__(self):
        return self.config.num_train_timesteps

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep
    def previous_timestep(self, timestep):
        if self.custom_timesteps:
            index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0]
            if index == self.timesteps.shape[0] - 1:
                prev_t = torch.tensor(-1)
            else:
                prev_t = self.timesteps[index + 1]
        else:
            num_inference_steps = (
                self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
            )
            prev_t = timestep - self.config.num_train_timesteps // num_inference_steps

        return prev_t