Update pipeline.py
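Remove the vendored `UNet2DModel` and `ScoreSdeVeScheduler` definitions; `pipeline.py` now keeps only the `randn_tensor`, `DiffusionPipeline`, and `ImagePipelineOutput` imports from `diffusers`.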
pipeline.py +0 -602
CHANGED
@@ -1,605 +1,3 @@
-from typing import List, Optional, Tuple, Union
-
-import torch
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-
-import torch
-import torch.nn as nn
-
-from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.utils import BaseOutput
-from diffusers.models.embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
-from diffusers.models.modeling_utils import ModelMixin
-from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
-
-
-@dataclass
-class UNet2DOutput(BaseOutput):
-    """
-    The output of [`UNet2DModel`].
-    Args:
-        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
-            The hidden states output from the last layer of the model.
-    """
-
-    sample: torch.FloatTensor
-
-
-class UNet2DModel(ModelMixin, ConfigMixin):
-    r"""
-    A 2D UNet model that takes a noisy sample and a timestep and returns a sample shaped output.
-    This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
-    for all models (such as downloading or saving).
-    Parameters:
-        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
-            Height and width of input/output sample. Dimensions must be a multiple of `2 ** (len(block_out_channels) -
-            1)`.
-        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample.
-        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
-        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
-        time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
-        freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.
-        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
-            Whether to flip sin to cos for Fourier time embedding.
-        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
-            Tuple of downsample block types.
-        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`):
-            Block type for middle of UNet, it can be either `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`.
-        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
-            Tuple of upsample block types.
-        block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
-            Tuple of block output channels.
-        layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
-        mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
-        downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
-        downsample_type (`str`, *optional*, defaults to `conv`):
-            The downsample type for downsampling layers. Choose between "conv" and "resnet"
-        upsample_type (`str`, *optional*, defaults to `conv`):
-            The upsample type for upsampling layers. Choose between "conv" and "resnet"
-        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
-        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
-        attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
-        norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization.
-        attn_norm_num_groups (`int`, *optional*, defaults to `None`):
-            If set to an integer, a group norm layer will be created in the mid block's [`Attention`] layer with the
-            given number of groups. If left as `None`, the group norm layer will only be created if
-            `resnet_time_scale_shift` is set to `default`, and if created will have `norm_num_groups` groups.
-        norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization.
-        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
-            for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
-        class_embed_type (`str`, *optional*, defaults to `None`):
-            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
-            `"timestep"`, or `"identity"`.
-        num_class_embeds (`int`, *optional*, defaults to `None`):
-            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class
-            conditioning with `class_embed_type` equal to `None`.
-    """
-
-    @register_to_config
-    def __init__(
-        self,
-        sample_size: Optional[Union[int, Tuple[int, int]]] = None,
-        in_channels: int = 3,
-        out_channels: int = 3,
-        center_input_sample: bool = False,
-        time_embedding_type: str = "positional",
-        freq_shift: int = 0,
-        flip_sin_to_cos: bool = True,
-        down_block_types: Tuple[str, ...] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
-        up_block_types: Tuple[str, ...] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
-        block_out_channels: Tuple[int, ...] = (224, 448, 672, 896),
-        layers_per_block: int = 2,
-        mid_block_scale_factor: float = 1,
-        downsample_padding: int = 1,
-        downsample_type: str = "conv",
-        upsample_type: str = "conv",
-        dropout: float = 0.0,
-        act_fn: str = "silu",
-        attention_head_dim: Optional[int] = 8,
-        norm_num_groups: int = 32,
-        attn_norm_num_groups: Optional[int] = None,
-        norm_eps: float = 1e-5,
-        resnet_time_scale_shift: str = "default",
-        add_attention: bool = True,
-        class_embed_type: Optional[str] = None,
-        num_class_embeds: Optional[int] = None,
-        num_train_timesteps: Optional[int] = None,
-        set_W_to_weight: Optional[bool] = True,
-    ):
-        super().__init__()
-
-        self.sample_size = sample_size
-        time_embed_dim = block_out_channels[0] * 4
-
-        # Check inputs
-        if len(down_block_types) != len(up_block_types):
-            raise ValueError(
-                f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
-            )
-
-        if len(block_out_channels) != len(down_block_types):
-            raise ValueError(
-                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
-            )
-
-        # input
-        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
-
-        # time
-        if time_embedding_type == "fourier":
-            self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16, set_W_to_weight=set_W_to_weight)
-            timestep_input_dim = 2 * block_out_channels[0]
-        elif time_embedding_type == "positional":
-            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
-            timestep_input_dim = block_out_channels[0]
-        elif time_embedding_type == "learned":
-            self.time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0])
-            timestep_input_dim = block_out_channels[0]
-
-        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
-
-        # class embedding
-        if class_embed_type is None and num_class_embeds is not None:
-            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
-        elif class_embed_type == "timestep":
-            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
-        elif class_embed_type == "identity":
-            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
-        else:
-            self.class_embedding = None
-
-        self.down_blocks = nn.ModuleList([])
-        self.mid_block = None
-        self.up_blocks = nn.ModuleList([])
-
-        # down
-        output_channel = block_out_channels[0]
-        for i, down_block_type in enumerate(down_block_types):
-            input_channel = output_channel
-            output_channel = block_out_channels[i]
-            is_final_block = i == len(block_out_channels) - 1
-
-            down_block = get_down_block(
-                down_block_type,
-                num_layers=layers_per_block,
-                in_channels=input_channel,
-                out_channels=output_channel,
-                temb_channels=time_embed_dim,
-                add_downsample=not is_final_block,
-                resnet_eps=norm_eps,
-                resnet_act_fn=act_fn,
-                resnet_groups=norm_num_groups,
-                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
-                downsample_padding=downsample_padding,
-                resnet_time_scale_shift=resnet_time_scale_shift,
-                downsample_type=downsample_type,
-                dropout=dropout,
-            )
-            self.down_blocks.append(down_block)
-
-        # mid
-        self.mid_block = UNetMidBlock2D(
-            in_channels=block_out_channels[-1],
-            temb_channels=time_embed_dim,
-            dropout=dropout,
-            resnet_eps=norm_eps,
-            resnet_act_fn=act_fn,
-            output_scale_factor=mid_block_scale_factor,
-            resnet_time_scale_shift=resnet_time_scale_shift,
-            attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1],
-            resnet_groups=norm_num_groups,
-            attn_groups=attn_norm_num_groups,
-            add_attention=add_attention,
-        )
-
-        # up
-        reversed_block_out_channels = list(reversed(block_out_channels))
-        output_channel = reversed_block_out_channels[0]
-        for i, up_block_type in enumerate(up_block_types):
-            prev_output_channel = output_channel
-            output_channel = reversed_block_out_channels[i]
-            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
-
-            is_final_block = i == len(block_out_channels) - 1
-
-            up_block = get_up_block(
-                up_block_type,
-                num_layers=layers_per_block + 1,
-                in_channels=input_channel,
-                out_channels=output_channel,
-                prev_output_channel=prev_output_channel,
-                temb_channels=time_embed_dim,
-                add_upsample=not is_final_block,
-                resnet_eps=norm_eps,
-                resnet_act_fn=act_fn,
-                resnet_groups=norm_num_groups,
-                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
-                resnet_time_scale_shift=resnet_time_scale_shift,
-                upsample_type=upsample_type,
-                dropout=dropout,
-            )
-            self.up_blocks.append(up_block)
-            prev_output_channel = output_channel
-
-        # out
-        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
-        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps)
-        self.conv_act = nn.SiLU()
-        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
-
-    def forward(
-        self,
-        sample: torch.FloatTensor,
-        timestep: Union[torch.Tensor, float, int],
-        class_labels: Optional[torch.Tensor] = None,
-        return_dict: bool = True,
-    ) -> Union[UNet2DOutput, Tuple]:
-        r"""
-        The [`UNet2DModel`] forward method.
-        Args:
-            sample (`torch.FloatTensor`):
-                The noisy input tensor with the following shape `(batch, channel, height, width)`.
-            timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
-            class_labels (`torch.FloatTensor`, *optional*, defaults to `None`):
-                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.
-        Returns:
-            [`~models.unet_2d.UNet2DOutput`] or `tuple`:
-                If `return_dict` is True, an [`~models.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is
-                returned where the first element is the sample tensor.
-        """
-        # 0. center input if necessary
-        if self.config.center_input_sample:
-            sample = 2 * sample - 1.0
-
-        # 1. time
-        timesteps = timestep
-        if not torch.is_tensor(timesteps):
-            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
-        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
-            timesteps = timesteps[None].to(sample.device)
-
-        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
-        timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)
-
-        t_emb = self.time_proj(timesteps)
-
-        # timesteps does not contain any weights and will always return f32 tensors
-        # but time_embedding might actually be running in fp16. so we need to cast here.
-        # there might be better ways to encapsulate this.
-        t_emb = t_emb.to(dtype=self.dtype)
-        emb = self.time_embedding(t_emb)
-
-        if self.class_embedding is not None:
-            if class_labels is None:
-                raise ValueError("class_labels should be provided when doing class conditioning")
-
-            if self.config.class_embed_type == "timestep":
-                class_labels = self.time_proj(class_labels)
-
-            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
-            emb = emb + class_emb
-        elif self.class_embedding is None and class_labels is not None:
-            raise ValueError("class_embedding needs to be initialized in order to use class conditioning")
-
-        # 2. pre-process
-        skip_sample = sample
-        sample = self.conv_in(sample)
-
-        # 3. down
-        down_block_res_samples = (sample,)
-        for downsample_block in self.down_blocks:
-            if hasattr(downsample_block, "skip_conv"):
-                sample, res_samples, skip_sample = downsample_block(
-                    hidden_states=sample, temb=emb, skip_sample=skip_sample
-                )
-            else:
-                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
-
-            down_block_res_samples += res_samples
-
-        # 4. mid
-        sample = self.mid_block(sample, emb)
-
-        # 5. up
-        skip_sample = None
-        for upsample_block in self.up_blocks:
-            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
-            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
-
-            if hasattr(upsample_block, "skip_conv"):
-                sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
-            else:
-                sample = upsample_block(sample, res_samples, emb)
-
-        # 6. post-process
-        sample = self.conv_norm_out(sample)
-        sample = self.conv_act(sample)
-        sample = self.conv_out(sample)
-
-        if skip_sample is not None:
-            sample += skip_sample
-
-        if self.config.time_embedding_type == "fourier":
-            timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
-            sample = sample / timesteps
-
-        if not return_dict:
-            return (sample,)
-
-        return UNet2DOutput(sample=sample)
-
-import math
-
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-import torch
-from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.utils import BaseOutput
-from diffusers.utils.torch_utils import randn_tensor
-from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
-
-@dataclass
-class SdeVeOutput(BaseOutput):
-    """
-    Output class for the scheduler's `step` function output.
-    Args:
-        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
-            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
-            denoising loop.
-        prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
-            Mean averaged `prev_sample` over previous timesteps.
-    """
-
-    prev_sample: torch.FloatTensor
-    prev_sample_mean: torch.FloatTensor
-
-
-class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
-    """
-    `ScoreSdeVeScheduler` is a variance exploding stochastic differential equation (SDE) scheduler.
-    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
-    methods the library implements for all schedulers such as loading and saving.
-    Args:
-        num_train_timesteps (`int`, defaults to 1000):
-            The number of diffusion steps to train the model.
-        snr (`float`, defaults to 0.15):
-            A coefficient weighting the step from the `model_output` sample (from the network) to the random noise.
-        sigma_min (`float`, defaults to 0.01):
-            The initial noise scale for the sigma sequence in the sampling procedure. The minimum sigma should mirror
-            the distribution of the data.
-        sigma_max (`float`, defaults to 1348.0):
-            The maximum value used for the range of continuous timesteps passed into the model.
-        sampling_eps (`float`, defaults to 1e-5):
-            The end value of sampling where timesteps decrease progressively from 1 to epsilon.
-        correct_steps (`int`, defaults to 1):
-            The number of correction steps performed on a produced sample.
-    """
-
-    order = 1
-
-    @register_to_config
-    def __init__(
-        self,
-        num_train_timesteps: int = 2000,
-        snr: float = 0.15,
-        sigma_min: float = 0.01,
-        sigma_max: float = 1348.0,
-        sampling_eps: float = 1e-5,
-        correct_steps: int = 1,
-    ):
-        # standard deviation of the initial noise distribution
-        self.init_noise_sigma = sigma_max
-
-        # setable values
-        self.timesteps = None
-
-        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
-
-    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
-        """
-        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
-        current timestep.
-        Args:
-            sample (`torch.FloatTensor`):
-                The input sample.
-            timestep (`int`, *optional*):
-                The current timestep in the diffusion chain.
-        Returns:
-            `torch.FloatTensor`:
-                A scaled input sample.
-        """
-        return sample
-
-    def set_timesteps(
-        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
-    ):
-        """
-        Sets the continuous timesteps used for the diffusion chain (to be run before inference).
-        Args:
-            num_inference_steps (`int`):
-                The number of diffusion steps used when generating samples with a pre-trained model.
-            sampling_eps (`float`, *optional*):
-                The final timestep value (overrides value given during scheduler instantiation).
-            device (`str` or `torch.device`, *optional*):
-                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
-        """
-        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
-
-        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
-
-    def set_sigmas(
-        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
-    ):
-        """
-        Sets the noise scales used for the diffusion chain (to be run before inference). The sigmas control the weight
-        of the `drift` and `diffusion` components of the sample update.
-        Args:
-            num_inference_steps (`int`):
-                The number of diffusion steps used when generating samples with a pre-trained model.
-            sigma_min (`float`, optional):
-                The initial noise scale value (overrides value given during scheduler instantiation).
-            sigma_max (`float`, optional):
-                The final noise scale value (overrides value given during scheduler instantiation).
-            sampling_eps (`float`, optional):
-                The final timestep value (overrides value given during scheduler instantiation).
-        """
-        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
-        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
-        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
-        if self.timesteps is None:
-            self.set_timesteps(num_inference_steps, sampling_eps)
-
-        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
-        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
-        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
-
-    def get_adjacent_sigma(self, timesteps, t):
-        return torch.where(
-            timesteps == 0,
-            torch.zeros_like(t.to(timesteps.device)),
-            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
-        )
-
-    def step_pred(
-        self,
-        model_output: torch.FloatTensor,
-        timestep: int,
-        sample: torch.FloatTensor,
-        generator: Optional[torch.Generator] = None,
-        return_dict: bool = True,
-    ) -> Union[SdeVeOutput, Tuple]:
-        """
-        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
-        process from the learned model outputs (most often the predicted noise).
-        Args:
-            model_output (`torch.FloatTensor`):
-                The direct output from learned diffusion model.
-            timestep (`int`):
-                The current discrete timestep in the diffusion chain.
-            sample (`torch.FloatTensor`):
-                A current instance of a sample created by the diffusion process.
-            generator (`torch.Generator`, *optional*):
-                A random number generator.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`.
-        Returns:
-            [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`:
-                If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple
-                is returned where the first element is the sample tensor.
-        """
-        if self.timesteps is None:
-            raise ValueError(
-                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
-            )
-
-        timestep = timestep * torch.ones(
-            sample.shape[0], device=sample.device
-        )  # torch.repeat_interleave(timestep, sample.shape[0])
-        timesteps = (timestep * (len(self.timesteps) - 1)).long()
-
-        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
-        timesteps = timesteps.to(self.discrete_sigmas.device)
-
-        sigma = self.discrete_sigmas[timesteps].to(sample.device)
-        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
-        drift = torch.zeros_like(sample)
-        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
-
-        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
-        # also equation 47 shows the analog from SDE models to ancestral sampling methods
-        diffusion = diffusion.flatten()
-        while len(diffusion.shape) < len(sample.shape):
-            diffusion = diffusion.unsqueeze(-1)
-        drift = drift - diffusion**2 * model_output
-
-        # equation 6: sample noise for the diffusion term of
-        noise = randn_tensor(
-            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
-        )
-        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
-        # TODO is the variable diffusion the correct scaling term for the noise?
-        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
-
-        if not return_dict:
-            return (prev_sample, prev_sample_mean)
-
-        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
-
-    def step_correct(
-        self,
-        model_output: torch.FloatTensor,
-        sample: torch.FloatTensor,
-        generator: Optional[torch.Generator] = None,
-        return_dict: bool = True,
-    ) -> Union[SchedulerOutput, Tuple]:
-        """
-        Correct the predicted sample based on the `model_output` of the network. This is often run repeatedly after
-        making the prediction for the previous timestep.
-        Args:
-            model_output (`torch.FloatTensor`):
-                The direct output from learned diffusion model.
-            sample (`torch.FloatTensor`):
-                A current instance of a sample created by the diffusion process.
-            generator (`torch.Generator`, *optional*):
-                A random number generator.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`.
-        Returns:
-            [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`:
-                If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple
-                is returned where the first element is the sample tensor.
-        """
-        if self.timesteps is None:
-            raise ValueError(
-                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
-            )
-
-        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
-        # sample noise for correction
-        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator, device=sample.device).to(sample.device)
-
-        # compute step size from the model_output, the noise, and the snr
-        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
-        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
-        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
-        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
-        # self.repeat_scalar(step_size, sample.shape[0])
-
-        # compute corrected sample: model_output term and noise term
-        step_size = step_size.flatten()
-        while len(step_size.shape) < len(sample.shape):
-            step_size = step_size.unsqueeze(-1)
-        prev_sample_mean = sample + step_size * model_output
-        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
-
-        if not return_dict:
-            return (prev_sample,)
-
-        return SchedulerOutput(prev_sample=prev_sample)
-
-    def add_noise(
-        self,
-        original_samples: torch.FloatTensor,
-        noise: torch.FloatTensor,
-        timesteps: torch.FloatTensor,
-    ) -> torch.FloatTensor:
-        # Make sure sigmas and timesteps have the same device and dtype as original_samples
-        timesteps = timesteps.to(original_samples.device)
-        sigmas = self.config.sigma_min * (self.config.sigma_max / self.config.sigma_min) ** timesteps
-        noise = (
-            noise * sigmas[:, None, None, None]
-            if noise is not None
-            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
-        )
-        noisy_samples = noise + original_samples
-        return noisy_samples
-
-    def __len__(self):
-        return self.config.num_train_timesteps
-
 from diffusers.utils.torch_utils import randn_tensor
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
 
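For context: the deleted `ScoreSdeVeScheduler` pairs `step_pred` (a reverse-SDE predictor step) with `step_correct` (a Langevin corrector step), and upstream `diffusers` ships classes of the same names with the same interface. Below is a minimal sketch of the predictor-corrector sampling loop those methods support; the UNet config, image shape, and step count are illustrative assumptions, not values from this repo.

import torch
from diffusers import UNet2DModel, ScoreSdeVeScheduler
from diffusers.utils.torch_utils import randn_tensor

# Untrained, illustrative score network; a real one would come from from_pretrained(...).
unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = ScoreSdeVeScheduler()

num_inference_steps = 50  # illustrative; far fewer than the 2000 training steps
scheduler.set_timesteps(num_inference_steps)
scheduler.set_sigmas(num_inference_steps)

shape = (1, 3, 32, 32)
# start from pure noise at the largest sigma (variance-exploding prior)
sample = randn_tensor(shape) * scheduler.init_noise_sigma

for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])

    # corrector: `correct_steps` Langevin updates via step_correct
    for _ in range(scheduler.config.correct_steps):
        model_output = unet(sample, sigma_t).sample
        sample = scheduler.step_correct(model_output, sample).prev_sample

    # predictor: one reverse-SDE step via step_pred
    model_output = unet(sample, sigma_t).sample
    output = scheduler.step_pred(model_output, t, sample)
    sample, sample_mean = output.prev_sample, output.prev_sample_mean

image = sample_mean.clamp(0, 1)  # keep the mean on the final step

Keeping `prev_sample_mean` as the final image mirrors the convention in the removed code, which returns both the noisy sample and its mean so the last denoising step can skip the extra draw of noise.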
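End to end, the slimmed-down file presumably delegates to the stock score-SDE-VE machinery that the two surviving imports support. A hedged usage sketch; the checkpoint id below is a public upstream example, not this repository:

import torch
from diffusers import ScoreSdeVePipeline

# Example public SDE-VE checkpoint; substitute the repo this pipeline.py ships with.
pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# 2000 steps matches the scheduler's num_train_timesteps default
image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")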