code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def clear(self):
"""
Clears the attention bank of all reader attention modules.
This method is used when the `reference_attn` attribute is set to `True`.
It clears the attention bank of all reader attention modules inside the UNet
model based on the selected `fusion_blocks` mode.
If `fusion_blocks` is set to "midup", it searches for reader attention modules
in both the mid block and up blocks of the UNet model. If `fusion_blocks` is set
to "full", it searches for reader attention modules in the entire UNet model.
It sorts the reader attention modules by their hidden size (`norm1.normalized_shape[0]`)
in descending order, so that modules with larger feature dimensions are cleared first.
Finally, it iterates through the sorted list of reader attention modules and
calls the `clear()` method on each module's `bank` attribute to clear the
attention bank.
"""
if self.reference_attn:
if self.fusion_blocks == "midup":
reader_attn_modules = [
module
for module in (
torch_dfs(self.unet.mid_block) +
torch_dfs(self.unet.up_blocks)
)
if isinstance(module, (BasicTransformerBlock, TemporalBasicTransformerBlock))
]
elif self.fusion_blocks == "full":
reader_attn_modules = [
module
for module in torch_dfs(self.unet)
if isinstance(module, (BasicTransformerBlock, TemporalBasicTransformerBlock))
]
reader_attn_modules = sorted(
reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]
)
for r in reader_attn_modules:
r.bank.clear()
|
Clears the attention bank of all reader attention modules.
This method is used when the `reference_attn` attribute is set to `True`.
It clears the attention bank of all reader attention modules inside the UNet
model based on the selected `fusion_blocks` mode.
If `fusion_blocks` is set to "midup", it searches for reader attention modules
in both the mid block and up blocks of the UNet model. If `fusion_blocks` is set
to "full", it searches for reader attention modules in the entire UNet model.
It sorts the reader attention modules by their hidden size (`norm1.normalized_shape[0]`)
in descending order, so that modules with larger feature dimensions are cleared first.
Finally, it iterates through the sorted list of reader attention modules and
calls the `clear()` method on each module's `bank` attribute to clear the
attention bank.
|
clear
|
python
|
fudan-generative-vision/hallo
|
hallo/models/mutual_self_attention.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/mutual_self_attention.py
|
MIT
|
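The `clear` method above relies on a `torch_dfs` helper that is not shown in this row. A minimal sketch, assuming it simply performs a depth-first collection of a module and all of its submodules:

```python
import torch

def torch_dfs(model: torch.nn.Module):
    """Depth-first traversal: the module itself plus every nested submodule."""
    # sketch -- the repository's actual helper may differ in detail
    result = [model]
    for child in model.children():
        result += torch_dfs(child)
    return result
```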
def forward(self, x):
"""
Forward pass of the InflatedConv3d layer.
Args:
x (torch.Tensor): Input tensor to the layer.
Returns:
torch.Tensor: Output tensor after applying the InflatedConv3d layer.
"""
video_length = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")
x = super().forward(x)
x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
return x
|
Forward pass of the InflatedConv3d layer.
Args:
x (torch.Tensor): Input tensor to the layer.
Returns:
torch.Tensor: Output tensor after applying the InflatedConv3d layer.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/resnet.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/resnet.py
|
MIT
|
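The `forward` above calls `super().forward(x)` on a 4D tensor, which suggests `InflatedConv3d` subclasses `nn.Conv2d` and merely folds the frame axis into the batch axis. A minimal sketch under that assumption, with a usage example:

```python
import torch
from torch import nn
from einops import rearrange

class InflatedConv3d(nn.Conv2d):
    """2D convolution applied frame-by-frame to 5D (b, c, f, h, w) video tensors."""
    # assumed base class; only the reshaping logic below is taken from the row above
    def forward(self, x):
        video_length = x.shape[2]
        # fold frames into the batch dimension, run the 2D conv, then unfold
        x = rearrange(x, "b c f h w -> (b f) c h w")
        x = super().forward(x)
        x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
        return x

# usage: a 3x3 conv applied independently to each of 8 frames
conv = InflatedConv3d(3, 16, kernel_size=3, padding=1)
out = conv(torch.randn(2, 3, 8, 32, 32))   # -> (2, 16, 8, 32, 32)
```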
def forward(self, x):
"""
Performs a forward pass through the layer, folding the frame axis into the batch axis before applying the parent 2D operation.
:param x: Input tensor of shape (batch_size, channels, video_length, height, width).
:return: Output tensor of shape (batch_size, channels, video_length, height, width).
"""
video_length = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")
x = super().forward(x)
x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
return x
|
Performs a forward pass through the layer, folding the frame axis into the batch axis before applying the parent 2D operation.
:param x: Input tensor of shape (batch_size, channels, video_length, height, width).
:return: Output tensor of shape (batch_size, channels, video_length, height, width).
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/resnet.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/resnet.py
|
MIT
|
def forward(self, hidden_states, output_size=None):
"""
Forward pass of the Upsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be upsampled.
output_size (tuple, optional): Desired output size of the upsampled tensor.
Returns:
torch.Tensor: Upsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
"""
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
raise NotImplementedError
# Cast to float32 because the 'upsample_nearest2d_out_frame' op does not support bfloat16
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
hidden_states = hidden_states.contiguous()
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(
hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest"
)
else:
hidden_states = F.interpolate(
hidden_states, size=output_size, mode="nearest"
)
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
# if self.use_conv:
# if self.name == "conv":
# hidden_states = self.conv(hidden_states)
# else:
# hidden_states = self.Conv2d_0(hidden_states)
hidden_states = self.conv(hidden_states)
return hidden_states
|
Forward pass of the Upsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be upsampled.
output_size (tuple, optional): Desired output size of the upsampled tensor.
Returns:
torch.Tensor: Upsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/resnet.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/resnet.py
|
MIT
|
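For a 5D video tensor, the `scale_factor=[1.0, 2.0, 2.0]` call in `Upsample3D.forward` doubles only the spatial dimensions and leaves the frame axis untouched. A small illustrative check (not part of the repository):

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 320, 8, 32, 32)                       # (b, c, f, h, w)
up = F.interpolate(x, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
print(up.shape)                                           # torch.Size([1, 320, 8, 64, 64])
```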
def __init__(
self, channels, use_conv=False, out_channels=None, padding=1, name="conv"
):
"""
Initializes a module that downsamples 3D (video) inputs along the spatial dimensions.
Args:
channels: The number of input channels.
use_conv: Whether to use a convolutional layer for downsampling.
out_channels: The number of output channels. If None, the input channels are used.
padding: The amount of padding to be added to the input.
name: The name of the convolutional layer.
"""
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
self.conv = InflatedConv3d(
self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
raise NotImplementedError
|
Initializes a module that downsamples 3D (video) inputs along the spatial dimensions.
Args:
channels: The number of input channels.
use_conv: Whether to use a convolutional layer for downsampling.
out_channels: The number of output channels. If None, the input channels are used.
padding: The amount of padding to be added to the input.
name: The name of the convolutional layer.
|
__init__
|
python
|
fudan-generative-vision/hallo
|
hallo/models/resnet.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/resnet.py
|
MIT
|
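With kernel size 3, stride 2, and padding 1, the convolution in this downsampler halves each spatial dimension (for even H: floor((H + 2·1 − 3)/2) + 1 = H/2). A quick illustration with a plain `nn.Conv2d` standing in for `InflatedConv3d`:

```python
import torch
from torch import nn

# same kernel/stride/padding as the downsampler's conv layer
conv = nn.Conv2d(320, 320, kernel_size=3, stride=2, padding=1)
y = conv(torch.randn(1, 320, 64, 64))
print(y.shape)    # torch.Size([1, 320, 32, 32]) -- spatial dims halved
```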
def forward(self, hidden_states):
"""
Forward pass for the Downsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be downsampled.
Returns:
torch.Tensor: Downsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
"""
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
raise NotImplementedError
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
|
Forward pass for the Downsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be downsampled.
Returns:
torch.Tensor: Downsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/resnet.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/resnet.py
|
MIT
|
def forward(self, input_tensor, temb):
"""
Forward pass for the ResnetBlock3D class.
Args:
input_tensor (torch.Tensor): Input tensor to the ResnetBlock3D layer.
temb (torch.Tensor): Time embedding tensor.
Returns:
torch.Tensor: Output tensor after passing through the ResnetBlock3D layer.
"""
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
if temb is not None and self.time_embedding_norm == "default":
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
if temb is not None and self.time_embedding_norm == "scale_shift":
scale, shift = torch.chunk(temb, 2, dim=1)
hidden_states = hidden_states * (1 + scale) + shift
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
|
Forward pass for the ResnetBlock3D class.
Args:
input_tensor (torch.Tensor): Input tensor to the ResnetBlock3D layer.
temb (torch.Tensor): Time embedding tensor.
Returns:
torch.Tensor: Output tensor after passing through the ResnetBlock3D layer.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/resnet.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/resnet.py
|
MIT
|
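When `time_embedding_norm == "scale_shift"`, the projected time embedding is split along the channel axis into a scale and a shift that modulate the normalized features. An illustrative sketch of just that arithmetic (shapes chosen arbitrarily, not taken from the repository):

```python
import torch

hidden_states = torch.randn(2, 320, 8, 32, 32)           # (b, c, f, h, w)
temb = torch.randn(2, 640)[:, :, None, None, None]       # time embedding projected to 2*c channels
scale, shift = torch.chunk(temb, 2, dim=1)                # each (2, 320, 1, 1, 1)
hidden_states = hidden_states * (1 + scale) + shift       # FiLM-style modulation, broadcast over f, h, w
print(hidden_states.shape)                                 # torch.Size([2, 320, 8, 32, 32])
```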
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
timestep: Optional[torch.LongTensor] = None,
_added_cond_kwargs: Dict[str, torch.Tensor] = None,
class_labels: Optional[torch.LongTensor] = None,
cross_attention_kwargs: Dict[str, Any] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
):
"""
The [`Transformer2DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete,
`torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
Input `hidden_states`.
encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
`AdaLayerZeroNorm`.
cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
`tuple` where the first element is the sample tensor.
"""
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
# we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
# we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None and attention_mask.ndim == 2:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
encoder_attention_mask = (
1 - encoder_attention_mask.to(hidden_states.dtype)
) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# Retrieve lora scale.
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
# 1. Input
batch, _, height, width = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = (
self.proj_in(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_in(hidden_states)
)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * width, inner_dim
)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * width, inner_dim
)
hidden_states = (
self.proj_in(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_in(hidden_states)
)
# 2. Blocks
if self.caption_projection is not None:
batch_size = hidden_states.shape[0]
encoder_hidden_states = self.caption_projection(encoder_hidden_states)
encoder_hidden_states = encoder_hidden_states.view(
batch_size, -1, hidden_states.shape[-1]
)
ref_feature = hidden_states.reshape(batch, height, width, inner_dim)
for block in self.transformer_blocks:
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
timestep,
cross_attention_kwargs,
class_labels,
**ckpt_kwargs,
)
else:
hidden_states = block(
hidden_states, # shape [5, 4096, 320]
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states, # shape [1,4,768]
encoder_attention_mask=encoder_attention_mask,
timestep=timestep,
cross_attention_kwargs=cross_attention_kwargs,
class_labels=class_labels,
)
# 3. Output
output = None
if self.is_input_continuous:
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, width, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
hidden_states = (
self.proj_out(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_out(hidden_states)
)
else:
hidden_states = (
self.proj_out(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_out(hidden_states)
)
hidden_states = (
hidden_states.reshape(batch, height, width, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
output = hidden_states + residual
if not return_dict:
return (output, ref_feature)
return Transformer2DModelOutput(sample=output, ref_feature=ref_feature)
|
The [`Transformer2DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete,
`torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
Input `hidden_states`.
encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
`AdaLayerZeroNorm`.
cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
`tuple` where the first element is the sample tensor.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/transformer_2d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/transformer_2d.py
|
MIT
|
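The mask handling at the top of the `Transformer2DModel.forward` above turns a binary `(batch, key_tokens)` mask into an additive bias with a singleton query dimension. A standalone illustration of that conversion:

```python
import torch

attention_mask = torch.tensor([[1, 1, 0, 0]])                  # 1 = keep, 0 = discard
bias = (1 - attention_mask.to(torch.float32)) * -10000.0       # keep -> 0, discard -> -10000
bias = bias.unsqueeze(1)                                       # (batch, 1, key_tokens)
# kept tokens contribute ~0 to the attention scores, discarded tokens get -10000
print(bias.shape)                                              # torch.Size([1, 1, 4])
```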
def forward(
self,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
full_mask=None,
face_mask=None,
lip_mask=None,
motion_scale=None,
timestep=None,
return_dict: bool = True,
):
"""
Forward pass for the Transformer3DModel.
Args:
hidden_states (torch.Tensor): The input hidden states.
encoder_hidden_states (torch.Tensor, optional): The input encoder hidden states.
attention_mask (torch.Tensor, optional): The attention mask.
full_mask (torch.Tensor, optional): The full mask.
face_mask (torch.Tensor, optional): The face mask.
lip_mask (torch.Tensor, optional): The lip mask.
timestep (int, optional): The current timestep.
return_dict (bool, optional): Whether to return a dictionary or a tuple.
Returns:
output (Union[Tuple, BaseOutput]): The output of the Transformer3DModel.
"""
# Input
assert (
hidden_states.dim() == 5
), f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
# TODO
if self.use_audio_module:
encoder_hidden_states = rearrange(
encoder_hidden_states,
"bs f margin dim -> (bs f) margin dim",
)
else:
if encoder_hidden_states.shape[0] != hidden_states.shape[0]:
encoder_hidden_states = repeat(
encoder_hidden_states, "b n c -> (b f) n c", f=video_length
)
batch, _, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * weight, inner_dim
)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * weight, inner_dim
)
hidden_states = self.proj_in(hidden_states)
# Blocks
motion_frames = []
for _, block in enumerate(self.transformer_blocks):
if isinstance(block, TemporalBasicTransformerBlock):
hidden_states, motion_frame_fea = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
video_length=video_length,
)
motion_frames.append(motion_frame_fea)
else:
hidden_states = block(
hidden_states, # shape [2, 4096, 320]
encoder_hidden_states=encoder_hidden_states, # shape [2, 20, 640]
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
timestep=timestep,
video_length=video_length,
motion_scale=motion_scale,
)
# Output
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
hidden_states = self.proj_out(hidden_states)
else:
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
if not return_dict:
return (output, motion_frames)
return Transformer3DModelOutput(sample=output)
|
Forward pass for the Transformer3DModel.
Args:
hidden_states (torch.Tensor): The input hidden states.
encoder_hidden_states (torch.Tensor, optional): The input encoder hidden states.
attention_mask (torch.Tensor, optional): The attention mask.
full_mask (torch.Tensor, optional): The full mask.
face_mask (torch.Tensor, optional): The face mask.
lip_mask (torch.Tensor, optional): The lip mask.
timestep (int, optional): The current timestep.
return_dict (bool, optional): Whether to return a dictionary or a tuple.
Returns:
output (Union[Tuple, BaseOutput]): The output of the Transformer3DModel.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/transformer_3d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/transformer_3d.py
|
MIT
|
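When the audio module is not used and the conditioning embedding has one entry per video rather than per frame, `Transformer3DModel.forward` repeats it across the frame axis so every (batch, frame) pair sees the same conditioning. A small demonstration of the `einops.repeat` pattern used above (tensor sizes are illustrative):

```python
import torch
from einops import repeat

video_length = 8
encoder_hidden_states = torch.randn(2, 77, 768)                      # (b, n, c), one per video
per_frame = repeat(encoder_hidden_states, "b n c -> (b f) n c", f=video_length)
print(per_frame.shape)                                               # torch.Size([16, 77, 768])
```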
def get_down_block(
down_block_type: str,
num_layers: int,
in_channels: int,
out_channels: int,
temb_channels: int,
add_downsample: bool,
resnet_eps: float,
resnet_act_fn: str,
transformer_layers_per_block: int = 1,
num_attention_heads: Optional[int] = None,
resnet_groups: Optional[int] = None,
cross_attention_dim: Optional[int] = None,
downsample_padding: Optional[int] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
attention_type: str = "default",
attention_head_dim: Optional[int] = None,
dropout: float = 0.0,
):
""" This function creates and returns a UpBlock2D or CrossAttnUpBlock2D object based on the given up_block_type.
Args:
up_block_type (str): The type of up block to create. Must be either "UpBlock2D" or "CrossAttnUpBlock2D".
num_layers (int): The number of layers in the ResNet block.
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
prev_output_channel (int): The number of channels in the previous output.
temb_channels (int): The number of channels in the token embedding.
add_upsample (bool): Whether to add an upsample layer after the ResNet block. Defaults to True.
resnet_eps (float): The epsilon value for the ResNet block. Defaults to 1e-6.
resnet_act_fn (str): The activation function to use in the ResNet block. Defaults to "swish".
resnet_groups (int): The number of groups in the ResNet block. Defaults to 32.
resnet_pre_norm (bool): Whether to use pre-normalization in the ResNet block. Defaults to True.
output_scale_factor (float): The scale factor to apply to the output. Defaults to 1.0.
Returns:
nn.Module: The created UpBlock2D or CrossAttnUpBlock2D object.
"""
# If attn head dim is not defined, we default it to the number of heads
if attention_head_dim is None:
logger.warning("It is recommended to provide `attention_head_dim` when calling `get_down_block`.")
logger.warning(f"Defaulting `attention_head_dim` to {num_attention_heads}.")
attention_head_dim = num_attention_heads
down_block_type = (
down_block_type[7:]
if down_block_type.startswith("UNetRes")
else down_block_type
)
if down_block_type == "DownBlock2D":
return DownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
dropout=dropout,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
)
if down_block_type == "CrossAttnDownBlock2D":
if cross_attention_dim is None:
raise ValueError(
"cross_attention_dim must be specified for CrossAttnDownBlock2D"
)
return CrossAttnDownBlock2D(
num_layers=num_layers,
transformer_layers_per_block=transformer_layers_per_block,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
dropout=dropout,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
)
raise ValueError(f"{down_block_type} does not exist.")
|
This function creates and returns a DownBlock2D or CrossAttnDownBlock2D object based on the given down_block_type.
Args:
down_block_type (str): The type of down block to create. Must be "DownBlock2D" or "CrossAttnDownBlock2D" (an optional "UNetRes" prefix is stripped).
num_layers (int): The number of ResNet layers in the block.
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
temb_channels (int): The number of channels in the time embedding.
add_downsample (bool): Whether to add a downsampling layer after the ResNet layers.
resnet_eps (float): The epsilon value for the ResNet normalization layers.
resnet_act_fn (str): The activation function used in the ResNet layers.
resnet_groups (Optional[int]): The number of groups for the ResNet normalization layers.
cross_attention_dim (Optional[int]): The cross-attention dimension. Required for "CrossAttnDownBlock2D".
attention_head_dim (Optional[int]): The attention head dimension. Defaults to `num_attention_heads` when not provided.
The remaining keyword arguments configure attention and downsampling behaviour of the created block.
Returns:
nn.Module: The created DownBlock2D or CrossAttnDownBlock2D object.
|
get_down_block
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_blocks.py
|
MIT
|
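The slicing `down_block_type[7:]` in `get_down_block` strips an optional "UNetRes" prefix (7 characters) so legacy block names resolve to the same classes. A tiny illustration:

```python
down_block_type = "UNetResCrossAttnDownBlock2D"
if down_block_type.startswith("UNetRes"):
    down_block_type = down_block_type[7:]   # len("UNetRes") == 7
print(down_block_type)                      # CrossAttnDownBlock2D
```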
def forward(
self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
"""
Forward pass of the UNetMidBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the UNetMidBlock2D.
temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2D.
"""
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
if attn is not None:
hidden_states = attn(hidden_states, temb=temb)
hidden_states = resnet(hidden_states, temb)
return hidden_states
|
Forward pass of the UNetMidBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the UNetMidBlock2D.
temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2D.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_blocks.py
|
MIT
|
def forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Forward pass for the UNetMidBlock2DCrossAttn class.
Args:
hidden_states (torch.FloatTensor): The input hidden states tensor.
temb (Optional[torch.FloatTensor], optional): The optional tensor for time embeddings.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The optional encoder hidden states tensor.
attention_mask (Optional[torch.FloatTensor], optional): The optional attention mask tensor.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The optional cross-attention kwargs tensor.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The optional encoder attention mask tensor.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2DCrossAttn layers.
"""
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
else:
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
return hidden_states
|
Forward pass for the UNetMidBlock2DCrossAttn class.
Args:
hidden_states (torch.FloatTensor): The input hidden states tensor.
temb (Optional[torch.FloatTensor], optional): The optional tensor for time embeddings.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The optional encoder hidden states tensor.
attention_mask (Optional[torch.FloatTensor], optional): The optional attention mask tensor.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The optional cross-attention kwargs tensor.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The optional encoder attention mask tensor.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2DCrossAttn layers.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_blocks.py
|
MIT
|
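The `create_custom_forward` wrapper above lets `torch.utils.checkpoint.checkpoint` pass positional inputs through a module while optionally forcing `return_dict`. A self-contained sketch of the same pattern on a dummy module (the `nn.Linear` block and tensor sizes are illustrative, not from the repository):

```python
import torch
import torch.utils.checkpoint
from torch import nn

def create_custom_forward(module, return_dict=None):
    def custom_forward(*inputs):
        if return_dict is not None:
            return module(*inputs, return_dict=return_dict)
        return module(*inputs)
    return custom_forward

block = nn.Linear(320, 320)                               # stand-in for a transformer/resnet block
hidden_states = torch.randn(2, 4096, 320, requires_grad=True)

# recompute the block's activations during backward instead of storing them
hidden_states = torch.utils.checkpoint.checkpoint(
    create_custom_forward(block), hidden_states, use_reentrant=False
)
hidden_states.sum().backward()
```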
def forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
additional_residuals: Optional[torch.FloatTensor] = None,
) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
"""
Forward pass for the CrossAttnDownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input hidden states.
temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The cross-attention kwargs. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask. Defaults to None.
additional_residuals (Optional[torch.FloatTensor], optional): The additional residuals. Defaults to None.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output hidden states and residuals.
"""
output_states = ()
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
blocks = list(zip(self.resnets, self.attentions))
for i, (resnet, attn) in enumerate(blocks):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
else:
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
# apply additional residuals to the output of the last pair of resnet and attention blocks
if i == len(blocks) - 1 and additional_residuals is not None:
hidden_states = hidden_states + additional_residuals
output_states = output_states + (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states, scale=lora_scale)
output_states = output_states + (hidden_states,)
return hidden_states, output_states
|
Forward pass for the CrossAttnDownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input hidden states.
temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The cross-attention kwargs. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask. Defaults to None.
additional_residuals (Optional[torch.FloatTensor], optional): The additional residuals. Defaults to None.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output hidden states and residuals.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_blocks.py
|
MIT
|
def forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
scale: float = 1.0,
) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
"""
Forward pass of the DownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the DownBlock2D layer.
temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
scale (float, optional): The scale factor for the input tensor. Defaults to 1.0.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output tensor and any additional hidden states.
"""
output_states = ()
for resnet in self.resnets:
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
use_reentrant=False,
)
else:
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb, scale=scale)
output_states = output_states + (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states, scale=scale)
output_states = output_states + (hidden_states,)
return hidden_states, output_states
|
Forward pass of the DownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the DownBlock2D layer.
temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
scale (float, optional): The scale factor for the input tensor. Defaults to 1.0.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output tensor and any additional hidden states.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_blocks.py
|
MIT
|
def forward(
self,
hidden_states: torch.FloatTensor,
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
upsample_size: Optional[int] = None,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Forward pass for the CrossAttnUpBlock2D class.
Args:
self (CrossAttnUpBlock2D): An instance of the CrossAttnUpBlock2D class.
hidden_states (torch.FloatTensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states tensors.
temb (Optional[torch.FloatTensor], optional): The time embeddings tensor. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states tensor. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for cross attention. Defaults to None.
upsample_size (Optional[int], optional): The upsample size. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask tensor. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
"""
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
hidden_states, res_hidden_states = apply_freeu(
self.resolution_idx,
hidden_states,
res_hidden_states,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
else:
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(
hidden_states, upsample_size, scale=lora_scale
)
return hidden_states
|
Forward pass for the CrossAttnUpBlock2D class.
Args:
self (CrossAttnUpBlock2D): An instance of the CrossAttnUpBlock2D class.
hidden_states (torch.FloatTensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states tensors.
temb (Optional[torch.FloatTensor], optional): The time embeddings tensor. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states tensor. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for cross attention. Defaults to None.
upsample_size (Optional[int], optional): The upsample size. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask tensor. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_blocks.py
|
MIT
|
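Each iteration of the up-block loop above pops the most recent skip connection off `res_hidden_states_tuple` and concatenates it with the current features along the channel axis before the ResNet. A minimal illustration of that bookkeeping (shapes chosen arbitrarily):

```python
import torch

hidden_states = torch.randn(2, 640, 16, 16)
res_hidden_states_tuple = (
    torch.randn(2, 320, 32, 32),   # earliest skip (used last)
    torch.randn(2, 640, 16, 16),   # latest skip (used first)
)

res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
print(hidden_states.shape)   # torch.Size([2, 1280, 16, 16]) -- channels concatenated
```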
def forward(
self,
hidden_states: torch.FloatTensor,
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
temb: Optional[torch.FloatTensor] = None,
upsample_size: Optional[int] = None,
scale: float = 1.0,
) -> torch.FloatTensor:
"""
Forward pass for the UpBlock2D class.
Args:
self (UpBlock2D): An instance of the UpBlock2D class.
hidden_states (torch.FloatTensor): The input tensor to the block.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states.
temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
upsample_size (Optional[int], optional): The size to upsample the input tensor to. Defaults to None.
scale (float, optional): The scale factor to apply to the input tensor. Defaults to 1.0.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
"""
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
for resnet in self.resnets:
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
hidden_states, res_hidden_states = apply_freeu(
self.resolution_idx,
hidden_states,
res_hidden_states,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
use_reentrant=False,
)
else:
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb, scale=scale)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size, scale=scale)
return hidden_states
|
Forward pass for the UpBlock2D class.
Args:
self (UpBlock2D): An instance of the UpBlock2D class.
hidden_states (torch.FloatTensor): The input tensor to the block.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states.
temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
upsample_size (Optional[int], optional): The size to upsample the input tensor to. Defaults to None.
scale (float, optional): The scale factor to apply to the input tensor. Defaults to 1.0.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_blocks.py
|
MIT
|
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(
name: str,
module: torch.nn.Module,
processors: Dict[str, AttentionProcessor],
):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor(
return_deprecated_lora=True
)
for sub_name, child in module.named_children():
fn_recursive_add_processors(
f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
|
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
|
attn_processors
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_condition.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_condition.py
|
MIT
|
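`attn_processors` builds dotted module paths by recursing over `named_children`. The same traversal can be shown on a small container model; the `DummyAttention.get_processor` here is a stand-in, not a diffusers API (the real code also passes `return_deprecated_lora=True`):

```python
from torch import nn

class DummyAttention(nn.Module):
    def get_processor(self):
        return "processor-object"

model = nn.Module()
model.block = nn.Module()
model.block.attn1 = DummyAttention()

processors = {}

def fn_recursive_add_processors(name, module, processors):
    # record any module exposing get_processor under its dotted path
    if hasattr(module, "get_processor"):
        processors[f"{name}.processor"] = module.get_processor()
    for sub_name, child in module.named_children():
        fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
    return processors

for name, module in model.named_children():
    fn_recursive_add_processors(name, module, processors)

print(processors)   # {'block.attn1.processor': 'processor-object'}
```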
def set_attn_processor(
self,
processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]],
_remove_lora=False,
):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor, _remove_lora=_remove_lora)
else:
module.set_processor(
processor.pop(f"{name}.processor"), _remove_lora=_remove_lora
)
for sub_name, child in module.named_children():
fn_recursive_attn_processor(
f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
|
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
|
set_attn_processor
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_condition.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_condition.py
|
MIT
|
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(
proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS
for proc in self.attn_processors.values()
):
processor = AttnAddedKVProcessor()
elif all(
proc.__class__ in CROSS_ATTENTION_PROCESSORS
for proc in self.attn_processors.values()
):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor, _remove_lora=True)
|
Disables custom attention processors and sets the default attention implementation.
|
set_default_attn_processor
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_condition.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_condition.py
|
MIT
|
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_sliceable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_sliceable_dims(module)
num_sliceable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_sliceable_layers * [1]
slice_size = (
num_sliceable_layers * [slice_size]
if not isinstance(slice_size, list)
else slice_size
)
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i, size in enumerate(slice_size):
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(
f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(
module: torch.nn.Module, slice_size: List[int]
):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
|
set_attention_slice
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_condition.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_condition.py
|
MIT
|
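The slice-size resolution in `set_attention_slice` is pure Python and can be checked independently: "auto" halves each sliceable head dimension, "max" uses one slice per layer, and a single integer is broadcast to all layers. An illustrative run (the head dimensions below are made up):

```python
sliceable_head_dims = [8, 16, 16, 32]        # hypothetical per-layer head dims
num_sliceable_layers = len(sliceable_head_dims)

def resolve(slice_size):
    if slice_size == "auto":
        return [dim // 2 for dim in sliceable_head_dims]
    if slice_size == "max":
        return num_sliceable_layers * [1]
    return num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

print(resolve("auto"))   # [4, 8, 8, 16]
print(resolve("max"))    # [1, 1, 1, 1]
print(resolve(2))        # [2, 2, 2, 2]
```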
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
cond_tensor: torch.FloatTensor=None,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
post_process: bool = False,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added to UNet long skip connections from down blocks to up blocks for
example from ControlNet side model(s)
mid_block_additional_residual (`torch.Tensor`, *optional*):
additional residual to be added to UNet mid block output, for example from ControlNet side model
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
a `tuple` is returned where the first element is the sample tensor.
"""
# By default samples have to be AT least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (
1 - encoder_attention_mask.to(sample.dtype)
) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor(
[timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
aug_emb = None
if self.class_embedding is not None:
if class_labels is None:
raise ValueError(
"class_labels should be provided when num_class_embeds > 0"
)
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(
class_labels).to(dtype=sample.dtype)
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "text_image":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image'"
" which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get(
"text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
elif self.config.addition_embed_type == "text_time":
# SDXL - style
if "text_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time'"
" which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time'"
" which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(emb.dtype)
aug_emb = self.add_embedding(add_embeds)
elif self.config.addition_embed_type == "image":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image'"
" which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
aug_emb = self.add_embedding(image_embs)
elif self.config.addition_embed_type == "image_hint":
# Kandinsky 2.2 - style
if (
"image_embeds" not in added_cond_kwargs
or "hint" not in added_cond_kwargs
):
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint'"
" which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
hint = added_cond_kwargs.get("hint")
aug_emb, hint = self.add_embedding(image_embs, hint)
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
if (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "text_proj"
):
encoder_hidden_states = self.encoder_hid_proj(
encoder_hidden_states)
elif (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "text_image_proj"
):
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj'"
" which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(
encoder_hidden_states, image_embeds
)
elif (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "image_proj"
):
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj'"
" which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(image_embeds)
elif (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "ip_image_proj"
):
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj'"
" which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
image_embeds = self.encoder_hid_proj(image_embeds).to(
encoder_hidden_states.dtype
)
encoder_hidden_states = torch.cat(
[encoder_hidden_states, image_embeds], dim=1
)
# 2. pre-process
sample = self.conv_in(sample)
if cond_tensor is not None:
sample = sample + cond_tensor
# 2.5 GLIGEN position net
if (
cross_attention_kwargs is not None
and cross_attention_kwargs.get("gligen", None) is not None
):
cross_attention_kwargs = cross_attention_kwargs.copy()
gligen_args = cross_attention_kwargs.pop("gligen")
cross_attention_kwargs["gligen"] = {
"objs": self.position_net(**gligen_args)
}
# 3. down
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
is_controlnet = (
mid_block_additional_residual is not None
and down_block_additional_residuals is not None
)
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if (
not is_adapter
and mid_block_additional_residual is None
and down_block_additional_residuals is not None
):
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if (
hasattr(downsample_block, "has_cross_attention")
and downsample_block.has_cross_attention
):
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = (
down_intrablock_additional_residuals.pop(0)
)
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
**additional_residuals,
)
else:
sample, res_samples = downsample_block(
hidden_states=sample, temb=emb, scale=lora_scale
)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
down_block_res_samples += res_samples
if is_controlnet:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = (
down_block_res_sample + down_block_additional_residual
)
new_down_block_res_samples = new_down_block_res_samples + (
down_block_res_sample,
)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
if (
hasattr(self.mid_block, "has_cross_attention")
and self.mid_block.has_cross_attention
):
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = self.mid_block(sample, emb)
# To support T2I-Adapter-XL
if (
is_adapter
and len(down_intrablock_additional_residuals) > 0
and sample.shape == down_intrablock_additional_residuals[0].shape
):
sample += down_intrablock_additional_residuals.pop(0)
if is_controlnet:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets):]
down_block_res_samples = down_block_res_samples[
: -len(upsample_block.resnets)
]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if (
hasattr(upsample_block, "has_cross_attention")
and upsample_block.has_cross_attention
):
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
scale=lora_scale,
)
# 6. post-process
if post_process:
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
|
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added to UNet long skip connections from down blocks to up blocks for
example from ControlNet side model(s)
mid_block_additional_residual (`torch.Tensor`, *optional*):
additional residual to be added to UNet mid block output, for example from ControlNet side model
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
a `tuple` is returned where the first element is the sample tensor.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_condition.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_condition.py
|
MIT
|
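A minimal usage sketch for the 2D forward pass above. It assumes `ref_unet` is an already-instantiated UNet2DConditionModel from this file with a standard SD-1.5 style config; the shapes and the variable name are illustrative and not taken from the original source.

import torch

ref_unet.eval()
sample = torch.randn(1, ref_unet.config.in_channels, 64, 64)        # noisy latents (b, c, h, w)
timestep = torch.tensor([10], dtype=torch.long)                     # denoising step
encoder_hidden_states = torch.randn(1, 77, ref_unet.config.cross_attention_dim)
with torch.no_grad():
    out = ref_unet(sample, timestep, encoder_hidden_states, return_dict=True)
features = out.sample  # with post_process=False these are the features before conv_norm_out/conv_out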
def load_change_cross_attention_dim(
cls,
pretrained_model_path: PathLike,
subfolder=None,
# unet_additional_kwargs=None,
):
"""
Load a pre-trained 2D UNet and change its cross-attention dimension to 1024.
Parameters:
pretrained_model_path (`str` or `PathLike`):
Path to the local folder containing the pre-trained 2D UNet config and weights.
subfolder (`str`, *optional*):
Subfolder inside `pretrained_model_path` that holds the UNet files.
Returns:
`UNet2DConditionModel`:
The model rebuilt with `cross_attention_dim=1024`; all shape-compatible weights are loaded
from the checkpoint, while mismatched tensors keep their freshly initialized values.
"""
pretrained_model_path = Path(pretrained_model_path)
if subfolder is not None:
pretrained_model_path = pretrained_model_path.joinpath(subfolder)
config_file = pretrained_model_path / "config.json"
if not (config_file.exists() and config_file.is_file()):
raise RuntimeError(
f"{config_file} does not exist or is not a file")
unet_config = cls.load_config(config_file)
unet_config["cross_attention_dim"] = 1024
model = cls.from_config(unet_config)
# load the vanilla weights
if pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME).exists():
logger.debug(
f"loading safeTensors weights from {pretrained_model_path} ..."
)
state_dict = load_file(
pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME), device="cpu"
)
elif pretrained_model_path.joinpath(WEIGHTS_NAME).exists():
logger.debug(f"loading weights from {pretrained_model_path} ...")
state_dict = torch.load(
pretrained_model_path.joinpath(WEIGHTS_NAME),
map_location="cpu",
weights_only=True,
)
else:
raise FileNotFoundError(
f"no weights file found in {pretrained_model_path}")
model_state_dict = model.state_dict()
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
state_dict[k] = model_state_dict[k]
# load the weights into the model
m, u = model.load_state_dict(state_dict, strict=False)
print(m, u)
return model
|
Load a pre-trained 2D UNet and change its cross-attention dimension to 1024.
Parameters:
pretrained_model_path (`str` or `PathLike`):
Path to the local folder containing the pre-trained 2D UNet config and weights.
subfolder (`str`, *optional*):
Subfolder inside `pretrained_model_path` that holds the UNet files.
Returns:
`UNet2DConditionModel`:
The model rebuilt with `cross_attention_dim=1024`; all shape-compatible weights are loaded
from the checkpoint, while mismatched tensors keep their freshly initialized values.
|
load_change_cross_attention_dim
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_2d_condition.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_2d_condition.py
|
MIT
|
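A hedged sketch of calling the loader above. The checkpoint path is a placeholder, and the method is assumed to be exposed as a classmethod on UNet2DConditionModel as elsewhere in this repository.

from hallo.models.unet_2d_condition import UNet2DConditionModel

# Rebuilds the UNet with cross_attention_dim=1024 and loads every
# shape-compatible tensor from the 2D checkpoint (placeholder path).
unet = UNet2DConditionModel.load_change_cross_attention_dim(
    "./pretrained_models/stable-diffusion-v1-5",
    subfolder="unet",
)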
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by its weight name.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(
name: str,
module: torch.nn.Module,
processors: Dict[str, AttentionProcessor],
):
if hasattr(module, "set_processor"):
processors[f"{name}.processor"] = module.processor
for sub_name, child in module.named_children():
if "temporal_transformer" not in sub_name:
fn_recursive_add_processors(
f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
if "temporal_transformer" not in name:
fn_recursive_add_processors(name, module, processors)
return processors
|
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by its weight name.
|
attn_processors
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d.py
|
MIT
|
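A small sketch of inspecting the property above; `unet3d` is assumed to be an already-constructed UNet3DConditionModel (for example built via from_pretrained_2d later in this section), and attn_processors is assumed to be exposed as a property as in diffusers.

# Temporal-transformer submodules are skipped by the recursion, so only the
# spatial and audio attention processors are listed here.
procs = unet3d.attn_processors
print(len(procs), "attention processors")
for name, proc in list(procs.items())[:3]:
    print(name, "->", type(proc).__name__)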
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = (
num_slicable_layers * [slice_size]
if not isinstance(slice_size, list)
else slice_size
)
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i, size in enumerate(slice_size):
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(
f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(
module: torch.nn.Module, slice_size: List[int]
):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
|
set_attention_slice
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d.py
|
MIT
|
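A one-line usage sketch for the slicing helper above, assuming the same `unet3d` instance.

# "auto" halves every sliceable head dimension; an int or a per-layer list of ints
# can be passed instead for finer control over the memory/speed trade-off.
unet3d.set_attention_slice("auto")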
def set_attn_processor(
self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]
):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
if "temporal_transformer" not in sub_name:
fn_recursive_attn_processor(
f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
if "temporal_transformer" not in name:
fn_recursive_attn_processor(name, module, processor)
|
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
|
set_attn_processor
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d.py
|
MIT
|
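A sketch of swapping processors with the setter above. AttnProcessor is the stock diffusers implementation, `unet3d` is again assumed to exist, and the call is shown purely to illustrate the traversal rather than as the project's configuration.

from diffusers.models.attention_processor import AttnProcessor

# A single instance is applied to every non-temporal Attention layer; a dict keyed
# by "<module path>.processor" could be passed instead for per-layer control.
unet3d.set_attn_processor(AttnProcessor())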
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
audio_embedding: Optional[torch.Tensor] = None,
class_labels: Optional[torch.Tensor] = None,
mask_cond_fea: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
full_mask: Optional[torch.Tensor] = None,
face_mask: Optional[torch.Tensor] = None,
lip_mask: Optional[torch.Tensor] = None,
motion_scale: Optional[torch.Tensor] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
return_dict: bool = True,
# start: bool = False,
) -> Union[UNet3DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`UNet3DConditionOutput`] instead of a plain tuple.
Returns:
[`UNet3DConditionOutput`] or `tuple`:
[`UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
# By default samples have to be AT least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info(
"Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# time
timesteps = timestep
if not torch.is_tensor(timesteps):
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor(
[timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError(
"class_labels should be provided when num_class_embeds > 0"
)
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
emb = emb + class_emb
# pre-process
sample = self.conv_in(sample)
if mask_cond_fea is not None:
sample = sample + mask_cond_fea
# down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if (
hasattr(downsample_block, "has_cross_attention")
and downsample_block.has_cross_attention
):
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
# print("")
else:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
# audio_embedding=audio_embedding,
)
# print("")
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = (
down_block_res_sample + down_block_additional_residual
)
new_down_block_res_samples += (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# mid
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
if mid_block_additional_residual is not None:
sample = sample + mid_block_additional_residual
# up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets):]
down_block_res_samples = down_block_res_samples[
: -len(upsample_block.resnets)
]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if (
hasattr(upsample_block, "has_cross_attention")
and upsample_block.has_cross_attention
):
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
upsample_size=upsample_size,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
encoder_hidden_states=encoder_hidden_states,
# audio_embedding=audio_embedding,
)
# post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet3DConditionOutput(sample=sample)
|
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`UNet3DConditionOutput`] instead of a plain tuple.
Returns:
[`UNet3DConditionOutput`] or `tuple`:
[`UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d.py
|
MIT
|
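A minimal forward-pass sketch for the 3D UNet above. All shapes are illustrative assumptions (4 latent channels, 16 frames, 64x64 latents, 77 text tokens, 768-dim cross attention), and the call assumes a configuration without audio modules so that audio_embedding and the mask tensors can be omitted.

import torch

sample = torch.randn(1, 4, 16, 64, 64)            # (batch, channels, frames, height, width)
timestep = torch.tensor([999])
encoder_hidden_states = torch.randn(1, 77, 768)
with torch.no_grad():
    video_latents = unet3d(sample, timestep, encoder_hidden_states).sample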
def from_pretrained_2d(
cls,
pretrained_model_path: PathLike,
motion_module_path: PathLike,
subfolder=None,
unet_additional_kwargs=None,
mm_zero_proj_out=False,
use_landmark=True,
):
"""
Load a pre-trained 2D UNet model from a given directory.
Parameters:
pretrained_model_path (`str` or `PathLike`):
Path to the directory containing a pre-trained 2D UNet model.
motion_module_path (`str` or `PathLike`):
Path to the motion-module weights (`.pth`, `.pt`, `.ckpt`, or `.safetensors`).
subfolder (`str`, *optional*):
Subfolder inside `pretrained_model_path` that holds the UNet config and weights.
unet_additional_kwargs (`dict`, *optional*):
Extra keyword arguments forwarded to `from_config` when building the 3D UNet.
mm_zero_proj_out (`bool`, *optional*, defaults to `False`):
If True, skip loading the motion module's `proj_out` weights so they keep their zero initialization.
use_landmark (`bool`, *optional*, defaults to `True`):
If True, set the UNet's input and output channels to 8 for landmark conditioning.
Returns:
`UNet3DConditionModel`:
The 2D UNet inflated into a 3D UNet with the motion-module weights merged in.
"""
pretrained_model_path = Path(pretrained_model_path)
motion_module_path = Path(motion_module_path)
if subfolder is not None:
pretrained_model_path = pretrained_model_path.joinpath(subfolder)
logger.info(
f"loaded temporal unet's pretrained weights from {pretrained_model_path} ..."
)
config_file = pretrained_model_path / "config.json"
if not (config_file.exists() and config_file.is_file()):
raise RuntimeError(
f"{config_file} does not exist or is not a file")
unet_config = cls.load_config(config_file)
unet_config["_class_name"] = cls.__name__
unet_config["down_block_types"] = [
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
]
unet_config["up_block_types"] = [
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
]
unet_config["mid_block_type"] = "UNetMidBlock3DCrossAttn"
if use_landmark:
unet_config["in_channels"] = 8
unet_config["out_channels"] = 8
model = cls.from_config(unet_config, **unet_additional_kwargs)
# load the vanilla weights
if pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME).exists():
logger.debug(
f"loading safeTensors weights from {pretrained_model_path} ..."
)
state_dict = load_file(
pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME), device="cpu"
)
elif pretrained_model_path.joinpath(WEIGHTS_NAME).exists():
logger.debug(f"loading weights from {pretrained_model_path} ...")
state_dict = torch.load(
pretrained_model_path.joinpath(WEIGHTS_NAME),
map_location="cpu",
weights_only=True,
)
else:
raise FileNotFoundError(
f"no weights file found in {pretrained_model_path}")
# load the motion module weights
if motion_module_path.exists() and motion_module_path.is_file():
if motion_module_path.suffix.lower() in [".pth", ".pt", ".ckpt"]:
print(
f"Load motion module params from {motion_module_path}")
motion_state_dict = torch.load(
motion_module_path, map_location="cpu", weights_only=True
)
elif motion_module_path.suffix.lower() == ".safetensors":
motion_state_dict = load_file(motion_module_path, device="cpu")
else:
raise RuntimeError(
f"unknown file format for motion module weights: {motion_module_path.suffix}"
)
if mm_zero_proj_out:
logger.info(
"Zero initialize proj_out layers in motion module...")
new_motion_state_dict = OrderedDict()
for k in motion_state_dict:
if "proj_out" in k:
continue
new_motion_state_dict[k] = motion_state_dict[k]
motion_state_dict = new_motion_state_dict
# merge the state dicts
state_dict.update(motion_state_dict)
model_state_dict = model.state_dict()
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
state_dict[k] = model_state_dict[k]
# load the weights into the model
m, u = model.load_state_dict(state_dict, strict=False)
logger.debug(
f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};")
params = [
p.numel() if "temporal" in n else 0 for n, p in model.named_parameters()
]
logger.info(f"Loaded {sum(params) / 1e6}M-parameter motion module")
return model
|
Load a pre-trained 2D UNet model from a given directory.
Parameters:
pretrained_model_path (`str` or `PathLike`):
Path to the directory containing a pre-trained 2D UNet model.
motion_module_path (`str` or `PathLike`):
Path to the motion-module weights (`.pth`, `.pt`, `.ckpt`, or `.safetensors`).
subfolder (`str`, *optional*):
Subfolder inside `pretrained_model_path` that holds the UNet config and weights.
unet_additional_kwargs (`dict`, *optional*):
Extra keyword arguments forwarded to `from_config` when building the 3D UNet.
mm_zero_proj_out (`bool`, *optional*, defaults to `False`):
If True, skip loading the motion module's `proj_out` weights so they keep their zero initialization.
use_landmark (`bool`, *optional*, defaults to `True`):
If True, set the UNet's input and output channels to 8 for landmark conditioning.
Returns:
`UNet3DConditionModel`:
The 2D UNet inflated into a 3D UNet with the motion-module weights merged in.
|
from_pretrained_2d
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d.py
|
MIT
|
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
audio_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
use_audio_module=None,
depth=0,
stack_enable_blocks_name=None,
stack_enable_blocks_depth=None,
):
"""
Factory function to instantiate a down-block module for the 3D UNet architecture.
Down blocks are used in the downsampling part of the U-Net to reduce the spatial dimensions
of the feature maps while increasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- down_block_type (str): The type of down block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- temb_channels (int): The number of time-embedding channels.
- add_downsample (bool): Flag to add a downsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of a down-sampling block module.
"""
down_block_type = (
down_block_type[7:]
if down_block_type.startswith("UNetRes")
else down_block_type
)
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
if down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError(
"cross_attention_dim must be specified for CrossAttnDownBlock3D"
)
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
audio_attention_dim=audio_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
use_audio_module=use_audio_module,
depth=depth,
stack_enable_blocks_name=stack_enable_blocks_name,
stack_enable_blocks_depth=stack_enable_blocks_depth,
)
raise ValueError(f"{down_block_type} does not exist.")
|
Factory function to instantiate a down-block module for the 3D UNet architecture.
Down blocks are used in the downsampling part of the U-Net to reduce the spatial dimensions
of the feature maps while increasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- down_block_type (str): The type of down block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- temb_channels (int): The number of time-embedding channels.
- add_downsample (bool): Flag to add a downsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of a down-sampling block module.
|
get_down_block
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d_blocks.py
|
MIT
|
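A sketch of calling the factory above to build a single cross-attention down block. The channel sizes, head count, and cross-attention width are illustrative, and the extra keyword arguments (resnet_groups, downsample_padding, use_inflated_groupnorm) are assumptions about what the underlying block implementation expects rather than values from a project config.

block = get_down_block(
    "CrossAttnDownBlock3D",
    num_layers=2,
    in_channels=320,
    out_channels=320,
    temb_channels=1280,
    add_downsample=True,
    resnet_eps=1e-5,
    resnet_act_fn="silu",
    attn_num_head_channels=8,
    resnet_groups=32,
    cross_attention_dim=768,
    downsample_padding=1,
    use_inflated_groupnorm=True,
)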
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
audio_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
use_audio_module=None,
depth=0,
stack_enable_blocks_name=None,
stack_enable_blocks_depth=None,
):
"""
Factory function to instantiate an up-block module for the 3D UNet architecture.
Up blocks are used in the upsampling part of the U-Net to increase the spatial dimensions
of the feature maps while decreasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- up_block_type (str): The type of up block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- prev_output_channel (int): The number of channels from the previous layer's output.
- temb_channels (int): The number of time-embedding channels.
- add_upsample (bool): Flag to add an upsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of an up-sampling block module.
"""
up_block_type = (
up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
)
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
if up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError(
"cross_attention_dim must be specified for CrossAttnUpBlock3D"
)
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
audio_attention_dim=audio_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
use_audio_module=use_audio_module,
depth=depth,
stack_enable_blocks_name=stack_enable_blocks_name,
stack_enable_blocks_depth=stack_enable_blocks_depth,
)
raise ValueError(f"{up_block_type} does not exist.")
|
Factory function to instantiate an up-block module for the 3D UNet architecture.
Up blocks are used in the upsampling part of the U-Net to increase the spatial dimensions
of the feature maps while decreasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- up_block_type (str): The type of up block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- prev_output_channel (int): The number of channels from the previous layer's output.
- temb_channels (int): The number of time-embedding channels.
- add_upsample (bool): Flag to add an upsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of an up-sampling block module.
|
get_up_block
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d_blocks.py
|
MIT
|
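The matching factory call for the upsampling path, with the same caveats as the down-block sketch: sizes are illustrative, and prev_output_channel would come from the preceding block in a real UNet.

block = get_up_block(
    "CrossAttnUpBlock3D",
    num_layers=3,
    in_channels=320,
    out_channels=320,
    prev_output_channel=640,
    temb_channels=1280,
    add_upsample=True,
    resnet_eps=1e-5,
    resnet_act_fn="silu",
    attn_num_head_channels=8,
    resnet_groups=32,
    cross_attention_dim=768,
    use_inflated_groupnorm=True,
)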
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
attention_mask=None,
full_mask=None,
face_mask=None,
lip_mask=None,
audio_embedding=None,
motion_scale=None,
):
"""
Forward pass for the UNetMidBlock3DCrossAttn class.
Args:
self (UNetMidBlock3DCrossAttn): An instance of the UNetMidBlock3DCrossAttn class.
hidden_states (Tensor): The input hidden states tensor.
temb (Tensor, optional): The input temporal embedding tensor. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.
full_mask (Tensor, optional): The full mask tensor. Defaults to None.
face_mask (Tensor, optional): The face mask tensor. Defaults to None.
lip_mask (Tensor, optional): The lip mask tensor. Defaults to None.
audio_embedding (Tensor, optional): The audio embedding tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the UNetMidBlock3DCrossAttn layers.
"""
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet, audio_module, motion_module in zip(
self.attentions, self.resnets[1:], self.audio_modules, self.motion_modules
):
hidden_states, motion_frame = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
return_dict=False,
) # .sample
if len(motion_frame[0]) > 0:
# if motion_frame[0][0].numel() > 0:
motion_frames = motion_frame[0][0]
motion_frames = rearrange(
motion_frames,
"b f (d1 d2) c -> b c f d1 d2",
d1=hidden_states.size(-1),
)
else:
motion_frames = torch.zeros(
hidden_states.shape[0],
hidden_states.shape[1],
4,
hidden_states.shape[3],
hidden_states.shape[4],
)
n_motion_frames = motion_frames.size(2)
if audio_module is not None:
hidden_states = (
audio_module(
hidden_states,
encoder_hidden_states=audio_embedding,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
motion_scale=motion_scale,
return_dict=False,
)
)[0] # .sample
if motion_module is not None:
motion_frames = motion_frames.to(
device=hidden_states.device, dtype=hidden_states.dtype
)
_hidden_states = (
torch.cat([motion_frames, hidden_states], dim=2)
if n_motion_frames > 0
else hidden_states
)
hidden_states = motion_module(
_hidden_states, encoder_hidden_states=encoder_hidden_states
)
hidden_states = hidden_states[:, :, n_motion_frames:]
hidden_states = resnet(hidden_states, temb)
return hidden_states
|
Forward pass for the UNetMidBlock3DCrossAttn class.
Args:
self (UNetMidBlock3DCrossAttn): An instance of the UNetMidBlock3DCrossAttn class.
hidden_states (Tensor): The input hidden states tensor.
temb (Tensor, optional): The input temporal embedding tensor. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.
full_mask (Tensor, optional): The full mask tensor. Defaults to None.
face_mask (Tensor, optional): The face mask tensor. Defaults to None.
lip_mask (Tensor, optional): The lip mask tensor. Defaults to None.
audio_embedding (Tensor, optional): The audio embedding tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the UNetMidBlock3DCrossAttn layers.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d_blocks.py
|
MIT
|
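A standalone sketch of the motion-frame reshaping used in the mid block above: the reference attention returns frames as (batch, frames, h*w, channels), which are folded back into a 5D video tensor. The shapes are illustrative.

import torch
from einops import rearrange

motion = torch.randn(1, 2, 64 * 64, 320)                           # (b, f, d1*d2, c)
motion = rearrange(motion, "b f (d1 d2) c -> b c f d1 d2", d1=64)
print(motion.shape)                                                # torch.Size([1, 320, 2, 64, 64])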
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
attention_mask=None,
full_mask=None,
face_mask=None,
lip_mask=None,
audio_embedding=None,
motion_scale=None,
):
"""
Defines the forward pass for the CrossAttnDownBlock3D class.
Parameters:
- hidden_states : torch.Tensor
The input tensor to the block.
temb : torch.Tensor, optional
The time embedding tensor for the current denoising step.
encoder_hidden_states : torch.Tensor, optional
The hidden states from the encoder.
attention_mask : torch.Tensor, optional
The attention mask for the cross-attention mechanism.
full_mask : torch.Tensor, optional
The full mask for the cross-attention mechanism.
face_mask : torch.Tensor, optional
The face mask for the cross-attention mechanism.
lip_mask : torch.Tensor, optional
The lip mask for the cross-attention mechanism.
audio_embedding : torch.Tensor, optional
The audio embedding for the cross-attention mechanism.
Returns:
- Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]
The output hidden states and the tuple of residual states used for the skip connections.
"""
output_states = ()
for _, (resnet, attn, audio_module, motion_module) in enumerate(
zip(self.resnets, self.attentions, self.audio_modules, self.motion_modules)
):
# self.gradient_checkpointing = False
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
motion_frames = []
hidden_states, motion_frame = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)
if len(motion_frame[0]) > 0:
motion_frames = motion_frame[0][0]
# motion_frames = torch.cat(motion_frames, dim=0)
motion_frames = rearrange(
motion_frames,
"b f (d1 d2) c -> b c f d1 d2",
d1=hidden_states.size(-1),
)
else:
motion_frames = torch.zeros(
hidden_states.shape[0],
hidden_states.shape[1],
4,
hidden_states.shape[3],
hidden_states.shape[4],
)
n_motion_frames = motion_frames.size(2)
if audio_module is not None:
# audio_embedding = audio_embedding
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(audio_module, return_dict=False),
hidden_states,
audio_embedding,
attention_mask,
full_mask,
face_mask,
lip_mask,
motion_scale,
)[0]
# add motion module
if motion_module is not None:
motion_frames = motion_frames.to(
device=hidden_states.device, dtype=hidden_states.dtype
)
_hidden_states = torch.cat(
[motion_frames, hidden_states], dim=2
) # if n_motion_frames > 0 else hidden_states
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(motion_module),
_hidden_states,
encoder_hidden_states,
)
hidden_states = hidden_states[:, :, n_motion_frames:]
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
).sample
if audio_module is not None:
hidden_states = audio_module(
hidden_states,
audio_embedding,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
return_dict=False,
)[0]
# add motion module
if motion_module is not None:
hidden_states = motion_module(
hidden_states, encoder_hidden_states=encoder_hidden_states
)
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
|
Defines the forward pass for the CrossAttnDownBlock3D class.
Parameters:
- hidden_states : torch.Tensor
The input tensor to the block.
temb : torch.Tensor, optional
The time embedding tensor for the current denoising step.
encoder_hidden_states : torch.Tensor, optional
The hidden states from the encoder.
attention_mask : torch.Tensor, optional
The attention mask for the cross-attention mechanism.
full_mask : torch.Tensor, optional
The full mask for the cross-attention mechanism.
face_mask : torch.Tensor, optional
The face mask for the cross-attention mechanism.
lip_mask : torch.Tensor, optional
The lip mask for the cross-attention mechanism.
audio_embedding : torch.Tensor, optional
The audio embedding for the cross-attention mechanism.
Returns:
- Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]
The output hidden states and the tuple of residual states used for the skip connections.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d_blocks.py
|
MIT
|
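A self-contained sketch of the gradient-checkpointing wrapper used in the block above: the module call is wrapped in a plain function so torch.utils.checkpoint can re-run it during the backward pass instead of storing its activations.

import torch

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

layer = torch.nn.Linear(8, 8)
x = torch.randn(2, 8, requires_grad=True)
y = torch.utils.checkpoint.checkpoint(create_custom_forward(layer), x, use_reentrant=False)
y.sum().backward()
print(x.grad.shape)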
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
):
"""
forward method for the DownBlock3D class.
Args:
hidden_states (Tensor): The input tensor to the DownBlock3D layer.
temb (Tensor, optional): The time embedding tensor.
encoder_hidden_states (Tensor, optional): The hidden states from the encoder.
Returns:
Tuple[Tensor, Tuple[Tensor, ...]]: The output hidden states and the residual states from the DownBlock3D layer.
"""
output_states = ()
for resnet, motion_module in zip(self.resnets, self.motion_modules):
# print(f"DownBlock3D {self.gradient_checkpointing = }")
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb)
# add motion module
hidden_states = (
motion_module(
hidden_states, encoder_hidden_states=encoder_hidden_states
)
if motion_module is not None
else hidden_states
)
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
|
forward method for the DownBlock3D class.
Args:
hidden_states (Tensor): The input tensor to the DownBlock3D layer.
temb (Tensor, optional): The token embeddings, if using transformer.
encoder_hidden_states (Tensor, optional): The hidden states from the encoder.
Returns:
Tensor: The output tensor after passing through the DownBlock3D layer.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d_blocks.py
|
MIT
|
def forward(
self,
hidden_states,
res_hidden_states_tuple,
temb=None,
encoder_hidden_states=None,
upsample_size=None,
attention_mask=None,
full_mask=None,
face_mask=None,
lip_mask=None,
audio_embedding=None,
motion_scale=None,
):
"""
Forward pass for the CrossAttnUpBlock3D class.
Args:
self (CrossAttnUpBlock3D): An instance of the CrossAttnUpBlock3D class.
hidden_states (Tensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[Tensor]): A tuple of residual hidden states tensors.
temb (Tensor, optional): The token embeddings tensor. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
upsample_size (int, optional): The upsample size. Defaults to None.
attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.
full_mask (Tensor, optional): The full mask tensor. Defaults to None.
face_mask (Tensor, optional): The face mask tensor. Defaults to None.
lip_mask (Tensor, optional): The lip mask tensor. Defaults to None.
audio_embedding (Tensor, optional): The audio embedding tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the CrossAttnUpBlock3D.
"""
for _, (resnet, attn, audio_module, motion_module) in enumerate(
zip(self.resnets, self.attentions, self.audio_modules, self.motion_modules)
):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
motion_frames = []
hidden_states, motion_frame = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)
if len(motion_frame[0]) > 0:
motion_frames = motion_frame[0][0]
# motion_frames = torch.cat(motion_frames, dim=0)
motion_frames = rearrange(
motion_frames,
"b f (d1 d2) c -> b c f d1 d2",
d1=hidden_states.size(-1),
)
else:
motion_frames = torch.zeros(
hidden_states.shape[0],
hidden_states.shape[1],
4,
hidden_states.shape[3],
hidden_states.shape[4],
)
n_motion_frames = motion_frames.size(2)
if audio_module is not None:
# audio_embedding = audio_embedding
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(audio_module, return_dict=False),
hidden_states,
audio_embedding,
attention_mask,
full_mask,
face_mask,
lip_mask,
motion_scale,
)[0]
# add motion module
if motion_module is not None:
motion_frames = motion_frames.to(
device=hidden_states.device, dtype=hidden_states.dtype
)
_hidden_states = (
torch.cat([motion_frames, hidden_states], dim=2)
if n_motion_frames > 0
else hidden_states
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(motion_module),
_hidden_states,
encoder_hidden_states,
)
hidden_states = hidden_states[:, :, n_motion_frames:]
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
).sample
if audio_module is not None:
hidden_states = (
audio_module(
hidden_states,
encoder_hidden_states=audio_embedding,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
)
).sample
# add motion module
hidden_states = (
motion_module(
hidden_states, encoder_hidden_states=encoder_hidden_states
)
if motion_module is not None
else hidden_states
)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
|
Forward pass for the CrossAttnUpBlock3D class.
Args:
self (CrossAttnUpBlock3D): An instance of the CrossAttnUpBlock3D class.
hidden_states (Tensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[Tensor]): A tuple of residual hidden states tensors.
temb (Tensor, optional): The token embeddings tensor. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
upsample_size (int, optional): The upsample size. Defaults to None.
attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.
full_mask (Tensor, optional): The full mask tensor. Defaults to None.
face_mask (Tensor, optional): The face mask tensor. Defaults to None.
lip_mask (Tensor, optional): The lip mask tensor. Defaults to None.
audio_embedding (Tensor, optional): The audio embedding tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the CrossAttnUpBlock3D.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d_blocks.py
|
MIT
|
def forward(
self,
hidden_states,
res_hidden_states_tuple,
temb=None,
upsample_size=None,
encoder_hidden_states=None,
):
"""
Forward pass for the UpBlock3D class.
Args:
self (UpBlock3D): An instance of the UpBlock3D class.
hidden_states (Tensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[Tensor]): A tuple of residual hidden states tensors.
temb (Tensor, optional): The token embeddings tensor. Defaults to None.
upsample_size (int, optional): The upsample size. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the UpBlock3D layers.
"""
for resnet, motion_module in zip(self.resnets, self.motion_modules):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
# print(f"UpBlock3D {self.gradient_checkpointing = }")
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = (
motion_module(
hidden_states, encoder_hidden_states=encoder_hidden_states
)
if motion_module is not None
else hidden_states
)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
|
Forward pass for the UpBlock3D class.
Args:
self (UpBlock3D): An instance of the UpBlock3D class.
hidden_states (Tensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[Tensor]): A tuple of residual hidden states tensors.
temb (Tensor, optional): The token embeddings tensor. Defaults to None.
upsample_size (int, optional): The upsample size. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the UpBlock3D layers.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d_blocks.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d_blocks.py
|
MIT
|
def forward(
self,
input_values,
seq_len,
attention_mask=None,
mask_time_indices=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
Forward pass of the Wav2Vec model.
Args:
self: The instance of the model.
input_values: The input values (waveform) to the model.
seq_len: The sequence length of the input values.
attention_mask: Attention mask to be used for the model.
mask_time_indices: Mask indices to be used for the model.
output_attentions: If set to True, returns attentions.
output_hidden_states: If set to True, returns hidden states.
return_dict: If set to True, returns a BaseModelOutput instead of a tuple.
Returns:
The output of the Wav2Vec model.
"""
self.config.output_attentions = True
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
extract_features = linear_interpolation(extract_features, seq_len=seq_len)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
extract_features.shape[1], attention_mask, add_adapter=False
)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if self.adapter is not None:
hidden_states = self.adapter(hidden_states)
if not return_dict:
return (hidden_states, ) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
|
Forward pass of the Wav2Vec model.
Args:
self: The instance of the model.
input_values: The input values (waveform) to the model.
seq_len: The sequence length of the input values.
attention_mask: Attention mask to be used for the model.
mask_time_indices: Mask indices to be used for the model.
output_attentions: If set to True, returns attentions.
output_hidden_states: If set to True, returns hidden states.
return_dict: If set to True, returns a BaseModelOutput instead of a tuple.
Returns:
The output of the Wav2Vec model.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/wav2vec.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/wav2vec.py
|
MIT
|
def feature_extract(
self,
input_values,
seq_len,
):
"""
Extracts features from the input values and returns the extracted features.
Parameters:
input_values (torch.Tensor): The input values to be processed.
seq_len (torch.Tensor): The sequence lengths of the input values.
Returns:
extracted_features (torch.Tensor): The extracted features from the input values.
"""
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
extract_features = linear_interpolation(extract_features, seq_len=seq_len)
return extract_features
|
Extracts features from the input values and returns the extracted features.
Parameters:
input_values (torch.Tensor): The input values to be processed.
seq_len (torch.Tensor): The sequence lengths of the input values.
Returns:
extracted_features (torch.Tensor): The extracted features from the input values.
|
feature_extract
|
python
|
fudan-generative-vision/hallo
|
hallo/models/wav2vec.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/wav2vec.py
|
MIT
|
def encode(
self,
extract_features,
attention_mask=None,
mask_time_indices=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
Encodes the input features into the output space.
Args:
extract_features (torch.Tensor): The extracted features from the audio signal.
attention_mask (torch.Tensor, optional): Attention mask to be used for padding.
mask_time_indices (torch.Tensor, optional): Masked indices for the time dimension.
output_attentions (bool, optional): If set to True, returns the attention weights.
output_hidden_states (bool, optional): If set to True, returns all hidden states.
return_dict (bool, optional): If set to True, returns a BaseModelOutput instead of the tuple.
Returns:
The encoded output features.
"""
self.config.output_attentions = True
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
extract_features.shape[1], attention_mask, add_adapter=False
)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if self.adapter is not None:
hidden_states = self.adapter(hidden_states)
if not return_dict:
return (hidden_states, ) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
|
Encodes the input features into the output space.
Args:
extract_features (torch.Tensor): The extracted features from the audio signal.
attention_mask (torch.Tensor, optional): Attention mask to be used for padding.
mask_time_indices (torch.Tensor, optional): Masked indices for the time dimension.
output_attentions (bool, optional): If set to True, returns the attention weights.
output_hidden_states (bool, optional): If set to True, returns all hidden states.
return_dict (bool, optional): If set to True, returns a BaseModelOutput instead of the tuple.
Returns:
The encoded output features.
|
encode
|
python
|
fudan-generative-vision/hallo
|
hallo/models/wav2vec.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/wav2vec.py
|
MIT
|
def linear_interpolation(features, seq_len):
"""
Transpose the features to interpolate linearly.
Args:
features (torch.Tensor): The extracted features to be interpolated.
seq_len (torch.Tensor): The sequence lengths of the features.
Returns:
torch.Tensor: The interpolated features.
"""
features = features.transpose(1, 2)
output_features = F.interpolate(features, size=seq_len, align_corners=True, mode='linear')
return output_features.transpose(1, 2)
|
Transpose the features to interpolate linearly.
Args:
features (torch.Tensor): The extracted features to be interpolated.
seq_len (torch.Tensor): The sequence lengths of the features.
Returns:
torch.Tensor: The interpolated features.
|
linear_interpolation
|
python
|
fudan-generative-vision/hallo
|
hallo/models/wav2vec.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/wav2vec.py
|
MIT
|
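As a quick illustration of the resampling above, the following usage sketch aligns a wav2vec feature sequence to a 25-frame clip; the shapes are illustrative assumptions, and the import assumes the hallo package is importable from the repo root.
import torch
from hallo.models.wav2vec import linear_interpolation

# hypothetical shapes: 49 wav2vec feature frames resampled to 25 video frames
audio_feats = torch.randn(1, 49, 768)   # (batch, time, channels)
aligned = linear_interpolation(audio_feats, seq_len=25)
print(aligned.shape)                    # torch.Size([1, 25, 768])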
def filter_non_none(dict_obj: Dict):
"""
Filters out key-value pairs from the given dictionary where the value is None.
Args:
dict_obj (Dict): The dictionary to be filtered.
Returns:
Dict: The dictionary with key-value pairs removed where the value was None.
This function creates a new dictionary containing only the key-value pairs from
the original dictionary where the value is not None. It then clears the original
dictionary and updates it with the filtered key-value pairs.
"""
non_none_filter = { k: v for k, v in dict_obj.items() if v is not None }
dict_obj.clear()
dict_obj.update(non_none_filter)
return dict_obj
|
Filters out key-value pairs from the given dictionary where the value is None.
Args:
dict_obj (Dict): The dictionary to be filtered.
Returns:
Dict: The dictionary with key-value pairs removed where the value was None.
This function creates a new dictionary containing only the key-value pairs from
the original dictionary where the value is not None. It then clears the original
dictionary and updates it with the filtered key-value pairs.
|
filter_non_none
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/config.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/config.py
|
MIT
|
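A small usage sketch for filter_non_none; the dictionary contents are made up for illustration, and the import assumes the hallo package is on the path.
from hallo.utils.config import filter_non_none

cfg = {"checkpoint": None, "output": ".cache/output.mp4", "seed": 42}
filter_non_none(cfg)
print(cfg)  # {'output': '.cache/output.mp4', 'seed': 42} -- the None entry is dropped in place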
def seed_everything(seed):
"""
Seeds all random number generators to ensure reproducibility.
Args:
seed (int): The seed value to set for all random number generators.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed % (2**32))
random.seed(seed)
|
Seeds all random number generators to ensure reproducibility.
Args:
seed (int): The seed value to set for all random number generators.
|
seed_everything
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
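Usage sketch: re-seeding with seed_everything makes torch draws repeatable (assuming the hallo package is importable).
import torch
from hallo.utils.util import seed_everything

seed_everything(42)
a = torch.randn(2)
seed_everything(42)
b = torch.randn(2)
print(torch.equal(a, b))  # True -- identical draws after re-seeding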
def import_filename(filename):
"""
Import a module from a given file location.
Args:
filename (str): The path to the file containing the module to be imported.
Returns:
module: The imported module.
Raises:
ImportError: If the module cannot be imported.
Example:
>>> imported_module = import_filename('path/to/your/module.py')
"""
spec = importlib.util.spec_from_file_location("mymodule", filename)
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
return module
|
Import a module from a given file location.
Args:
filename (str): The path to the file containing the module to be imported.
Returns:
module: The imported module.
Raises:
ImportError: If the module cannot be imported.
Example:
>>> imported_module = import_filename('path/to/your/module.py')
|
import_filename
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
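A runnable sketch of import_filename using a throwaway module written to a temporary file; the attribute name is made up for illustration.
import tempfile
from hallo.utils.util import import_filename

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write("exp_name = 'demo'\n")
mod = import_filename(f.name)
print(mod.exp_name)  # demo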
def delete_additional_ckpt(base_path, num_keep):
"""
Deletes additional checkpoint files in the given directory.
Args:
base_path (str): The path to the directory containing the checkpoint files.
num_keep (int): The number of most recent checkpoint files to keep.
Returns:
None
Raises:
FileNotFoundError: If the base_path does not exist.
Example:
>>> delete_additional_ckpt('path/to/checkpoints', 1)
# This will delete all but the most recent checkpoint file in 'path/to/checkpoints'.
"""
dirs = []
for d in os.listdir(base_path):
if d.startswith("checkpoint-"):
dirs.append(d)
num_tot = len(dirs)
if num_tot <= num_keep:
return
# ensure checkpoints are sorted and delete the earlier ones!
del_dirs = sorted(dirs, key=lambda x: int(
x.split("-")[-1]))[: num_tot - num_keep]
for d in del_dirs:
path_to_dir = osp.join(base_path, d)
if osp.exists(path_to_dir):
shutil.rmtree(path_to_dir)
|
Deletes additional checkpoint files in the given directory.
Args:
base_path (str): The path to the directory containing the checkpoint files.
num_keep (int): The number of most recent checkpoint files to keep.
Returns:
None
Raises:
FileNotFoundError: If the base_path does not exist.
Example:
>>> delete_additional_ckpt('path/to/checkpoints', 1)
# This will delete all but the most recent checkpoint file in 'path/to/checkpoints'.
|
delete_additional_ckpt
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
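A self-contained sketch of delete_additional_ckpt on a throwaway directory with three fake checkpoint folders; the names and step numbers are illustrative.
import os
import tempfile
from hallo.utils.util import delete_additional_ckpt

base = tempfile.mkdtemp()
for step in (100, 200, 300):
    os.makedirs(os.path.join(base, f"checkpoint-{step}"))
delete_additional_ckpt(base, num_keep=2)        # removes checkpoint-100, keeps the two newest
print(sorted(os.listdir(base)))                 # ['checkpoint-200', 'checkpoint-300']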
def save_videos_from_pil(pil_images, path, fps=8):
"""
Save a sequence of images as a video using the Pillow library.
Args:
pil_images (List[PIL.Image]): A list of PIL.Image objects representing the frames of the video.
path (str): The output file path for the video.
fps (int, optional): The frames per second rate of the video. Defaults to 8.
Returns:
None
Raises:
ValueError: If the save format is not supported.
This function takes a list of PIL.Image objects and saves them as a video file with a specified frame rate.
The output file format is determined by the file extension of the provided path. Supported formats are
.mp4 (encoded with PyAV/libx264) and .gif (written with Pillow).
"""
save_fmt = Path(path).suffix
os.makedirs(os.path.dirname(path), exist_ok=True)
width, height = pil_images[0].size
if save_fmt == ".mp4":
codec = "libx264"
container = av.open(path, "w")
stream = container.add_stream(codec, rate=fps)
stream.width = width
stream.height = height
for pil_image in pil_images:
# pil_image = Image.fromarray(image_arr).convert("RGB")
av_frame = av.VideoFrame.from_image(pil_image)
container.mux(stream.encode(av_frame))
container.mux(stream.encode())
container.close()
elif save_fmt == ".gif":
pil_images[0].save(
fp=path,
format="GIF",
append_images=pil_images[1:],
save_all=True,
duration=(1 / fps * 1000),
loop=0,
)
else:
raise ValueError("Unsupported file type. Use .mp4 or .gif.")
|
Save a sequence of images as a video using the Pillow library.
Args:
pil_images (List[PIL.Image]): A list of PIL.Image objects representing the frames of the video.
path (str): The output file path for the video.
fps (int, optional): The frames per second rate of the video. Defaults to 8.
Returns:
None
Raises:
ValueError: If the save format is not supported.
This function takes a list of PIL.Image objects and saves them as a video file with a specified frame rate.
The output file format is determined by the file extension of the provided path. Supported formats are
.mp4 (encoded with PyAV/libx264) and .gif (written with Pillow).
|
save_videos_from_pil
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):
"""
Save a grid of videos as an animation or video.
Args:
videos (torch.Tensor): A tensor of shape (batch_size, channels, time, height, width)
containing the videos to save.
path (str): The path to save the video grid. Supported formats are .mp4 and .gif.
rescale (bool, optional): If True, rescale the video to the original resolution.
Defaults to False.
n_rows (int, optional): The number of rows in the video grid. Defaults to 6.
fps (int, optional): The frame rate of the saved video. Defaults to 8.
Raises:
ValueError: If the video format is not supported.
Returns:
None
"""
videos = rearrange(videos, "b c t h w -> t b c h w")
# height, width = videos.shape[-2:]
outputs = []
for x in videos:
x = torchvision.utils.make_grid(x, nrow=n_rows) # (c h w)
x = x.transpose(0, 1).transpose(1, 2).squeeze(-1) # (h w c)
if rescale:
x = (x + 1.0) / 2.0 # -1,1 -> 0,1
x = (x * 255).numpy().astype(np.uint8)
x = Image.fromarray(x)
outputs.append(x)
os.makedirs(os.path.dirname(path), exist_ok=True)
save_videos_from_pil(outputs, path, fps)
|
Save a grid of videos as an animation or video.
Args:
videos (torch.Tensor): A tensor of shape (batch_size, channels, time, height, width)
containing the videos to save.
path (str): The path to save the video grid. Supported formats are .mp4 and .gif.
rescale (bool, optional): If True, rescale the video to the original resolution.
Defaults to False.
n_rows (int, optional): The number of rows in the video grid. Defaults to 6.
fps (int, optional): The frame rate of the saved video. Defaults to 8.
Raises:
ValueError: If the video format is not supported.
Returns:
None
|
save_videos_grid
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
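Usage sketch for save_videos_grid with a random dummy clip; the tensor values and output path are illustrative, and writing .mp4 assumes PyAV is installed (a repo dependency).
import torch
from hallo.utils.util import save_videos_grid

videos = torch.rand(2, 3, 8, 64, 64)            # (batch, channels, time, height, width) in [0, 1]
save_videos_grid(videos, ".cache/demo_grid.mp4", n_rows=2, fps=8)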
def read_frames(video_path):
"""
Reads video frames from a given video file.
Args:
video_path (str): The path to the video file.
Returns:
List[PIL.Image.Image]: The decoded video frames as a list of PIL images.
Raises:
FileNotFoundError: If the video file is not found.
RuntimeError: If there is an error in reading the video stream.
The function decodes the video frames of the specified video file using the
Python AV library (av) and converts each frame into a PIL image. If the video file
is not found, it raises a FileNotFoundError, and if there is an error in reading
the video stream, it raises a RuntimeError.
"""
container = av.open(video_path)
video_stream = next(s for s in container.streams if s.type == "video")
frames = []
for packet in container.demux(video_stream):
for frame in packet.decode():
image = Image.frombytes(
"RGB",
(frame.width, frame.height),
frame.to_rgb().to_ndarray(),
)
frames.append(image)
return frames
|
Reads video frames from a given video file.
Args:
video_path (str): The path to the video file.
Returns:
List[PIL.Image.Image]: The decoded video frames as a list of PIL images.
Raises:
FileNotFoundError: If the video file is not found.
RuntimeError: If there is an error in reading the video stream.
The function decodes the video frames of the specified video file using the
Python AV library (av) and converts each frame into a PIL image. If the video file
is not found, it raises a FileNotFoundError, and if there is an error in reading
the video stream, it raises a RuntimeError.
|
read_frames
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_fps(video_path):
"""
Get the frame rate (FPS) of a video file.
Args:
video_path (str): The path to the video file.
Returns:
fractions.Fraction: The average frame rate (FPS) of the video stream.
"""
container = av.open(video_path)
video_stream = next(s for s in container.streams if s.type == "video")
fps = video_stream.average_rate
container.close()
return fps
|
Get the frame rate (FPS) of a video file.
Args:
video_path (str): The path to the video file.
Returns:
fractions.Fraction: The average frame rate (FPS) of the video stream.
|
get_fps
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
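A usage sketch combining read_frames and get_fps; the clip path is hypothetical and must point at an existing video.
from hallo.utils.util import get_fps, read_frames

video = "examples/driving_videos/demo.mp4"      # hypothetical path
frames = read_frames(video)                     # list of PIL.Image frames
print(len(frames), frames[0].size, get_fps(video))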
def tensor_to_video(tensor, output_video_file, audio_source, fps=25):
"""
Converts a Tensor with shape [c, f, h, w] into a video and adds an audio track from the specified audio file.
Args:
tensor (Tensor): The Tensor to be converted, shaped [c, f, h, w].
output_video_file (str): The file path where the output video will be saved.
audio_source (str): The path to the audio file (WAV file) that contains the audio track to be added.
fps (int): The frame rate of the output video. Default is 25 fps.
"""
tensor = tensor.permute(1, 2, 3, 0).cpu(
).numpy() # convert to [f, h, w, c]
tensor = np.clip(tensor * 255, 0, 255).astype(
np.uint8
) # to [0, 255]
def make_frame(t):
# get index
frame_index = min(int(t * fps), tensor.shape[0] - 1)
return tensor[frame_index]
new_video_clip = VideoClip(make_frame, duration=tensor.shape[0] / fps)
audio_clip = AudioFileClip(audio_source).subclip(0, tensor.shape[0] / fps)
new_video_clip = new_video_clip.set_audio(audio_clip)
new_video_clip.write_videofile(output_video_file, fps=fps, audio_codec='aac')
|
Converts a Tensor with shape [c, f, h, w] into a video and adds an audio track from the specified audio file.
Args:
tensor (Tensor): The Tensor to be converted, shaped [c, f, h, w].
output_video_file (str): The file path where the output video will be saved.
audio_source (str): The path to the audio file (WAV file) that contains the audio track to be added.
fps (int): The frame rate of the output video. Default is 25 fps.
|
tensor_to_video
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
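Usage sketch for tensor_to_video; the tensor is random noise and both paths are hypothetical (the WAV file must exist and moviepy must be installed).
import torch
from hallo.utils.util import tensor_to_video

frames = torch.rand(3, 25, 256, 256)            # [c, f, h, w] in [0, 1], i.e. one second at 25 fps
tensor_to_video(frames, ".cache/demo.mp4", "examples/driving_audios/demo.wav", fps=25)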
def compute_face_landmarks(detection_result, h, w):
"""
Compute face landmarks from a detection result.
Args:
detection_result (mediapipe.solutions.face_mesh.FaceMesh): The detection result containing face landmarks.
h (int): The height of the video frame.
w (int): The width of the video frame.
Returns:
face_landmarks_list (list): A list of face landmarks.
"""
face_landmarks_list = detection_result.face_landmarks
if len(face_landmarks_list) != 1:
print("#face is invalid:", len(face_landmarks_list))
return []
return [[p.x * w, p.y * h] for p in face_landmarks_list[0]]
|
Compute face landmarks from a detection result.
Args:
detection_result (mediapipe.solutions.face_mesh.FaceMesh): The detection result containing face landmarks.
h (int): The height of the video frame.
w (int): The width of the video frame.
Returns:
face_landmarks_list (list): A list of face landmarks.
|
compute_face_landmarks
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_landmark(file):
"""
This function takes a file as input and returns the facial landmarks detected in the file.
Args:
file (str): The path to the file containing the video or image to be processed.
Returns:
Tuple[np.ndarray, int, int]: The detected facial landmarks as an array of (x, y) coordinates, together with the image height and width.
"""
model_path = "pretrained_models/face_analysis/models/face_landmarker_v2_with_blendshapes.task"
BaseOptions = mp.tasks.BaseOptions
FaceLandmarker = mp.tasks.vision.FaceLandmarker
FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
VisionRunningMode = mp.tasks.vision.RunningMode
# Create a face landmarker instance with the video mode:
options = FaceLandmarkerOptions(
base_options=BaseOptions(model_asset_path=model_path),
running_mode=VisionRunningMode.IMAGE,
)
with FaceLandmarker.create_from_options(options) as landmarker:
image = mp.Image.create_from_file(str(file))
height, width = image.height, image.width
face_landmarker_result = landmarker.detect(image)
face_landmark = compute_face_landmarks(
face_landmarker_result, height, width)
return np.array(face_landmark), height, width
|
This function takes a file as input and returns the facial landmarks detected in the file.
Args:
file (str): The path to the file containing the video or image to be processed.
Returns:
Tuple[np.ndarray, int, int]: The detected facial landmarks as an array of (x, y) coordinates, together with the image height and width.
|
get_landmark
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_landmark_overframes(landmark_model, frames_path):
"""
This function iterates over the frames in a directory and returns the facial landmarks detected in each frame.
Args:
landmark_model: mediapipe landmark model instance
frames_path (str): The path to the video frames.
Returns:
Tuple[List[List[float]], int, int]: The per-frame facial landmarks, together with the frame height and width.
"""
face_landmarks = []
for file in sorted(os.listdir(frames_path)):
image = mp.Image.create_from_file(os.path.join(frames_path, file))
height, width = image.height, image.width
landmarker_result = landmark_model.detect(image)
frame_landmark = compute_face_landmarks(
landmarker_result, height, width)
face_landmarks.append(frame_landmark)
return face_landmarks, height, width
|
This function iterates over the frames in a directory and returns the facial landmarks detected in each frame.
Args:
landmark_model: mediapipe landmark model instance
frames_path (str): The path to the video frames.
Returns:
Tuple[List[List[float]], int, int]: The per-frame facial landmarks, together with the frame height and width.
|
get_landmark_overframes
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_lip_mask(landmarks, height, width, out_path=None, expand_ratio=2.0):
"""
Extracts the lip region from the given landmarks and saves it as an image.
Parameters:
landmarks (numpy.ndarray): Array of facial landmarks.
height (int): Height of the output lip mask image.
width (int): Width of the output lip mask image.
out_path (pathlib.Path): Path to save the lip mask image.
expand_ratio (float): Expand ratio of mask.
"""
lip_landmarks = np.take(landmarks, lip_ids, 0)
min_xy_lip = np.round(np.min(lip_landmarks, 0))
max_xy_lip = np.round(np.max(lip_landmarks, 0))
min_xy_lip[0], max_xy_lip[0], min_xy_lip[1], max_xy_lip[1] = expand_region(
[min_xy_lip[0], max_xy_lip[0], min_xy_lip[1], max_xy_lip[1]], width, height, expand_ratio)
lip_mask = np.zeros((height, width), dtype=np.uint8)
lip_mask[round(min_xy_lip[1]):round(max_xy_lip[1]),
round(min_xy_lip[0]):round(max_xy_lip[0])] = 255
if out_path:
cv2.imwrite(str(out_path), lip_mask)
return None
return lip_mask
|
Extracts the lip region from the given landmarks and saves it as an image.
Parameters:
landmarks (numpy.ndarray): Array of facial landmarks.
height (int): Height of the output lip mask image.
width (int): Width of the output lip mask image.
out_path (pathlib.Path): Path to save the lip mask image.
expand_ratio (float): Expand ratio of mask.
|
get_lip_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_union_lip_mask(landmarks, height, width, expand_ratio=1):
"""
Compute the union of the per-frame lip masks derived from the given landmarks.
Parameters:
landmarks (numpy.ndarray): Array of facial landmarks, one set per frame.
height (int): Height of the output lip mask image.
width (int): Width of the output lip mask image.
expand_ratio (float): Expand ratio of mask.
Returns:
numpy.ndarray: The union of the per-frame lip masks.
"""
lip_masks = []
for landmark in landmarks:
lip_masks.append(get_lip_mask(landmarks=landmark, height=height,
width=width, expand_ratio=expand_ratio))
union_mask = get_union_mask(lip_masks)
return union_mask
|
Compute the union of the per-frame lip masks derived from the given landmarks.
Parameters:
landmarks (numpy.ndarray): Array of facial landmarks, one set per frame.
height (int): Height of the output lip mask image.
width (int): Width of the output lip mask image.
expand_ratio (float): Expand ratio of mask.
Returns:
numpy.ndarray: The union of the per-frame lip masks.
|
get_union_lip_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_face_mask(landmarks, height, width, out_path=None, expand_ratio=1.2):
"""
Generate a face mask based on the given landmarks.
Args:
landmarks (numpy.ndarray): The landmarks of the face.
height (int): The height of the output face mask image.
width (int): The width of the output face mask image.
out_path (pathlib.Path): The path to save the face mask image.
expand_ratio (float): Expand ratio of mask.
Returns:
None. The face mask image is saved at the specified path.
"""
face_landmarks = np.take(landmarks, silhouette_ids, 0)
min_xy_face = np.round(np.min(face_landmarks, 0))
max_xy_face = np.round(np.max(face_landmarks, 0))
min_xy_face[0], max_xy_face[0], min_xy_face[1], max_xy_face[1] = expand_region(
[min_xy_face[0], max_xy_face[0], min_xy_face[1], max_xy_face[1]], width, height, expand_ratio)
face_mask = np.zeros((height, width), dtype=np.uint8)
face_mask[round(min_xy_face[1]):round(max_xy_face[1]),
round(min_xy_face[0]):round(max_xy_face[0])] = 255
if out_path:
cv2.imwrite(str(out_path), face_mask)
return None
return face_mask
|
Generate a face mask based on the given landmarks.
Args:
landmarks (numpy.ndarray): The landmarks of the face.
height (int): The height of the output face mask image.
width (int): The width of the output face mask image.
out_path (pathlib.Path): The path to save the face mask image.
expand_ratio (float): Expand ratio of mask.
Returns:
None. The face mask image is saved at the specified path.
|
get_face_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_union_face_mask(landmarks, height, width, expand_ratio=1):
"""
Generate the union of per-frame face masks based on the given landmarks.
Args:
landmarks (numpy.ndarray): The face landmarks, one set per frame.
height (int): The height of the output face mask image.
width (int): The width of the output face mask image.
expand_ratio (float): Expand ratio of mask.
Returns:
numpy.ndarray: The union of the per-frame face masks.
"""
face_masks = []
for landmark in landmarks:
face_masks.append(get_face_mask(landmarks=landmark,height=height,width=width,expand_ratio=expand_ratio))
union_mask = get_union_mask(face_masks)
return union_mask
|
Generate the union of per-frame face masks based on the given landmarks.
Args:
landmarks (numpy.ndarray): The face landmarks, one set per frame.
height (int): The height of the output face mask image.
width (int): The width of the output face mask image.
expand_ratio (float): Expand ratio of mask.
Returns:
numpy.ndarray: The union of the per-frame face masks.
|
get_union_face_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_mask(file, cache_dir, face_expand_raio):
"""
Generate a face mask based on the given landmarks and save it to the specified cache directory.
Args:
file (str): The path to the file containing the landmarks.
cache_dir (str): The directory to save the generated face mask.
Returns:
None
"""
landmarks, height, width = get_landmark(file)
file_name = os.path.basename(file).split(".")[0]
get_lip_mask(landmarks, height, width, os.path.join(
cache_dir, f"{file_name}_lip_mask.png"))
get_face_mask(landmarks, height, width, os.path.join(
cache_dir, f"{file_name}_face_mask.png"), face_expand_raio)
get_blur_mask(os.path.join(
cache_dir, f"{file_name}_face_mask.png"), os.path.join(
cache_dir, f"{file_name}_face_mask_blur.png"), kernel_size=(51, 51))
get_blur_mask(os.path.join(
cache_dir, f"{file_name}_lip_mask.png"), os.path.join(
cache_dir, f"{file_name}_sep_lip.png"), kernel_size=(31, 31))
get_background_mask(os.path.join(
cache_dir, f"{file_name}_face_mask_blur.png"), os.path.join(
cache_dir, f"{file_name}_sep_background.png"))
get_sep_face_mask(os.path.join(
cache_dir, f"{file_name}_face_mask_blur.png"), os.path.join(
cache_dir, f"{file_name}_sep_lip.png"), os.path.join(
cache_dir, f"{file_name}_sep_face.png"))
|
Generate a face mask based on the given landmarks and save it to the specified cache directory.
Args:
file (str): The path to the source image whose landmarks will be detected.
cache_dir (str): The directory to save the generated masks.
face_expand_raio (float): Expand ratio applied to the face mask.
Returns:
None
|
get_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def expand_region(region, image_w, image_h, expand_ratio=1.0):
"""
Expand the given region by a specified ratio.
Args:
region (tuple): A tuple containing the coordinates (min_x, max_x, min_y, max_y) of the region.
image_w (int): The width of the image.
image_h (int): The height of the image.
expand_ratio (float, optional): The ratio by which the region should be expanded. Defaults to 1.0.
Returns:
tuple: A tuple containing the expanded coordinates (min_x, max_x, min_y, max_y) of the region.
"""
min_x, max_x, min_y, max_y = region
mid_x = (max_x + min_x) // 2
side_len_x = (max_x - min_x) * expand_ratio
mid_y = (max_y + min_y) // 2
side_len_y = (max_y - min_y) * expand_ratio
min_x = mid_x - side_len_x // 2
max_x = mid_x + side_len_x // 2
min_y = mid_y - side_len_y // 2
max_y = mid_y + side_len_y // 2
if min_x < 0:
max_x -= min_x
min_x = 0
if max_x > image_w:
min_x -= max_x - image_w
max_x = image_w
if min_y < 0:
max_y -= min_y
min_y = 0
if max_y > image_h:
min_y -= max_y - image_h
max_y = image_h
return round(min_x), round(max_x), round(min_y), round(max_y)
|
Expand the given region by a specified ratio.
Args:
region (tuple): A tuple containing the coordinates (min_x, max_x, min_y, max_y) of the region.
image_w (int): The width of the image.
image_h (int): The height of the image.
expand_ratio (float, optional): The ratio by which the region should be expanded. Defaults to 1.0.
Returns:
tuple: A tuple containing the expanded coordinates (min_x, max_x, min_y, max_y) of the region.
|
expand_region
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
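A worked example for expand_region with illustrative numbers: a 100x100 box expanded by 1.2 and clamped at the right image border.
from hallo.utils.util import expand_region

# box (min_x, max_x, min_y, max_y) = (50, 150, 50, 150) inside a 150x200 image
print(expand_region((50, 150, 50, 150), image_w=150, image_h=200, expand_ratio=1.2))
# (30, 150, 40, 160): the expanded x-range is shifted left so the box stays inside the image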
def get_blur_mask(file_path, output_file_path, resize_dim=(64, 64), kernel_size=(101, 101)):
"""
Read, resize, blur, normalize, and save an image.
Parameters:
file_path (str): Path to the input image file.
output_file_path (str): Path of the output file for the blurred mask image.
resize_dim (tuple): Dimensions to resize the images to.
kernel_size (tuple): Size of the kernel to use for Gaussian blur.
"""
# Read the mask image
mask = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
# Check if the image is loaded successfully
if mask is not None:
normalized_mask = blur_mask(mask,resize_dim=resize_dim,kernel_size=kernel_size)
# Save the normalized mask image
cv2.imwrite(output_file_path, normalized_mask)
return f"Processed, normalized, and saved: {output_file_path}"
return f"Failed to load image: {file_path}"
|
Read, resize, blur, normalize, and save an image.
Parameters:
file_path (str): Path to the input image file.
output_file_path (str): Path of the output file for the blurred mask image.
resize_dim (tuple): Dimensions to resize the images to.
kernel_size (tuple): Size of the kernel to use for Gaussian blur.
|
get_blur_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def blur_mask(mask, resize_dim=(64, 64), kernel_size=(51, 51)):
"""
Resize, blur, and normalize a mask image in memory.
Parameters:
mask (numpy.ndarray): The grayscale mask image to process.
resize_dim (tuple): Dimensions to resize the images to.
kernel_size (tuple): Size of the kernel to use for Gaussian blur.
"""
# Check if the image is loaded successfully
normalized_mask = None
if mask is not None:
# Resize the mask image
resized_mask = cv2.resize(mask, resize_dim)
# Apply Gaussian blur to the resized mask image
blurred_mask = cv2.GaussianBlur(resized_mask, kernel_size, 0)
# Normalize the blurred image
normalized_mask = cv2.normalize(
blurred_mask, None, 0, 255, cv2.NORM_MINMAX)
# Save the normalized mask image
return normalized_mask
|
Resize, blur, and normalize a mask image in memory.
Parameters:
mask (numpy.ndarray): The grayscale mask image to process.
resize_dim (tuple): Dimensions to resize the images to.
kernel_size (tuple): Size of the kernel to use for Gaussian blur.
|
blur_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
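Usage sketch: feather a synthetic rectangular mask with blur_mask; the mask itself is made up for illustration.
import numpy as np
from hallo.utils.util import blur_mask

mask = np.zeros((512, 512), dtype=np.uint8)
mask[200:400, 150:350] = 255                    # hard rectangular region
soft = blur_mask(mask, resize_dim=(64, 64), kernel_size=(51, 51))
print(soft.shape, soft.min(), soft.max())       # (64, 64) 0 255 -- soft edges after blur and renormalization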
def get_background_mask(file_path, output_file_path):
"""
Read an image, invert its values, and save the result.
Parameters:
file_path (str): Path to the input image file.
output_file_path (str): Path of the output file for the inverted image.
"""
# Read the image
image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
if image is None:
print(f"Failed to load image: {file_path}")
return
# Invert the image
inverted_image = 1.0 - (
image / 255.0
) # Assuming the image values are in [0, 255] range
# Convert back to uint8
inverted_image = (inverted_image * 255).astype(np.uint8)
# Save the inverted image
cv2.imwrite(output_file_path, inverted_image)
print(f"Processed and saved: {output_file_path}")
|
Read an image, invert its values, and save the result.
Parameters:
file_path (str): Path to the input image file.
output_file_path (str): Path of the output file for the inverted image.
|
get_background_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_sep_face_mask(file_path1, file_path2, output_file_path):
"""
Read two images, subtract the second one from the first, and save the result.
Parameters:
file_path1 (str): Path to the mask to subtract from.
file_path2 (str): Path to the mask to subtract.
output_file_path (str): Path of the output file for the subtracted mask.
"""
# Read the images
mask1 = cv2.imread(file_path1, cv2.IMREAD_GRAYSCALE)
mask2 = cv2.imread(file_path2, cv2.IMREAD_GRAYSCALE)
if mask1 is None or mask2 is None:
print(f"Failed to load images: {file_path1}")
return
# Ensure the images are the same size
if mask1.shape != mask2.shape:
print(
f"Image shapes do not match for {file_path1}: {mask1.shape} vs {mask2.shape}"
)
return
# Subtract the second mask from the first
result_mask = cv2.subtract(mask1, mask2)
# Save the result mask image
cv2.imwrite(output_file_path, result_mask)
print(f"Processed and saved: {output_file_path}")
|
Read two images, subtract the second one from the first, and save the result.
Parameters:
file_path1 (str): Path to the mask to subtract from.
file_path2 (str): Path to the mask to subtract.
output_file_path (str): Path of the output file for the subtracted mask.
|
get_sep_face_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def load_checkpoint(cfg, save_dir, accelerator):
"""
Load the most recent checkpoint from the specified directory.
This function loads the latest checkpoint from the `save_dir` if the `resume_from_checkpoint` parameter is set to "latest".
If a specific checkpoint is provided in `resume_from_checkpoint`, it loads that checkpoint. If no checkpoint is found,
it starts training from scratch.
Args:
cfg: The configuration object containing training parameters.
save_dir (str): The directory where checkpoints are saved.
accelerator: The accelerator object for distributed training.
Returns:
int: The global step at which to resume training.
"""
if cfg.resume_from_checkpoint != "latest":
resume_dir = cfg.resume_from_checkpoint
else:
resume_dir = save_dir
# Get the most recent checkpoint
dirs = os.listdir(resume_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
if len(dirs) > 0:
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1]
accelerator.load_state(os.path.join(resume_dir, path))
accelerator.print(f"Resuming from checkpoint {path}")
global_step = int(path.split("-")[1])
else:
accelerator.print(
f"Could not find checkpoint under {resume_dir}, start training from scratch")
global_step = 0
return global_step
|
Load the most recent checkpoint from the specified directory.
This function loads the latest checkpoint from the `save_dir` if the `resume_from_checkpoint` parameter is set to "latest".
If a specific checkpoint is provided in `resume_from_checkpoint`, it loads that checkpoint. If no checkpoint is found,
it starts training from scratch.
Args:
cfg: The configuration object containing training parameters.
save_dir (str): The directory where checkpoints are saved.
accelerator: The accelerator object for distributed training.
Returns:
int: The global step at which to resume training.
|
load_checkpoint
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def compute_snr(noise_scheduler, timesteps):
"""
Computes SNR as per
https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/
521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849
"""
alphas_cumprod = noise_scheduler.alphas_cumprod
sqrt_alphas_cumprod = alphas_cumprod**0.5
sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5
# Expand the tensors.
# Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/
# 521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026
sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[
timesteps
].float()
while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape):
sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None]
alpha = sqrt_alphas_cumprod.expand(timesteps.shape)
sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(
device=timesteps.device
)[timesteps].float()
while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape):
sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None]
sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape)
# Compute SNR.
snr = (alpha / sigma) ** 2
return snr
|
Computes SNR as per
https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/
521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849
|
compute_snr
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
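A sketch of Min-SNR loss weighting built on compute_snr, following the common diffusers training recipe; the scheduler config and gamma value are illustrative assumptions, not settings taken from this repo.
import torch
from diffusers import DDPMScheduler
from hallo.utils.util import compute_snr

noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
timesteps = torch.randint(0, 1000, (4,))
snr = compute_snr(noise_scheduler, timesteps)
snr_gamma = 5.0
# clamp the per-sample weight at gamma / snr so high-SNR timesteps do not dominate the loss
mse_loss_weights = torch.stack([snr, snr_gamma * torch.ones_like(snr)], dim=1).min(dim=1)[0] / snr
print(mse_loss_weights.shape)  # torch.Size([4])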
def extract_audio_from_videos(video_path: Path, audio_output_path: Path) -> Path:
"""
Extract audio from a video file and save it as a WAV file.
This function uses ffmpeg to extract the audio stream from a given video file and saves it as a WAV file
in the specified output directory.
Args:
video_path (Path): The path to the input video file.
audio_output_path (Path): The path where the extracted WAV file will be saved.
Returns:
Path: The path to the extracted audio file.
Raises:
subprocess.CalledProcessError: If the ffmpeg command fails to execute.
"""
ffmpeg_command = [
'ffmpeg', '-y',
'-i', str(video_path),
'-vn', '-acodec',
"pcm_s16le", '-ar', '16000', '-ac', '2',
str(audio_output_path)
]
try:
print(f"Running command: {' '.join(ffmpeg_command)}")
subprocess.run(ffmpeg_command, check=True)
except subprocess.CalledProcessError as e:
print(f"Error extracting audio from video: {e}")
raise
return audio_output_path
|
Extract audio from a video file and save it as a WAV file.
This function uses ffmpeg to extract the audio stream from a given video file and saves it as a WAV file
in the specified output directory.
Args:
video_path (Path): The path to the input video file.
audio_output_path (Path): The path where the extracted WAV file will be saved.
Returns:
Path: The path to the extracted audio file.
Raises:
subprocess.CalledProcessError: If the ffmpeg command fails to execute.
|
extract_audio_from_videos
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def convert_video_to_images(video_path: Path, output_dir: Path) -> Path:
"""
Convert a video file into a sequence of images.
This function uses ffmpeg to convert each frame of the given video file into an image. The images are saved
in a directory named after the video file stem under the specified output directory.
Args:
video_path (Path): The path to the input video file.
output_dir (Path): The directory where the extracted images will be saved.
Returns:
Path: The path to the directory containing the extracted images.
Raises:
subprocess.CalledProcessError: If the ffmpeg command fails to execute.
"""
ffmpeg_command = [
'ffmpeg',
'-i', str(video_path),
'-vf', 'fps=25',
str(output_dir / '%04d.png')
]
try:
print(f"Running command: {' '.join(ffmpeg_command)}")
subprocess.run(ffmpeg_command, check=True)
except subprocess.CalledProcessError as e:
print(f"Error converting video to images: {e}")
raise
return output_dir
|
Convert a video file into a sequence of images.
This function uses ffmpeg to convert each frame of the given video file into an image. The images are saved
in a directory named after the video file stem under the specified output directory.
Args:
video_path (Path): The path to the input video file.
output_dir (Path): The directory where the extracted images will be saved.
Returns:
Path: The path to the directory containing the extracted images.
Raises:
subprocess.CalledProcessError: If the ffmpeg command fails to execute.
|
convert_video_to_images
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def get_union_mask(masks):
"""
Compute the union of a list of masks.
This function takes a list of masks and computes their union by taking the maximum value at each pixel location.
Additionally, it finds the bounding box of the non-zero regions in the mask and sets the bounding box area to white.
Args:
masks (list of np.ndarray): List of masks to be combined.
Returns:
np.ndarray: The union of the input masks.
"""
union_mask = None
for mask in masks:
if union_mask is None:
union_mask = mask
else:
union_mask = np.maximum(union_mask, mask)
if union_mask is not None:
# Find the bounding box of the non-zero regions in the mask
rows = np.any(union_mask, axis=1)
cols = np.any(union_mask, axis=0)
try:
ymin, ymax = np.where(rows)[0][[0, -1]]
xmin, xmax = np.where(cols)[0][[0, -1]]
except Exception as e:
print(str(e))
return 0.0
# Set bounding box area to white
union_mask[ymin: ymax + 1, xmin: xmax + 1] = np.max(union_mask)
return union_mask
|
Compute the union of a list of masks.
This function takes a list of masks and computes their union by taking the maximum value at each pixel location.
Additionally, it finds the bounding box of the non-zero regions in the mask and sets the bounding box area to white.
Args:
masks (list of np.ndarray): List of masks to be combined.
Returns:
np.ndarray: The union of the input masks.
|
get_union_mask
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
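A tiny worked example for get_union_mask with two hand-made masks; the values are illustrative.
import numpy as np
from hallo.utils.util import get_union_mask

h = w = 8
m1 = np.zeros((h, w), dtype=np.uint8)
m1[1:3, 1:3] = 255
m2 = np.zeros((h, w), dtype=np.uint8)
m2[4:6, 5:7] = 255
union = get_union_mask([m1, m2])
print(union[1:6, 1:7].min())  # 255 -- the bounding box spanning both regions is filled white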
def move_final_checkpoint(save_dir, module_dir, prefix):
"""
Move the final checkpoint file to the save directory.
This function identifies the latest checkpoint file based on the given prefix and moves it to the specified save directory.
Args:
save_dir (str): The directory where the final checkpoint file should be saved.
module_dir (str): The directory containing the checkpoint files.
prefix (str): The prefix used to identify checkpoint files.
Raises:
ValueError: If no checkpoint files are found with the specified prefix.
"""
checkpoints = os.listdir(module_dir)
checkpoints = [d for d in checkpoints if d.startswith(prefix)]
checkpoints = sorted(
checkpoints, key=lambda x: int(x.split("-")[1].split(".")[0])
)
shutil.copy2(os.path.join(
module_dir, checkpoints[-1]), os.path.join(save_dir, prefix + '.pth'))
|
Move the final checkpoint file to the save directory.
This function identifies the latest checkpoint file based on the given prefix and moves it to the specified save directory.
Args:
save_dir (str): The directory where the final checkpoint file should be saved.
module_dir (str): The directory containing the checkpoint files.
prefix (str): The prefix used to identify checkpoint files.
Raises:
ValueError: If no checkpoint files are found with the specified prefix.
|
move_final_checkpoint
|
python
|
fudan-generative-vision/hallo
|
hallo/utils/util.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/utils/util.py
|
MIT
|
def predict(image, audio, pose_weight, face_weight, lip_weight, face_expand_ratio, progress=gr.Progress(track_tqdm=True)):
"""
Run the inference pipeline with the configs collected from the gradio interface.
"""
_ = progress
config = {
'source_image': image,
'driving_audio': audio,
'pose_weight': pose_weight,
'face_weight': face_weight,
'lip_weight': lip_weight,
'face_expand_ratio': face_expand_ratio,
'config': 'configs/inference/default.yaml',
'checkpoint': None,
'output': ".cache/output.mp4"
}
args = argparse.Namespace()
for key, value in config.items():
setattr(args, key, value)
return inference_process(args)
|
Run the inference pipeline with the configs collected from the gradio interface.
|
predict
|
python
|
fudan-generative-vision/hallo
|
scripts/app.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/app.py
|
MIT
|
def setup_directories(video_path: Path) -> dict:
"""
Setup directories for storing processed files.
Args:
video_path (Path): Path to the video file.
Returns:
dict: A dictionary containing paths for various directories.
"""
base_dir = video_path.parent.parent
dirs = {
"face_mask": base_dir / "face_mask",
"sep_pose_mask": base_dir / "sep_pose_mask",
"sep_face_mask": base_dir / "sep_face_mask",
"sep_lip_mask": base_dir / "sep_lip_mask",
"face_emb": base_dir / "face_emb",
"audio_emb": base_dir / "audio_emb"
}
for path in dirs.values():
path.mkdir(parents=True, exist_ok=True)
return dirs
|
Setup directories for storing processed files.
Args:
video_path (Path): Path to the video file.
Returns:
dict: A dictionary containing paths for various directories.
|
setup_directories
|
python
|
fudan-generative-vision/hallo
|
scripts/data_preprocess.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/data_preprocess.py
|
MIT
|
def process_single_video(video_path: Path,
output_dir: Path,
image_processor: ImageProcessorForDataProcessing,
audio_processor: AudioProcessor,
step: int) -> None:
"""
Process a single video file.
Args:
video_path (Path): Path to the video file.
output_dir (Path): Directory to save the output.
image_processor (ImageProcessorForDataProcessing): Image processor object.
audio_processor (AudioProcessor): Audio processor object.
step (int): Processing step (1: extract frames, audio and masks; 2: extract face and audio embeddings).
"""
assert video_path.exists(), f"Video path {video_path} does not exist"
dirs = setup_directories(video_path)
logging.info(f"Processing video: {video_path}")
try:
if step == 1:
images_output_dir = output_dir / 'images' / video_path.stem
images_output_dir.mkdir(parents=True, exist_ok=True)
images_output_dir = convert_video_to_images(
video_path, images_output_dir)
logging.info(f"Images saved to: {images_output_dir}")
audio_output_dir = output_dir / 'audios'
audio_output_dir.mkdir(parents=True, exist_ok=True)
audio_output_path = audio_output_dir / f'{video_path.stem}.wav'
audio_output_path = extract_audio_from_videos(
video_path, audio_output_path)
logging.info(f"Audio extracted to: {audio_output_path}")
face_mask, _, sep_pose_mask, sep_face_mask, sep_lip_mask = image_processor.preprocess(
images_output_dir)
cv2.imwrite(
str(dirs["face_mask"] / f"{video_path.stem}.png"), face_mask)
cv2.imwrite(str(dirs["sep_pose_mask"] /
f"{video_path.stem}.png"), sep_pose_mask)
cv2.imwrite(str(dirs["sep_face_mask"] /
f"{video_path.stem}.png"), sep_face_mask)
cv2.imwrite(str(dirs["sep_lip_mask"] /
f"{video_path.stem}.png"), sep_lip_mask)
else:
images_dir = output_dir / "images" / video_path.stem
audio_path = output_dir / "audios" / f"{video_path.stem}.wav"
_, face_emb, _, _, _ = image_processor.preprocess(images_dir)
torch.save(face_emb, str(
dirs["face_emb"] / f"{video_path.stem}.pt"))
audio_emb, _ = audio_processor.preprocess(audio_path)
torch.save(audio_emb, str(
dirs["audio_emb"] / f"{video_path.stem}.pt"))
except Exception as e:
logging.error(f"Failed to process video {video_path}: {e}")
|
Process a single video file.
Args:
video_path (Path): Path to the video file.
output_dir (Path): Directory to save the output.
image_processor (ImageProcessorForDataProcessing): Image processor object.
audio_processor (AudioProcessor): Audio processor object.
step (int): Processing step (1: extract frames, audio and masks; 2: extract face and audio embeddings).
|
process_single_video
|
python
|
fudan-generative-vision/hallo
|
scripts/data_preprocess.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/data_preprocess.py
|
MIT
|
def process_all_videos(input_video_list: List[Path], output_dir: Path, step: int) -> None:
"""
Process all videos in the input list.
Args:
input_video_list (List[Path]): List of video paths to process.
output_dir (Path): Directory to save the output.
step (int): Processing step (1: extract frames, audio and masks; 2: extract face and audio embeddings).
"""
face_analysis_model_path = "pretrained_models/face_analysis"
landmark_model_path = "pretrained_models/face_analysis/models/face_landmarker_v2_with_blendshapes.task"
audio_separator_model_file = "pretrained_models/audio_separator/Kim_Vocal_2.onnx"
wav2vec_model_path = 'pretrained_models/wav2vec/wav2vec2-base-960h'
audio_processor = AudioProcessor(
16000,
25,
wav2vec_model_path,
False,
os.path.dirname(audio_separator_model_file),
os.path.basename(audio_separator_model_file),
os.path.join(output_dir, "vocals"),
) if step==2 else None
image_processor = ImageProcessorForDataProcessing(
face_analysis_model_path, landmark_model_path, step)
for video_path in tqdm(input_video_list, desc="Processing videos"):
process_single_video(video_path, output_dir,
image_processor, audio_processor, step)
|
Process all videos in the input list.
Args:
input_video_list (List[Path]): List of video paths to process.
output_dir (Path): Directory to save the output.
step (int): Processing step (1: extract frames, audio and masks; 2: extract face and audio embeddings).
|
process_all_videos
|
python
|
fudan-generative-vision/hallo
|
scripts/data_preprocess.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/data_preprocess.py
|
MIT
|
def get_video_paths(source_dir: Path, parallelism: int, rank: int) -> List[Path]:
"""
Get paths of videos to process, partitioned for parallel processing.
Args:
source_dir (Path): Source directory containing videos.
parallelism (int): Level of parallelism.
rank (int): Rank for distributed processing.
Returns:
List[Path]: List of video paths to process.
"""
video_paths = [item for item in sorted(
source_dir.iterdir()) if item.is_file() and item.suffix == '.mp4']
return [video_paths[i] for i in range(len(video_paths)) if i % parallelism == rank]
|
Get paths of videos to process, partitioned for parallel processing.
Args:
source_dir (Path): Source directory containing videos.
parallelism (int): Level of parallelism.
rank (int): Rank for distributed processing.
Returns:
List[Path]: List of video paths to process.
|
get_video_paths
|
python
|
fudan-generative-vision/hallo
|
scripts/data_preprocess.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/data_preprocess.py
|
MIT
|
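The rank/parallelism split is easiest to see with plain lists; the sketch below mirrors the modulo partitioning without touching the file system (the file names are made up):

# Each worker with a given rank keeps every `parallelism`-th item of the sorted list.
video_paths = [f"clip_{i:04d}.mp4" for i in range(10)]
parallelism, rank = 4, 1
subset = [video_paths[i] for i in range(len(video_paths)) if i % parallelism == rank]
print(subset)  # ['clip_0001.mp4', 'clip_0005.mp4', 'clip_0009.mp4']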
def construct_meta_info(frames_dir_path: Path) -> dict:
"""
Construct meta information for a given frames directory.
Args:
frames_dir_path (Path): The path to the frames directory.
Returns:
dict: A dictionary containing the meta information for the frames directory, or None if the required files do not exist.
"""
mask_path = str(frames_dir_path).replace("images", "face_mask") + ".png"
face_emb_path = str(frames_dir_path).replace("images", "face_emb") + ".pt"
if not os.path.exists(mask_path):
print(f"Mask path not found: {mask_path}")
return None
if not os.path.exists(face_emb_path):
print(f"Face emb path not found: {face_emb_path}")
return None
if torch.load(face_emb_path) is None:
print(f"Face emb is None: {face_emb_path}")
return None
return {
"image_path": str(frames_dir_path),
"mask_path": mask_path,
"face_emb": face_emb_path,
}
|
Construct meta information for a given frames directory.
Args:
frames_dir_path (Path): The path to the frames directory.
Returns:
dict: A dictionary containing the meta information for the frames directory, or None if the required files do not exist.
|
construct_meta_info
|
python
|
fudan-generative-vision/hallo
|
scripts/extract_meta_info_stage1.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/extract_meta_info_stage1.py
|
MIT
|
def main():
"""
Main function to extract meta info for training.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--root_path", type=str,
required=True, help="Root path of the video directories")
parser.add_argument("-n", "--dataset_name", type=str,
required=True, help="Name of the dataset")
parser.add_argument("--meta_info_name", type=str,
help="Name of the meta information file")
args = parser.parse_args()
if args.meta_info_name is None:
args.meta_info_name = args.dataset_name
image_dir = Path(args.root_path) / "images"
output_dir = Path("./data")
output_dir.mkdir(exist_ok=True)
# Collect all video folder paths
frames_dir_paths = collect_video_folder_paths(image_dir)
meta_infos = []
for frames_dir_path in frames_dir_paths:
meta_info = construct_meta_info(frames_dir_path)
if meta_info:
meta_infos.append(meta_info)
output_file = output_dir / f"{args.meta_info_name}_stage1.json"
with output_file.open("w", encoding="utf-8") as f:
json.dump(meta_infos, f, indent=4)
print(f"Final data count: {len(meta_infos)}")
|
Main function to extract meta info for training.
|
main
|
python
|
fudan-generative-vision/hallo
|
scripts/extract_meta_info_stage1.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/extract_meta_info_stage1.py
|
MIT
|
def extract_meta_info(video_path: str) -> dict:
"""
Extract meta information for a given video file.
Args:
video_path (str): The path to the video file.
Returns:
dict: A dictionary containing the meta information for the video.
"""
mask_path = construct_paths(
video_path, "videos", "face_mask", ".png")
sep_mask_border = construct_paths(
video_path, "videos", "sep_pose_mask", ".png")
sep_mask_face = construct_paths(
video_path, "videos", "sep_face_mask", ".png")
sep_mask_lip = construct_paths(
video_path, "videos", "sep_lip_mask", ".png")
face_emb_path = construct_paths(
video_path, "videos", "face_emb", ".pt")
audio_path = construct_paths(video_path, "videos", "audios", ".wav")
vocal_emb_base_all = construct_paths(
video_path, "videos", "audio_emb", ".pt")
assert_flag = True
if not file_exists(mask_path):
print(f"Mask path not found: {mask_path}")
assert_flag = False
if not file_exists(sep_mask_border):
print(f"Separate mask border not found: {sep_mask_border}")
assert_flag = False
if not file_exists(sep_mask_face):
print(f"Separate mask face not found: {sep_mask_face}")
assert_flag = False
if not file_exists(sep_mask_lip):
print(f"Separate mask lip not found: {sep_mask_lip}")
assert_flag = False
if not file_exists(face_emb_path):
print(f"Face embedding path not found: {face_emb_path}")
assert_flag = False
if not file_exists(audio_path):
print(f"Audio path not found: {audio_path}")
assert_flag = False
if not file_exists(vocal_emb_base_all):
print(f"Vocal embedding base all not found: {vocal_emb_base_all}")
assert_flag = False
video_frames = VideoReader(video_path, ctx=cpu(0))
audio_emb = torch.load(vocal_emb_base_all)
if abs(len(video_frames) - audio_emb.shape[0]) > 3:
print(f"Frame count mismatch for video: {video_path}")
assert_flag = False
face_emb = torch.load(face_emb_path)
if face_emb is None:
print(f"Face embedding is None for video: {video_path}")
assert_flag = False
del video_frames, audio_emb
if assert_flag:
return {
"video_path": str(video_path),
"mask_path": mask_path,
"sep_mask_border": sep_mask_border,
"sep_mask_face": sep_mask_face,
"sep_mask_lip": sep_mask_lip,
"face_emb_path": face_emb_path,
"audio_path": audio_path,
"vocals_emb_base_all": vocal_emb_base_all,
}
return None
|
Extract meta information for a given video file.
Args:
video_path (str): The path to the video file.
Returns:
dict: A dictionary containing the meta information for the video.
|
extract_meta_info
|
python
|
fudan-generative-vision/hallo
|
scripts/extract_meta_info_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/extract_meta_info_stage2.py
|
MIT
|
def main():
"""
Main function to extract meta info for training.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--root_path", type=str,
required=True, help="Root path of the video files")
parser.add_argument("-n", "--dataset_name", type=str,
required=True, help="Name of the dataset")
parser.add_argument("--meta_info_name", type=str,
help="Name of the meta information file")
args = parser.parse_args()
if args.meta_info_name is None:
args.meta_info_name = args.dataset_name
video_dir = Path(args.root_path) / "videos"
video_paths = get_video_paths(video_dir, [".mp4"])
meta_infos = []
for video_path in tqdm(video_paths, desc="Extracting meta info"):
meta_info = extract_meta_info(video_path)
if meta_info:
meta_infos.append(meta_info)
print(f"Final data count: {len(meta_infos)}")
output_file = Path(f"./data/{args.meta_info_name}_stage2.json")
output_file.parent.mkdir(parents=True, exist_ok=True)
with output_file.open("w", encoding="utf-8") as f:
json.dump(meta_infos, f, indent=4)
|
Main function to extract meta info for training.
|
main
|
python
|
fudan-generative-vision/hallo
|
scripts/extract_meta_info_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/extract_meta_info_stage2.py
|
MIT
|
def forward(self,):
"""
Empty forward pass to satisfy the abstract method of nn.Module; the wrapped modules are called directly elsewhere.
"""
|
Empty forward pass to satisfy the abstract method of nn.Module; the wrapped modules are called directly elsewhere.
|
forward
|
python
|
fudan-generative-vision/hallo
|
scripts/inference.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/inference.py
|
MIT
|
def get_modules(self):
"""
Return the wrapped sub-modules (reference UNet, denoising UNet, face locator, image and audio projection) as a dictionary.
"""
return {
"reference_unet": self.reference_unet,
"denoising_unet": self.denoising_unet,
"face_locator": self.face_locator,
"imageproj": self.imageproj,
"audioproj": self.audioproj,
}
|
Return the wrapped sub-modules (reference UNet, denoising UNet, face locator, image and audio projection) as a dictionary.
|
get_modules
|
python
|
fudan-generative-vision/hallo
|
scripts/inference.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/inference.py
|
MIT
|
def process_audio_emb(audio_emb):
"""
Process the audio embedding to concatenate with other tensors.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor to process.
Returns:
concatenated_tensors (List[torch.Tensor]): The concatenated tensor list.
"""
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [
audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)] for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
return audio_emb
|
Process the audio embedding to concatenate with other tensors.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor to process.
Returns:
concatenated_tensors (List[torch.Tensor]): The concatenated tensor list.
|
process_audio_emb
|
python
|
fudan-generative-vision/hallo
|
scripts/inference.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/inference.py
|
MIT
|
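The windowing above gathers, for every frame, its two previous and two following audio features, clamping indices at the boundaries. A minimal sketch with a dummy tensor, assuming only that torch is installed (the function is re-stated locally so the snippet is self-contained):

import torch

def window_audio_emb(audio_emb: torch.Tensor) -> torch.Tensor:
    # For frame i, stack features i-2 .. i+2, clamping indices to [0, F-1].
    stacked = []
    for i in range(audio_emb.shape[0]):
        window = [audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)] for j in range(-2, 3)]
        stacked.append(torch.stack(window, dim=0))
    return torch.stack(stacked, dim=0)

dummy = torch.randn(10, 12, 768)   # hypothetical (frames, wav2vec blocks, channels)
out = window_audio_emb(dummy)
print(out.shape)                   # torch.Size([10, 5, 12, 768])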
def inference_process(args: argparse.Namespace):
"""
Perform inference processing.
Args:
args (argparse.Namespace): Command-line arguments.
This function initializes the configuration for the inference process. It sets up the necessary
modules and variables to prepare for the upcoming inference steps.
"""
# 1. init config
cli_args = filter_non_none(vars(args))
config = OmegaConf.load(args.config)
config = OmegaConf.merge(config, cli_args)
source_image_path = config.source_image
driving_audio_path = config.driving_audio
save_path = config.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
motion_scale = [config.pose_weight, config.face_weight, config.lip_weight]
# 2. runtime variables
device = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
if config.weight_dtype == "fp16":
weight_dtype = torch.float16
elif config.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif config.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
weight_dtype = torch.float32
# 3. prepare inference data
# 3.1 prepare source image, face mask, face embeddings
img_size = (config.data.source_image.width,
config.data.source_image.height)
clip_length = config.data.n_sample_frames
face_analysis_model_path = config.face_analysis.model_path
with ImageProcessor(img_size, face_analysis_model_path) as image_processor:
source_image_pixels, \
source_image_face_region, \
source_image_face_emb, \
source_image_full_mask, \
source_image_face_mask, \
source_image_lip_mask = image_processor.preprocess(
source_image_path, save_path, config.face_expand_ratio)
# 3.2 prepare audio embeddings
sample_rate = config.data.driving_audio.sample_rate
assert sample_rate == 16000, "audio sample rate must be 16000"
fps = config.data.export_video.fps
wav2vec_model_path = config.wav2vec.model_path
wav2vec_only_last_features = config.wav2vec.features == "last"
audio_separator_model_file = config.audio_separator.model_path
with AudioProcessor(
sample_rate,
fps,
wav2vec_model_path,
wav2vec_only_last_features,
os.path.dirname(audio_separator_model_file),
os.path.basename(audio_separator_model_file),
os.path.join(save_path, "audio_preprocess")
) as audio_processor:
audio_emb, audio_length = audio_processor.preprocess(driving_audio_path, clip_length)
# 4. build modules
sched_kwargs = OmegaConf.to_container(config.noise_scheduler_kwargs)
if config.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
vae = AutoencoderKL.from_pretrained(config.vae.model_path)
reference_unet = UNet2DConditionModel.from_pretrained(
config.base_model_path, subfolder="unet")
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
config.base_model_path,
config.motion_module_path,
subfolder="unet",
unet_additional_kwargs=OmegaConf.to_container(
config.unet_additional_kwargs),
use_landmark=False,
)
face_locator = FaceLocator(conditioning_embedding_channels=320)
image_proj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
)
audio_proj = AudioProjModel(
seq_len=5,
blocks=12, # use 12 layers' hidden states of wav2vec
channels=768, # audio embedding channel
intermediate_dim=512,
output_dim=768,
context_tokens=32,
).to(device=device, dtype=weight_dtype)
audio_ckpt_dir = config.audio_ckpt_dir
# Freeze
vae.requires_grad_(False)
image_proj.requires_grad_(False)
reference_unet.requires_grad_(False)
denoising_unet.requires_grad_(False)
face_locator.requires_grad_(False)
audio_proj.requires_grad_(False)
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
net = Net(
reference_unet,
denoising_unet,
face_locator,
image_proj,
audio_proj,
)
m,u = net.load_state_dict(
torch.load(
os.path.join(audio_ckpt_dir, "net.pth"),
map_location="cpu",
),
)
assert len(m) == 0 and len(u) == 0, "Fail to load correct checkpoint."
print("loaded weight from ", os.path.join(audio_ckpt_dir, "net.pth"))
# 5. inference
pipeline = FaceAnimatePipeline(
vae=vae,
reference_unet=net.reference_unet,
denoising_unet=net.denoising_unet,
face_locator=net.face_locator,
scheduler=val_noise_scheduler,
image_proj=net.imageproj,
)
pipeline.to(device=device, dtype=weight_dtype)
audio_emb = process_audio_emb(audio_emb)
source_image_pixels = source_image_pixels.unsqueeze(0)
source_image_face_region = source_image_face_region.unsqueeze(0)
source_image_face_emb = source_image_face_emb.reshape(1, -1)
source_image_face_emb = torch.tensor(source_image_face_emb)
source_image_full_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_full_mask
]
source_image_face_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_face_mask
]
source_image_lip_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_lip_mask
]
times = audio_emb.shape[0] // clip_length
tensor_result = []
generator = torch.manual_seed(42)
for t in range(times):
print(f"[{t+1}/{times}]")
if len(tensor_result) == 0:
# The first iteration
motion_zeros = source_image_pixels.repeat(
config.data.n_motion_frames, 1, 1, 1)
motion_zeros = motion_zeros.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_zeros], dim=0) # concat the ref image and the first motion frames
else:
motion_frames = tensor_result[-1][0]
motion_frames = motion_frames.permute(1, 0, 2, 3)
motion_frames = motion_frames[0-config.data.n_motion_frames:]
motion_frames = motion_frames * 2.0 - 1.0
motion_frames = motion_frames.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_frames], dim=0) # concat the ref image and the motion frames
pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
audio_tensor = audio_emb[
t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
]
audio_tensor = audio_tensor.unsqueeze(0)
audio_tensor = audio_tensor.to(
device=net.audioproj.device, dtype=net.audioproj.dtype)
audio_tensor = net.audioproj(audio_tensor)
pipeline_output = pipeline(
ref_image=pixel_values_ref_img,
audio_tensor=audio_tensor,
face_emb=source_image_face_emb,
face_mask=source_image_face_region,
pixel_values_full_mask=source_image_full_mask,
pixel_values_face_mask=source_image_face_mask,
pixel_values_lip_mask=source_image_lip_mask,
width=img_size[0],
height=img_size[1],
video_length=clip_length,
num_inference_steps=config.inference_steps,
guidance_scale=config.cfg_scale,
generator=generator,
motion_scale=motion_scale,
)
tensor_result.append(pipeline_output.videos)
tensor_result = torch.cat(tensor_result, dim=2)
tensor_result = tensor_result.squeeze(0)
tensor_result = tensor_result[:, :audio_length]
output_file = config.output
# save the result after all iteration
tensor_to_video(tensor_result, output_file, driving_audio_path)
return output_file
|
Perform inference processing.
Args:
args (argparse.Namespace): Command-line arguments.
This function initializes the configuration for the inference process. It sets up the necessary
modules and variables to prepare for the upcoming inference steps.
|
inference_process
|
python
|
fudan-generative-vision/hallo
|
scripts/inference.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/inference.py
|
MIT
|
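The chunked generation loop is easiest to follow with shapes only. The sketch below uses dummy tensors and hypothetical sizes to mirror how the windowed audio embedding is cut into clip_length segments and how the last n_motion_frames of the previous clip are re-fed as motion frames; the pipeline call itself is elided:

import torch

clip_length, n_motion_frames = 16, 2
audio_emb = torch.randn(40, 5, 12, 768)          # already windowed, 40 frames (made-up size)
source_image = torch.randn(1, 3, 512, 512)
prev_videos = None

for t in range(audio_emb.shape[0] // clip_length):
    if prev_videos is None:
        motion = source_image.repeat(n_motion_frames, 1, 1, 1)   # first clip: repeat the reference
    else:
        frames = prev_videos[0].permute(1, 0, 2, 3)              # (f, c, h, w)
        motion = frames[-n_motion_frames:] * 2.0 - 1.0           # last frames, rescaled to [-1, 1]
    ref_and_motion = torch.cat([source_image, motion], dim=0).unsqueeze(0)
    audio_clip = audio_emb[t * clip_length:(t + 1) * clip_length].unsqueeze(0)
    # ... pipeline(ref_image=ref_and_motion, audio_tensor=audio_clip, video_length=clip_length, ...) ...
    prev_videos = torch.rand(1, 3, clip_length, 512, 512)        # stand-in for pipeline_output.videos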
def forward(
self,
noisy_latents,
timesteps,
ref_image_latents,
face_emb,
face_mask,
uncond_fwd: bool = False,
):
"""
Forward pass of the model.
Args:
self (Net): The model instance.
noisy_latents (torch.Tensor): Noisy latents.
timesteps (torch.Tensor): Timesteps.
ref_image_latents (torch.Tensor): Reference image latents.
face_emb (torch.Tensor): Face embedding.
face_mask (torch.Tensor): Face mask.
uncond_fwd (bool, optional): Unconditional forward pass. Defaults to False.
Returns:
torch.Tensor: Model prediction.
"""
face_emb = self.imageproj(face_emb)
face_mask = face_mask.to(device="cuda")
face_mask_feature = self.face_locator(face_mask)
if not uncond_fwd:
ref_timesteps = torch.zeros_like(timesteps)
self.reference_unet(
ref_image_latents,
ref_timesteps,
encoder_hidden_states=face_emb,
return_dict=False,
)
self.reference_control_reader.update(self.reference_control_writer)
model_pred = self.denoising_unet(
noisy_latents,
timesteps,
mask_cond_fea=face_mask_feature,
encoder_hidden_states=face_emb,
).sample
return model_pred
|
Forward pass of the model.
Args:
self (Net): The model instance.
noisy_latents (torch.Tensor): Noisy latents.
timesteps (torch.Tensor): Timesteps.
ref_image_latents (torch.Tensor): Reference image latents.
face_emb (torch.Tensor): Face embedding.
face_mask (torch.Tensor): Face mask.
uncond_fwd (bool, optional): Unconditional forward pass. Defaults to False.
Returns:
torch.Tensor: Model prediction.
|
forward
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage1.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage1.py
|
MIT
|
def get_noise_scheduler(cfg: argparse.Namespace):
"""
Create noise scheduler for training
Args:
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
Returns:
train noise scheduler and val noise scheduler
"""
sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
if cfg.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
return train_noise_scheduler, val_noise_scheduler
|
Create noise scheduler for training
Args:
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
Returns:
train noise scheduler and val noise scheduler
|
get_noise_scheduler
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage1.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage1.py
|
MIT
|
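A hedged sketch of the scheduler pair, with an example kwargs dict standing in for cfg.noise_scheduler_kwargs; the values are assumptions, and a reasonably recent diffusers release is assumed for rescale_betas_zero_snr and timestep_spacing:

from diffusers import DDIMScheduler

sched_kwargs = {                      # hypothetical stand-in for cfg.noise_scheduler_kwargs
    "num_train_timesteps": 1000,
    "beta_start": 0.00085,
    "beta_end": 0.012,
    "beta_schedule": "linear",
    "steps_offset": 1,
    "clip_sample": False,
}
# zero-SNR variant used for the validation scheduler when enable_zero_snr is set
sched_kwargs.update(
    rescale_betas_zero_snr=True,
    timestep_spacing="trailing",
    prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
print(train_noise_scheduler.config.prediction_type)  # v_prediction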
def log_validation(
vae,
net,
scheduler,
accelerator,
width,
height,
imageproj,
cfg,
save_dir,
global_step,
face_analysis_model_path,
):
"""
Log validation generation image.
Args:
vae (nn.Module): Variational Autoencoder model.
net (Net): Main model.
scheduler (diffusers.SchedulerMixin): Noise scheduler.
accelerator (accelerate.Accelerator): Accelerator for training.
width (int): Width of the input images.
height (int): Height of the input images.
imageproj (nn.Module): Image projection model.
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
save_dir (str): directory path to save log result.
global_step (int): Global step number.
face_analysis_model_path (str): Path to the face analysis model directory.
Returns:
list: PIL images produced during validation (empty in the current implementation; results are written to disk).
"""
logger.info("Running validation... ")
ori_net = accelerator.unwrap_model(net)
ori_net = copy.deepcopy(ori_net)
reference_unet = ori_net.reference_unet
denoising_unet = ori_net.denoising_unet
face_locator = ori_net.face_locator
generator = torch.manual_seed(42)
image_enc = FaceAnalysis(
name="",
root=face_analysis_model_path,
providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)
image_enc.prepare(ctx_id=0, det_size=(640, 640))
pipe = StaticPipeline(
vae=vae,
reference_unet=reference_unet,
denoising_unet=denoising_unet,
face_locator=face_locator,
scheduler=scheduler,
imageproj=imageproj,
)
pil_images = []
for ref_image_path, mask_image_path in zip(cfg.ref_image_paths, cfg.mask_image_paths):
# for mask_image_path in mask_image_paths:
mask_name = os.path.splitext(
os.path.basename(mask_image_path))[0]
ref_name = os.path.splitext(
os.path.basename(ref_image_path))[0]
ref_image_pil = Image.open(ref_image_path).convert("RGB")
mask_image_pil = Image.open(mask_image_path).convert("RGB")
# Prepare face embeds
face_info = image_enc.get(
cv2.cvtColor(np.array(ref_image_pil), cv2.COLOR_RGB2BGR))
face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (
x['bbox'][3] - x['bbox'][1]))[-1] # only use the maximum face
face_emb = torch.tensor(face_info['embedding'])
face_emb = face_emb.to(
imageproj.device, imageproj.dtype)
image = pipe(
ref_image_pil,
mask_image_pil,
width,
height,
20,
3.5,
face_emb,
generator=generator,
).images
image = image[0, :, 0].permute(1, 2, 0).cpu().numpy() # (3, 512, 512)
res_image_pil = Image.fromarray((image * 255).astype(np.uint8))
# Save ref_image, src_image and the generated_image
w, h = res_image_pil.size
canvas = Image.new("RGB", (w * 3, h), "white")
ref_image_pil = ref_image_pil.resize((w, h))
mask_image_pil = mask_image_pil.resize((w, h))
canvas.paste(ref_image_pil, (0, 0))
canvas.paste(mask_image_pil, (w, 0))
canvas.paste(res_image_pil, (w * 2, 0))
out_file = os.path.join(
save_dir, f"{global_step:06d}-{ref_name}_{mask_name}.jpg"
)
canvas.save(out_file)
del pipe
del ori_net
torch.cuda.empty_cache()
return pil_images
|
Log validation generation image.
Args:
vae (nn.Module): Variational Autoencoder model.
net (Net): Main model.
scheduler (diffusers.SchedulerMixin): Noise scheduler.
accelerator (accelerate.Accelerator): Accelerator for training.
width (int): Width of the input images.
height (int): Height of the input images.
imageproj (nn.Module): Image projection model.
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
save_dir (str): directory path to save log result.
global_step (int): Global step number.
face_analysis_model_path (str): Path to the face analysis model directory.
Returns:
list: PIL images produced during validation (empty in the current implementation; results are written to disk).
|
log_validation
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage1.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage1.py
|
MIT
|
def train_stage1_process(cfg: argparse.Namespace) -> None:
"""
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
"""
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(
gradient_accumulation_steps=cfg.solver.gradient_accumulation_steps,
mixed_precision=cfg.solver.mixed_precision,
log_with="mlflow",
project_dir="./mlruns",
kwargs_handlers=[kwargs],
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if cfg.seed is not None:
seed_everything(cfg.seed)
# create output dir for training
exp_name = cfg.exp_name
save_dir = f"{cfg.output_dir}/{exp_name}"
checkpoint_dir = os.path.join(save_dir, "checkpoints")
module_dir = os.path.join(save_dir, "modules")
validation_dir = os.path.join(save_dir, "validation")
if accelerator.is_main_process:
init_output_dir([save_dir, checkpoint_dir, module_dir, validation_dir])
accelerator.wait_for_everyone()
# create model
if cfg.weight_dtype == "fp16":
weight_dtype = torch.float16
elif cfg.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif cfg.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
raise ValueError(
f"Do not support weight dtype: {cfg.weight_dtype} during training"
)
# create model
vae = AutoencoderKL.from_pretrained(cfg.vae_model_path).to(
"cuda", dtype=weight_dtype
)
reference_unet = UNet2DConditionModel.from_pretrained(
cfg.base_model_path,
subfolder="unet",
).to(device="cuda", dtype=weight_dtype)
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
cfg.base_model_path,
"",
subfolder="unet",
unet_additional_kwargs={
"use_motion_module": False,
"unet_use_temporal_attention": False,
},
use_landmark=False
).to(device="cuda", dtype=weight_dtype)
imageproj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
).to(device="cuda", dtype=weight_dtype)
if cfg.face_locator_pretrained:
face_locator = FaceLocator(
conditioning_embedding_channels=320, block_out_channels=(16, 32, 96, 256)
).to(device="cuda", dtype=weight_dtype)
miss, _ = face_locator.load_state_dict(
torch.load(cfg.face_state_dict_path, map_location="cpu"), strict=False)
logger.info(f"Missing key for face locator: {len(miss)}")
else:
face_locator = FaceLocator(
conditioning_embedding_channels=320,
).to(device="cuda", dtype=weight_dtype)
# Freeze
vae.requires_grad_(False)
denoising_unet.requires_grad_(True)
reference_unet.requires_grad_(True)
imageproj.requires_grad_(True)
face_locator.requires_grad_(True)
reference_control_writer = ReferenceAttentionControl(
reference_unet,
do_classifier_free_guidance=False,
mode="write",
fusion_blocks="full",
)
reference_control_reader = ReferenceAttentionControl(
denoising_unet,
do_classifier_free_guidance=False,
mode="read",
fusion_blocks="full",
)
net = Net(
reference_unet,
denoising_unet,
face_locator,
reference_control_writer,
reference_control_reader,
imageproj,
).to(dtype=weight_dtype)
# get noise scheduler
train_noise_scheduler, val_noise_scheduler = get_noise_scheduler(cfg)
# init optimizer
if cfg.solver.enable_xformers_memory_efficient_attention:
if is_xformers_available():
reference_unet.enable_xformers_memory_efficient_attention()
denoising_unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError(
"xformers is not available. Make sure it is installed correctly"
)
if cfg.solver.gradient_checkpointing:
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
if cfg.solver.scale_lr:
learning_rate = (
cfg.solver.learning_rate
* cfg.solver.gradient_accumulation_steps
* cfg.data.train_bs
* accelerator.num_processes
)
else:
learning_rate = cfg.solver.learning_rate
# Initialize the optimizer
if cfg.solver.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError as exc:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
) from exc
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
trainable_params = list(
filter(lambda p: p.requires_grad, net.parameters()))
optimizer = optimizer_cls(
trainable_params,
lr=learning_rate,
betas=(cfg.solver.adam_beta1, cfg.solver.adam_beta2),
weight_decay=cfg.solver.adam_weight_decay,
eps=cfg.solver.adam_epsilon,
)
# init scheduler
lr_scheduler = get_scheduler(
cfg.solver.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=cfg.solver.lr_warmup_steps
* cfg.solver.gradient_accumulation_steps,
num_training_steps=cfg.solver.max_train_steps
* cfg.solver.gradient_accumulation_steps,
)
# get data loader
train_dataset = FaceMaskDataset(
img_size=(cfg.data.train_width, cfg.data.train_height),
data_meta_paths=cfg.data.meta_paths,
sample_margin=cfg.data.sample_margin,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.data.train_bs, shuffle=True, num_workers=4
)
# Prepare everything with our `accelerator`.
(
net,
optimizer,
train_dataloader,
lr_scheduler,
) = accelerator.prepare(
net,
optimizer,
train_dataloader,
lr_scheduler,
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / cfg.solver.gradient_accumulation_steps
)
# Afterwards we recalculate our number of training epochs
num_train_epochs = math.ceil(
cfg.solver.max_train_steps / num_update_steps_per_epoch
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
run_time = datetime.now().strftime("%Y%m%d-%H%M")
accelerator.init_trackers(
cfg.exp_name,
init_kwargs={"mlflow": {"run_name": run_time}},
)
# dump config file
mlflow.log_dict(OmegaConf.to_container(cfg), "config.yaml")
logger.info(f"save config to {save_dir}")
OmegaConf.save(
cfg, os.path.join(save_dir, "config.yaml")
)
# Train!
total_batch_size = (
cfg.data.train_bs
* accelerator.num_processes
* cfg.solver.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {cfg.data.train_bs}")
logger.info(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
logger.info(
f" Gradient Accumulation steps = {cfg.solver.gradient_accumulation_steps}"
)
logger.info(f" Total optimization steps = {cfg.solver.max_train_steps}")
global_step = 0
first_epoch = 0
# load checkpoint
# Potentially load in the weights and states from a previous save
if cfg.resume_from_checkpoint:
logger.info(f"Loading checkpoint from {checkpoint_dir}")
global_step = load_checkpoint(cfg, checkpoint_dir, accelerator)
first_epoch = global_step // num_update_steps_per_epoch
# Only show the progress bar once on each machine.
progress_bar = tqdm(
range(global_step, cfg.solver.max_train_steps),
disable=not accelerator.is_main_process,
)
progress_bar.set_description("Steps")
net.train()
for _ in range(first_epoch, num_train_epochs):
train_loss = 0.0
for _, batch in enumerate(train_dataloader):
with accelerator.accumulate(net):
# Convert videos to latent space
pixel_values = batch["img"].to(weight_dtype)
with torch.no_grad():
latents = vae.encode(pixel_values).latent_dist.sample()
latents = latents.unsqueeze(2) # (b, c, 1, h, w)
latents = latents * 0.18215
noise = torch.randn_like(latents)
if cfg.noise_offset > 0.0:
noise += cfg.noise_offset * torch.randn(
(noise.shape[0], noise.shape[1], 1, 1, 1),
device=noise.device,
)
bsz = latents.shape[0]
# Sample a random timestep for each video
timesteps = torch.randint(
0,
train_noise_scheduler.num_train_timesteps,
(bsz,),
device=latents.device,
)
timesteps = timesteps.long()
face_mask_img = batch["tgt_mask"]
face_mask_img = face_mask_img.unsqueeze(
2)
face_mask_img = face_mask_img.to(weight_dtype)
uncond_fwd = random.random() < cfg.uncond_ratio
face_emb_list = []
ref_image_list = []
for _, (ref_img, face_emb) in enumerate(
zip(batch["ref_img"], batch["face_emb"])
):
if uncond_fwd:
face_emb_list.append(torch.zeros_like(face_emb))
else:
face_emb_list.append(face_emb)
ref_image_list.append(ref_img)
with torch.no_grad():
ref_img = torch.stack(ref_image_list, dim=0).to(
dtype=vae.dtype, device=vae.device
)
ref_image_latents = vae.encode(
ref_img
).latent_dist.sample()
ref_image_latents = ref_image_latents * 0.18215
face_emb = torch.stack(face_emb_list, dim=0).to(
dtype=imageproj.dtype, device=imageproj.device
)
# add noise
noisy_latents = train_noise_scheduler.add_noise(
latents, noise, timesteps
)
# Get the target for loss depending on the prediction type
if train_noise_scheduler.prediction_type == "epsilon":
target = noise
elif train_noise_scheduler.prediction_type == "v_prediction":
target = train_noise_scheduler.get_velocity(
latents, noise, timesteps
)
else:
raise ValueError(
f"Unknown prediction type {train_noise_scheduler.prediction_type}"
)
model_pred = net(
noisy_latents,
timesteps,
ref_image_latents,
face_emb,
face_mask_img,
uncond_fwd,
)
if cfg.snr_gamma == 0:
loss = F.mse_loss(
model_pred.float(), target.float(), reduction="mean"
)
else:
snr = compute_snr(train_noise_scheduler, timesteps)
if train_noise_scheduler.config.prediction_type == "v_prediction":
# Velocity objective requires that we add one to SNR values before we divide by them.
snr = snr + 1
mse_loss_weights = (
torch.stack(
[snr, cfg.snr_gamma * torch.ones_like(timesteps)], dim=1
).min(dim=1)[0]
/ snr
)
loss = F.mse_loss(
model_pred.float(), target.float(), reduction="none"
)
loss = (
loss.mean(dim=list(range(1, len(loss.shape))))
* mse_loss_weights
)
loss = loss.mean()
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(
loss.repeat(cfg.data.train_bs)).mean()
train_loss += avg_loss.item() / cfg.solver.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(
trainable_params,
cfg.solver.max_grad_norm,
)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
reference_control_reader.clear()
reference_control_writer.clear()
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if global_step % cfg.checkpointing_steps == 0 or global_step == cfg.solver.max_train_steps:
accelerator.wait_for_everyone()
save_path = os.path.join(
checkpoint_dir, f"checkpoint-{global_step}")
if accelerator.is_main_process:
delete_additional_ckpt(checkpoint_dir, 3)
accelerator.save_state(save_path)
accelerator.wait_for_everyone()
unwrap_net = accelerator.unwrap_model(net)
if accelerator.is_main_process:
save_checkpoint(
unwrap_net.reference_unet,
module_dir,
"reference_unet",
global_step,
total_limit=3,
)
save_checkpoint(
unwrap_net.imageproj,
module_dir,
"imageproj",
global_step,
total_limit=3,
)
save_checkpoint(
unwrap_net.denoising_unet,
module_dir,
"denoising_unet",
global_step,
total_limit=3,
)
save_checkpoint(
unwrap_net.face_locator,
module_dir,
"face_locator",
global_step,
total_limit=3,
)
if global_step % cfg.val.validation_steps == 0 or global_step == 1:
if accelerator.is_main_process:
generator = torch.Generator(device=accelerator.device)
generator.manual_seed(cfg.seed)
log_validation(
vae=vae,
net=net,
scheduler=val_noise_scheduler,
accelerator=accelerator,
width=cfg.data.train_width,
height=cfg.data.train_height,
imageproj=imageproj,
cfg=cfg,
save_dir=validation_dir,
global_step=global_step,
face_analysis_model_path=cfg.face_analysis_model_path
)
logs = {
"step_loss": loss.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
}
progress_bar.set_postfix(**logs)
if global_step >= cfg.solver.max_train_steps:
# process final module weight for stage2
if accelerator.is_main_process:
move_final_checkpoint(save_dir, module_dir, "reference_unet")
move_final_checkpoint(save_dir, module_dir, "imageproj")
move_final_checkpoint(save_dir, module_dir, "denoising_unet")
move_final_checkpoint(save_dir, module_dir, "face_locator")
break
accelerator.wait_for_everyone()
accelerator.end_training()
|
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
|
train_stage1_process
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage1.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage1.py
|
MIT
|
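The snr_gamma branch in the loss above is min-SNR weighting. A small sketch with dummy values showing how the per-sample weights are formed, assuming compute_snr has already produced the SNR values (the numbers are made up):

import torch
import torch.nn.functional as F

snr = torch.tensor([0.5, 2.0, 10.0, 50.0])      # hypothetical SNR per sampled timestep
snr_gamma = 5.0
# weight_i = min(snr_i, gamma) / snr_i  -> caps the influence of high-SNR (low-noise) timesteps
mse_loss_weights = torch.stack([snr, snr_gamma * torch.ones_like(snr)], dim=1).min(dim=1)[0] / snr
print(mse_loss_weights)                          # tensor([1.0000, 1.0000, 0.5000, 0.1000])

model_pred = torch.randn(4, 4, 1, 8, 8)
target = torch.randn_like(model_pred)
loss = F.mse_loss(model_pred, target, reduction="none")
loss = (loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights).mean()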
def load_config(config_path: str) -> dict:
"""
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
"""
if config_path.endswith(".yaml"):
return OmegaConf.load(config_path)
if config_path.endswith(".py"):
return import_filename(config_path).cfg
raise ValueError("Unsupported format for config file")
|
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
|
load_config
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage1.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage1.py
|
MIT
|
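A tiny sketch of the YAML branch, writing a temporary config so the call is reproducible; the keys are hypothetical:

import os
import tempfile
from omegaconf import OmegaConf

yaml_text = "exp_name: demo\nsolver:\n  learning_rate: 1.0e-5\n"
with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
    f.write(yaml_text)
    config_path = f.name

cfg = OmegaConf.load(config_path)   # what load_config does for *.yaml files
print(cfg.solver.learning_rate)     # 1e-05
os.remove(config_path)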
def forward(
self,
noisy_latents: torch.Tensor,
timesteps: torch.Tensor,
ref_image_latents: torch.Tensor,
face_emb: torch.Tensor,
audio_emb: torch.Tensor,
mask: torch.Tensor,
full_mask: torch.Tensor,
face_mask: torch.Tensor,
lip_mask: torch.Tensor,
uncond_img_fwd: bool = False,
uncond_audio_fwd: bool = False,
):
"""
Forward pass: project face and audio embeddings, optionally run the reference UNet and sync its attention states, then predict noise with the denoising UNet conditioned on the masks.
"""
face_emb = self.imageproj(face_emb)
mask = mask.to(device="cuda")
mask_feature = self.face_locator(mask)
audio_emb = audio_emb.to(
device=self.audioproj.device, dtype=self.audioproj.dtype)
audio_emb = self.audioproj(audio_emb)
# condition forward
if not uncond_img_fwd:
ref_timesteps = torch.zeros_like(timesteps)
ref_timesteps = repeat(
ref_timesteps,
"b -> (repeat b)",
repeat=ref_image_latents.size(0) // ref_timesteps.size(0),
)
self.reference_unet(
ref_image_latents,
ref_timesteps,
encoder_hidden_states=face_emb,
return_dict=False,
)
self.reference_control_reader.update(self.reference_control_writer)
if uncond_audio_fwd:
audio_emb = torch.zeros_like(audio_emb).to(
device=audio_emb.device, dtype=audio_emb.dtype
)
model_pred = self.denoising_unet(
noisy_latents,
timesteps,
mask_cond_fea=mask_feature,
encoder_hidden_states=face_emb,
audio_embedding=audio_emb,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask
).sample
return model_pred
|
Forward pass: project face and audio embeddings, optionally run the reference UNet and sync its attention states, then predict noise with the denoising UNet conditioned on the masks.
|
forward
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage2.py
|
MIT
|
def get_attention_mask(mask: torch.Tensor, weight_dtype: torch.dtype) -> torch.Tensor:
"""
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor or List[torch.Tensor]): The input mask tensor(s); a list is rearranged element-wise.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
"""
if isinstance(mask, List):
_mask = []
for m in mask:
_mask.append(
rearrange(m, "b f 1 h w -> (b f) (h w)").to(weight_dtype))
return _mask
mask = rearrange(mask, "b f 1 h w -> (b f) (h w)").to(weight_dtype)
return mask
|
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor or List[torch.Tensor]): The input mask tensor(s); a list is rearranged element-wise.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
|
get_attention_mask
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage2.py
|
MIT
|
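The rearrange can be checked with a dummy mask tensor; only torch and einops are assumed, and the sizes are illustrative:

import torch
from einops import rearrange

mask = torch.rand(2, 16, 1, 64, 64)                      # (b, f, 1, h, w)
flat = rearrange(mask, "b f 1 h w -> (b f) (h w)").to(torch.float16)
print(flat.shape)                                         # torch.Size([32, 4096])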
def get_noise_scheduler(cfg: argparse.Namespace) -> Tuple[DDIMScheduler, DDIMScheduler]:
"""
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
"""
sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
if cfg.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
return train_noise_scheduler, val_noise_scheduler
|
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
|
get_noise_scheduler
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage2.py
|
MIT
|
def process_audio_emb(audio_emb: torch.Tensor) -> torch.Tensor:
"""
Process the audio embedding to concatenate with other tensors.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor to process.
Returns:
concatenated_tensors (List[torch.Tensor]): The concatenated tensor list.
"""
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [
audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)] for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
return audio_emb
|
Process the audio embedding to concatenate with other tensors.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor to process.
Returns:
concatenated_tensors (List[torch.Tensor]): The concatenated tensor list.
|
process_audio_emb
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage2.py
|
MIT
|
def log_validation(
accelerator: Accelerator,
vae: AutoencoderKL,
net: Net,
scheduler: DDIMScheduler,
width: int,
height: int,
clip_length: int = 24,
generator: torch.Generator = None,
cfg: dict = None,
save_dir: str = None,
global_step: int = 0,
times: int = None,
face_analysis_model_path: str = "",
) -> None:
"""
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
torch.Tensor: The tensor result of the validation.
"""
ori_net = accelerator.unwrap_model(net)
reference_unet = ori_net.reference_unet
denoising_unet = ori_net.denoising_unet
face_locator = ori_net.face_locator
imageproj = ori_net.imageproj
audioproj = ori_net.audioproj
generator = torch.manual_seed(42)
tmp_denoising_unet = copy.deepcopy(denoising_unet)
pipeline = FaceAnimatePipeline(
vae=vae,
reference_unet=reference_unet,
denoising_unet=tmp_denoising_unet,
face_locator=face_locator,
image_proj=imageproj,
scheduler=scheduler,
)
pipeline = pipeline.to("cuda")
image_processor = ImageProcessor((width, height), face_analysis_model_path)
audio_processor = AudioProcessor(
cfg.data.sample_rate,
cfg.data.fps,
cfg.wav2vec_config.model_path,
cfg.wav2vec_config.features == "last",
os.path.dirname(cfg.audio_separator.model_path),
os.path.basename(cfg.audio_separator.model_path),
os.path.join(save_dir, '.cache', "audio_preprocess")
)
for idx, ref_img_path in enumerate(cfg.ref_img_path):
audio_path = cfg.audio_path[idx]
source_image_pixels, \
source_image_face_region, \
source_image_face_emb, \
source_image_full_mask, \
source_image_face_mask, \
source_image_lip_mask = image_processor.preprocess(
ref_img_path, os.path.join(save_dir, '.cache'), cfg.face_expand_ratio)
audio_emb, audio_length = audio_processor.preprocess(
audio_path, clip_length)
audio_emb = process_audio_emb(audio_emb)
source_image_pixels = source_image_pixels.unsqueeze(0)
source_image_face_region = source_image_face_region.unsqueeze(0)
source_image_face_emb = source_image_face_emb.reshape(1, -1)
source_image_face_emb = torch.tensor(source_image_face_emb)
source_image_full_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_full_mask
]
source_image_face_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_face_mask
]
source_image_lip_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_lip_mask
]
times = audio_emb.shape[0] // clip_length
tensor_result = []
generator = torch.manual_seed(42)
for t in range(times):
print(f"[{t+1}/{times}]")
if len(tensor_result) == 0:
# The first iteration
motion_zeros = source_image_pixels.repeat(
cfg.data.n_motion_frames, 1, 1, 1)
motion_zeros = motion_zeros.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_zeros], dim=0) # concat the ref image and the first motion frames
else:
motion_frames = tensor_result[-1][0]
motion_frames = motion_frames.permute(1, 0, 2, 3)
motion_frames = motion_frames[0 - cfg.data.n_motion_frames:]
motion_frames = motion_frames * 2.0 - 1.0
motion_frames = motion_frames.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_frames], dim=0) # concat the ref image and the motion frames
pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
audio_tensor = audio_emb[
t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
]
audio_tensor = audio_tensor.unsqueeze(0)
audio_tensor = audio_tensor.to(
device=audioproj.device, dtype=audioproj.dtype)
audio_tensor = audioproj(audio_tensor)
pipeline_output = pipeline(
ref_image=pixel_values_ref_img,
audio_tensor=audio_tensor,
face_emb=source_image_face_emb,
face_mask=source_image_face_region,
pixel_values_full_mask=source_image_full_mask,
pixel_values_face_mask=source_image_face_mask,
pixel_values_lip_mask=source_image_lip_mask,
width=cfg.data.train_width,
height=cfg.data.train_height,
video_length=clip_length,
num_inference_steps=cfg.inference_steps,
guidance_scale=cfg.cfg_scale,
generator=generator,
)
tensor_result.append(pipeline_output.videos)
tensor_result = torch.cat(tensor_result, dim=2)
tensor_result = tensor_result.squeeze(0)
tensor_result = tensor_result[:, :audio_length]
audio_name = os.path.basename(audio_path).split('.')[0]
ref_name = os.path.basename(ref_img_path).split('.')[0]
output_file = os.path.join(save_dir,f"{global_step}_{ref_name}_{audio_name}.mp4")
# save the result after all iteration
tensor_to_video(tensor_result, output_file, audio_path)
# clean up
del tmp_denoising_unet
del pipeline
del image_processor
del audio_processor
torch.cuda.empty_cache()
return tensor_result
|
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
torch.Tensor: The tensor result of the validation.
|
log_validation
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage2.py
|
MIT
|
def train_stage2_process(cfg: argparse.Namespace) -> None:
"""
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
"""
kwargs = DistributedDataParallelKwargs(find_unused_parameters=False)
accelerator = Accelerator(
gradient_accumulation_steps=cfg.solver.gradient_accumulation_steps,
mixed_precision=cfg.solver.mixed_precision,
log_with="mlflow",
project_dir="./mlruns",
kwargs_handlers=[kwargs],
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if cfg.seed is not None:
seed_everything(cfg.seed)
# create output dir for training
exp_name = cfg.exp_name
save_dir = f"{cfg.output_dir}/{exp_name}"
checkpoint_dir = os.path.join(save_dir, "checkpoints")
module_dir = os.path.join(save_dir, "modules")
validation_dir = os.path.join(save_dir, "validation")
if accelerator.is_main_process:
init_output_dir([save_dir, checkpoint_dir, module_dir, validation_dir])
accelerator.wait_for_everyone()
if cfg.weight_dtype == "fp16":
weight_dtype = torch.float16
elif cfg.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif cfg.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
raise ValueError(
f"Do not support weight dtype: {cfg.weight_dtype} during training"
)
# Create Models
vae = AutoencoderKL.from_pretrained(cfg.vae_model_path).to(
"cuda", dtype=weight_dtype
)
reference_unet = UNet2DConditionModel.from_pretrained(
cfg.base_model_path,
subfolder="unet",
).to(device="cuda", dtype=weight_dtype)
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
cfg.base_model_path,
cfg.mm_path,
subfolder="unet",
unet_additional_kwargs=OmegaConf.to_container(
cfg.unet_additional_kwargs),
use_landmark=False
).to(device="cuda", dtype=weight_dtype)
imageproj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
).to(device="cuda", dtype=weight_dtype)
face_locator = FaceLocator(
conditioning_embedding_channels=320,
).to(device="cuda", dtype=weight_dtype)
audioproj = AudioProjModel(
seq_len=5,
blocks=12,
channels=768,
intermediate_dim=512,
output_dim=768,
context_tokens=32,
).to(device="cuda", dtype=weight_dtype)
# load module weight from stage 1
stage1_ckpt_dir = cfg.stage1_ckpt_dir
denoising_unet.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "denoising_unet.pth"),
map_location="cpu",
),
strict=False,
)
reference_unet.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "reference_unet.pth"),
map_location="cpu",
),
strict=False,
)
face_locator.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "face_locator.pth"),
map_location="cpu",
),
strict=False,
)
imageproj.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "imageproj.pth"),
map_location="cpu",
),
strict=False,
)
# Freeze
vae.requires_grad_(False)
imageproj.requires_grad_(False)
reference_unet.requires_grad_(False)
denoising_unet.requires_grad_(False)
face_locator.requires_grad_(False)
audioproj.requires_grad_(True)
# Set motion module learnable
trainable_modules = cfg.trainable_para
for name, module in denoising_unet.named_modules():
if any(trainable_mod in name for trainable_mod in trainable_modules):
for params in module.parameters():
params.requires_grad_(True)
reference_control_writer = ReferenceAttentionControl(
reference_unet,
do_classifier_free_guidance=False,
mode="write",
fusion_blocks="full",
)
reference_control_reader = ReferenceAttentionControl(
denoising_unet,
do_classifier_free_guidance=False,
mode="read",
fusion_blocks="full",
)
net = Net(
reference_unet,
denoising_unet,
face_locator,
reference_control_writer,
reference_control_reader,
imageproj,
audioproj,
).to(dtype=weight_dtype)
# get noise scheduler
train_noise_scheduler, val_noise_scheduler = get_noise_scheduler(cfg)
if cfg.solver.enable_xformers_memory_efficient_attention:
if is_xformers_available():
reference_unet.enable_xformers_memory_efficient_attention()
denoising_unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError(
"xformers is not available. Make sure it is installed correctly"
)
if cfg.solver.gradient_checkpointing:
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
if cfg.solver.scale_lr:
learning_rate = (
cfg.solver.learning_rate
* cfg.solver.gradient_accumulation_steps
* cfg.data.train_bs
* accelerator.num_processes
)
else:
learning_rate = cfg.solver.learning_rate
# Initialize the optimizer
if cfg.solver.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError as exc:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
) from exc
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
trainable_params = list(
filter(lambda p: p.requires_grad, net.parameters()))
logger.info(f"Total trainable params {len(trainable_params)}")
optimizer = optimizer_cls(
trainable_params,
lr=learning_rate,
betas=(cfg.solver.adam_beta1, cfg.solver.adam_beta2),
weight_decay=cfg.solver.adam_weight_decay,
eps=cfg.solver.adam_epsilon,
)
# Scheduler
lr_scheduler = get_scheduler(
cfg.solver.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=cfg.solver.lr_warmup_steps
* cfg.solver.gradient_accumulation_steps,
num_training_steps=cfg.solver.max_train_steps
* cfg.solver.gradient_accumulation_steps,
)
# get data loader
train_dataset = TalkingVideoDataset(
img_size=(cfg.data.train_width, cfg.data.train_height),
sample_rate=cfg.data.sample_rate,
n_sample_frames=cfg.data.n_sample_frames,
n_motion_frames=cfg.data.n_motion_frames,
audio_margin=cfg.data.audio_margin,
data_meta_paths=cfg.data.train_meta_paths,
wav2vec_cfg=cfg.wav2vec_config,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.data.train_bs, shuffle=True, num_workers=16
)
# Prepare everything with our `accelerator`.
(
net,
optimizer,
train_dataloader,
lr_scheduler,
) = accelerator.prepare(
net,
optimizer,
train_dataloader,
lr_scheduler,
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / cfg.solver.gradient_accumulation_steps
)
# Afterwards we recalculate our number of training epochs
num_train_epochs = math.ceil(
cfg.solver.max_train_steps / num_update_steps_per_epoch
)
# We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
if accelerator.is_main_process:
run_time = datetime.now().strftime("%Y%m%d-%H%M")
accelerator.init_trackers(
exp_name,
init_kwargs={"mlflow": {"run_name": run_time}},
)
# dump config file
mlflow.log_dict(
OmegaConf.to_container(
cfg), "config.yaml"
)
logger.info(f"save config to {save_dir}")
OmegaConf.save(
cfg, os.path.join(save_dir, "config.yaml")
)
# Train!
total_batch_size = (
cfg.data.train_bs
* accelerator.num_processes
* cfg.solver.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {cfg.data.train_bs}")
logger.info(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
logger.info(
f" Gradient Accumulation steps = {cfg.solver.gradient_accumulation_steps}"
)
logger.info(f" Total optimization steps = {cfg.solver.max_train_steps}")
global_step = 0
first_epoch = 0
    # Potentially load in the weights and states from a previous save
if cfg.resume_from_checkpoint:
logger.info(f"Loading checkpoint from {checkpoint_dir}")
global_step = load_checkpoint(cfg, checkpoint_dir, accelerator)
first_epoch = global_step // num_update_steps_per_epoch
# Only show the progress bar once on each machine.
progress_bar = tqdm(
range(global_step, cfg.solver.max_train_steps),
disable=not accelerator.is_local_main_process,
)
progress_bar.set_description("Steps")
for _ in range(first_epoch, num_train_epochs):
train_loss = 0.0
t_data_start = time.time()
for _, batch in enumerate(train_dataloader):
t_data = time.time() - t_data_start
with accelerator.accumulate(net):
# Convert videos to latent space
pixel_values_vid = batch["pixel_values_vid"].to(weight_dtype)
pixel_values_face_mask = batch["pixel_values_face_mask"]
pixel_values_face_mask = get_attention_mask(
pixel_values_face_mask, weight_dtype
)
pixel_values_lip_mask = batch["pixel_values_lip_mask"]
pixel_values_lip_mask = get_attention_mask(
pixel_values_lip_mask, weight_dtype
)
pixel_values_full_mask = batch["pixel_values_full_mask"]
pixel_values_full_mask = get_attention_mask(
pixel_values_full_mask, weight_dtype
)
with torch.no_grad():
video_length = pixel_values_vid.shape[1]
pixel_values_vid = rearrange(
pixel_values_vid, "b f c h w -> (b f) c h w"
)
latents = vae.encode(pixel_values_vid).latent_dist.sample()
latents = rearrange(
latents, "(b f) c h w -> b c f h w", f=video_length
)
latents = latents * 0.18215
noise = torch.randn_like(latents)
if cfg.noise_offset > 0:
noise += cfg.noise_offset * torch.randn(
(latents.shape[0], latents.shape[1], 1, 1, 1),
device=latents.device,
)
bsz = latents.shape[0]
# Sample a random timestep for each video
timesteps = torch.randint(
0,
train_noise_scheduler.num_train_timesteps,
(bsz,),
device=latents.device,
)
timesteps = timesteps.long()
# mask for face locator
pixel_values_mask = (
batch["pixel_values_mask"].unsqueeze(
1).to(dtype=weight_dtype)
)
pixel_values_mask = repeat(
pixel_values_mask,
"b f c h w -> b (repeat f) c h w",
repeat=video_length,
)
pixel_values_mask = pixel_values_mask.transpose(
1, 2)
uncond_img_fwd = random.random() < cfg.uncond_img_ratio
uncond_audio_fwd = random.random() < cfg.uncond_audio_ratio
start_frame = random.random() < cfg.start_ratio
pixel_values_ref_img = batch["pixel_values_ref_img"].to(
dtype=weight_dtype
)
# initialize the motion frames as zero maps
if start_frame:
pixel_values_ref_img[:, 1:] = 0.0
ref_img_and_motion = rearrange(
pixel_values_ref_img, "b f c h w -> (b f) c h w"
)
with torch.no_grad():
ref_image_latents = vae.encode(
ref_img_and_motion
).latent_dist.sample()
ref_image_latents = ref_image_latents * 0.18215
image_prompt_embeds = batch["face_emb"].to(
dtype=imageproj.dtype, device=imageproj.device
)
# add noise
noisy_latents = train_noise_scheduler.add_noise(
latents, noise, timesteps
)
# Get the target for loss depending on the prediction type
if train_noise_scheduler.prediction_type == "epsilon":
target = noise
elif train_noise_scheduler.prediction_type == "v_prediction":
target = train_noise_scheduler.get_velocity(
latents, noise, timesteps
)
else:
raise ValueError(
f"Unknown prediction type {train_noise_scheduler.prediction_type}"
)
# ---- Forward!!! -----
model_pred = net(
noisy_latents=noisy_latents,
timesteps=timesteps,
ref_image_latents=ref_image_latents,
face_emb=image_prompt_embeds,
mask=pixel_values_mask,
full_mask=pixel_values_full_mask,
face_mask=pixel_values_face_mask,
lip_mask=pixel_values_lip_mask,
audio_emb=batch["audio_tensor"].to(
dtype=weight_dtype),
uncond_img_fwd=uncond_img_fwd,
uncond_audio_fwd=uncond_audio_fwd,
)
if cfg.snr_gamma == 0:
loss = F.mse_loss(
model_pred.float(),
target.float(),
reduction="mean",
)
else:
snr = compute_snr(train_noise_scheduler, timesteps)
if train_noise_scheduler.config.prediction_type == "v_prediction":
# Velocity objective requires that we add one to SNR values before we divide by them.
snr = snr + 1
mse_loss_weights = (
torch.stack(
[snr, cfg.snr_gamma * torch.ones_like(timesteps)], dim=1
).min(dim=1)[0]
/ snr
)
                    loss = F.mse_loss(
                        model_pred.float(),
                        target.float(),
                        reduction="none",
                    )
loss = (
loss.mean(dim=list(range(1, len(loss.shape))))
* mse_loss_weights
).mean()
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(
loss.repeat(cfg.data.train_bs)).mean()
train_loss += avg_loss.item() / cfg.solver.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(
trainable_params,
cfg.solver.max_grad_norm,
)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
reference_control_reader.clear()
reference_control_writer.clear()
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
                if global_step % cfg.val.validation_steps == 0 or global_step == 1:
if accelerator.is_main_process:
generator = torch.Generator(device=accelerator.device)
generator.manual_seed(cfg.seed)
log_validation(
accelerator=accelerator,
vae=vae,
net=net,
scheduler=val_noise_scheduler,
width=cfg.data.train_width,
height=cfg.data.train_height,
clip_length=cfg.data.n_sample_frames,
cfg=cfg,
save_dir=validation_dir,
global_step=global_step,
times=cfg.single_inference_times if cfg.single_inference_times is not None else None,
face_analysis_model_path=cfg.face_analysis_model_path
)
logs = {
"step_loss": loss.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
"td": f"{t_data:.2f}s",
}
t_data_start = time.time()
progress_bar.set_postfix(**logs)
if (
global_step % cfg.checkpointing_steps == 0
or global_step == cfg.solver.max_train_steps
):
# save model
save_path = os.path.join(
checkpoint_dir, f"checkpoint-{global_step}")
if accelerator.is_main_process:
delete_additional_ckpt(checkpoint_dir, 30)
accelerator.wait_for_everyone()
accelerator.save_state(save_path)
# save model weight
unwrap_net = accelerator.unwrap_model(net)
if accelerator.is_main_process:
save_checkpoint(
unwrap_net,
module_dir,
"net",
global_step,
total_limit=30,
)
if global_step >= cfg.solver.max_train_steps:
break
    # Training is finished; wait for all processes and end the tracker run.
accelerator.wait_for_everyone()
accelerator.end_training()
|
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
|
train_stage2_process
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage2.py
|
MIT
|
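A minimal launcher sketch for the stage-2 training entry point above. It is not part of the record: the import path, argparse wiring, and config location are illustrative assumptions, and in practice a multi-GPU run would typically go through `accelerate launch scripts/train_stage2.py`, since the function drives an `accelerate.Accelerator`.

import argparse

from omegaconf import OmegaConf

# Assumed import path, based on the scripts/train_stage2.py location given in the record.
from scripts.train_stage2 import train_stage2_process

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="stage-2 training launcher (sketch)")
    parser.add_argument("--config", default="./configs/train/stage2.yaml",
                        help="OmegaConf YAML that becomes the `cfg` object")
    args = parser.parse_args()
    config = OmegaConf.load(args.config)
    train_stage2_process(config)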
def load_config(config_path: str) -> dict:
"""
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
"""
if config_path.endswith(".yaml"):
return OmegaConf.load(config_path)
if config_path.endswith(".py"):
return import_filename(config_path).cfg
raise ValueError("Unsupported format for config file")
|
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
|
load_config
|
python
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/train_stage2.py
|
MIT
|
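A short usage sketch for `load_config`; the file names below are placeholders, not paths taken from the record.

cfg = load_config("configs/train/stage2.yaml")   # parsed with OmegaConf.load
cfg = load_config("configs/train/stage2.py")     # imported module must expose a module-level `cfg`
# Any other extension raises ValueError("Unsupported format for config file").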
def make_tfrecord_loaders(args):
"""Load train/val/test dataset from shuffled TFRecords"""
import data_utils.tf_dl
data_set_args = {'batch_size': args.batch_size,
'max_seq_len': args.seq_length,
'max_preds_per_seq': args.max_preds_per_seq,
'train': True,
'num_workers': max(args.num_workers, 1),
'seed': args.seed + args.rank + 1,
'threaded_dl': args.num_workers > 0
}
train = data_utils.tf_dl.TFRecordDataLoader(args.train_data,
**data_set_args)
data_set_args['train'] = False
if args.eval_seq_length is not None:
data_set_args['max_seq_len'] = args.eval_seq_length
if args.eval_max_preds_per_seq is not None:
data_set_args['max_preds_per_seq'] = args.eval_max_preds_per_seq
valid = None
if args.valid_data is not None:
valid = data_utils.tf_dl.TFRecordDataLoader(args.valid_data,
**data_set_args)
test = None
if args.test_data is not None:
test = data_utils.tf_dl.TFRecordDataLoader(args.test_data,
**data_set_args)
tokenizer = data_utils.make_tokenizer(args.tokenizer_type,
train,
args.tokenizer_path,
args.vocab_size,
args.tokenizer_model_type,
cache_dir=args.cache_dir)
return (train, valid, test), tokenizer
|
Load train/val/test dataset from shuffled TFRecords
|
make_tfrecord_loaders
|
python
|
THUDM/GLM
|
configure_data.py
|
https://github.com/THUDM/GLM/blob/master/configure_data.py
|
MIT
|
def get_split(args):
"""
Get dataset splits from comma separated string list
"""
splits = []
if args.split.find(',') != -1:
splits = [float(s) for s in args.split.split(',')]
elif args.split.find('/') != -1:
splits = [float(s) for s in args.split.split('/')]
else:
splits = [float(args.split)]
split_total = sum(splits)
if split_total < 1.:
splits.append(1 - split_total)
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
if args.valid_data is not None:
splits[1] = 0.
if args.test_data is not None:
splits[2] = 0.
final_sum = sum(splits)
return [s / final_sum for s in splits]
|
Get dataset splits from comma separated string list
|
get_split
|
python
|
THUDM/GLM
|
configure_data.py
|
https://github.com/THUDM/GLM/blob/master/configure_data.py
|
MIT
|
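A worked example of the normalization `get_split` performs; `argparse.Namespace` stands in for the parsed command-line arguments and the values are illustrative.

from argparse import Namespace

# "0.8,0.1" sums to 0.9, so the remaining 0.1 is appended as the test share.
get_split(Namespace(split="0.8,0.1", valid_data=None, test_data=None))
# -> approximately [0.8, 0.1, 0.1]

# A single value puts everything into the train split.
get_split(Namespace(split="1", valid_data=None, test_data=None))
# -> [1.0, 0.0, 0.0]

# With --valid-data given separately, the valid share is zeroed and the rest is renormalized.
get_split(Namespace(split="0.8,0.1,0.1", valid_data=["valid.json"], test_data=None))
# -> approximately [0.889, 0.0, 0.111]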
def configure_data():
"""add cmdline flags for configuring datasets"""
# These are options that are used by data_utils, but are either
# deprecated or not meant to be exposed to the command line user.
    # These options are intended to be set in code by specific scripts.
defaults = {
'world_size': 1,
'rank': -1,
'persist_state': 0,
'lazy': False,
'transpose': False,
'data_set_type': 'supervised',
'seq_length': 256,
'eval_seq_length': 256,
'samples_per_shard': 100
}
return DataConfig(defaults=defaults)
|
add cmdline flags for configuring datasets
|
configure_data
|
python
|
THUDM/GLM
|
configure_data.py
|
https://github.com/THUDM/GLM/blob/master/configure_data.py
|
MIT
|
def process_batch(batch, args):
"""Process batch and produce inputs for the model."""
keys = ["text", "label"]
if args.pretrained_bert:
keys += ["padding_mask", "types"]
else:
keys += ["mask", "position"]
if args.cloze_eval:
if args.fast_decode:
keys += ["dec_text", "dec_position", "dec_mask", "dec_target", "dec_logit_mask"]
else:
keys += ["target", "logit_mask"]
if args.segment_length > 0:
keys += ["segment_id"]
if args.continuous_prompt:
keys += ["prompt_pos"]
if args.variable_num_choices:
keys.append("loss_mask")
# Broadcast data.
datatype = torch.int64
data_b = mpu.broadcast_data(keys, batch, datatype)
if "padding_mask" in data_b:
attention_mask = data_b['padding_mask'].float().cuda().contiguous()
if args.fp16:
attention_mask = attention_mask.half()
data_b["padding_mask"] = attention_mask
return data_b
|
Process batch and produce inputs for the model.
|
process_batch
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
def finetune_forward_step(batch, model, args, timers, mems):
"""Simple forward step with cross-entropy loss."""
# Get the batch.
timers('batch generator').start()
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
data = process_batch(batch_, args)
timers('batch generator').stop()
# Forward model.
if args.pretrained_bert:
tokens, types, labels, attention_mask = data['text'], data['types'], data['label'], data['padding_mask']
logits = model(tokens, token_type_ids=types, attention_mask=attention_mask, checkpoint_activations=True)
elif args.cloze_eval:
tokens, labels, position_ids = data['text'], data['label'], data['position']
attention_mask = data['mask']
if not args.fast_decode:
target_ids, logit_mask = data['target'], data['logit_mask']
if args.continuous_prompt:
prompt_pos = data["prompt_pos"]
result = model(tokens, position_ids, attention_mask, target_ids, logit_mask, prompt_pos=prompt_pos)
else:
result = model(tokens, position_ids, attention_mask, target_ids, logit_mask)
if not args.multi_token:
logits, lm_logits, *mems = result
else:
logits, *mems = result
else:
dec_input_ids, dec_position_ids, dec_attention_mask = data['dec_text'], data['dec_position'], data[
'dec_mask']
dec_target_ids, dec_logit_mask = data['dec_target'], data['dec_logit_mask']
logits, *mems = model(tokens, position_ids, attention_mask, dec_input_ids, dec_position_ids,
dec_attention_mask, dec_target_ids, dec_logit_mask)
else:
tokens, labels, position_ids, attention_mask = data['text'], data['label'], data['position'], data['mask']
logits, *mems = model(tokens, position_ids, attention_mask)
if args.adapet:
batch_size, num_classes = logits.size()[:2]
label_mask = torch.ones(batch_size, num_classes, device=logits.device)
label_mask.scatter_(1, labels.unsqueeze(1), -1.0)
if "loss_mask" in data:
loss_mask = data["loss_mask"]
label_mask = label_mask * loss_mask
loss = logits.contiguous().float() * label_mask
loss = loss.sum() / batch_size
else:
if "segment_id" in data:
from torch_scatter import scatter_sum
if "loss_mask" in data:
logits = logits * data["loss_mask"]
logits = scatter_sum(logits, data["segment_id"], dim=1)
elif "loss_mask" in data:
loss_mask = data["loss_mask"]
logits = logits * loss_mask - 10000.0 * (1.0 - loss_mask)
if args.loss_func == "cross_entropy":
# Cross-entropy loss.
loss_func = torch.nn.CrossEntropyLoss()
loss = loss_func(logits.contiguous().float(), labels)
elif args.loss_func == "hinge":
correct_logits = logits[range(logits.size(0)), labels]
hinge_loss = 1 + logits - correct_logits.unsqueeze(1)
hinge_loss[hinge_loss < 0.0] = 0.0
loss = hinge_loss.sum(dim=1).mean() - 1.0
elif args.loss_func == "generative" or args.loss_func == "mix":
batch_size = logits.size(0)
loss = - logits[range(batch_size), labels].mean()
if args.loss_func == "mix":
loss_func = torch.nn.CrossEntropyLoss()
loss = loss + loss_func(logits.contiguous().float(), labels)
else:
raise NotImplementedError
# Reduce loss for logging.
return loss, mems, 'bert'
|
Simple forward step with cross-entropy loss.
|
finetune_forward_step
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
def _build_infinite_size_dataloader(dataloader):
"""Build a looped dataloader with infinite size."""
iterator = dataloader.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = dataloader.__iter__()
|
Build a looped dataloader with infinite size.
|
_build_infinite_size_dataloader
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
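A self-contained sketch of the looping behaviour of `_build_infinite_size_dataloader`; the toy dataset and batch size are assumptions for illustration.

import torch
from torch.utils.data import DataLoader, TensorDataset

# 5 samples with batch size 2 -> 3 batches per pass over the data.
loader = DataLoader(TensorDataset(torch.arange(5)), batch_size=2)
infinite = _build_infinite_size_dataloader(loader)

# The generator restarts the underlying iterator on StopIteration, so more
# batches can be drawn than a single epoch contains.
for step in range(7):
    (batch,) = next(infinite)
    print(step, batch.tolist())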
def finetune(args, train_valid_datasets_provider, model_kwargs, forward_step=finetune_forward_step,
end_of_epoch_callback_provider=None):
"""Main finetune function used across all tasks."""
global tokenizer
timers = Timers()
tokenizer = prepare_tokenizer(args)
pretrain_glm.tokenizer = tokenizer
if args.save:
args.save = os.path.join(args.save, args.experiment_name)
# Train and validation data loaders.
    timers('train/valid/test dataset/dataloader').start()
train_dataloader, valid_dataloader = None, None
train_block_dataloader, valid_block_dataloader = None, None
if train_valid_datasets_provider is not None and args.epochs > 0:
if mpu.get_model_parallel_rank() == 0:
train_dataset, valid_dataset = train_valid_datasets_provider(args, tokenizer)
train_dataloader, valid_dataloader = _build_train_valid_dataloaders(train_dataset, valid_dataset, args)
if args.no_validation:
valid_dataloader = None
train_iters = torch.cuda.LongTensor([len(train_dataloader)])
else:
train_iters = torch.cuda.LongTensor([0])
torch.distributed.broadcast(train_iters, mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
if mpu.get_model_parallel_rank() != 0:
args.train_iters_per_epoch = train_iters[0].item()
args.train_iters = args.epochs * args.train_iters_per_epoch
train_dataloader = FakeDataloader(args.train_iters_per_epoch)
if args.no_validation:
valid_dataloader = None
else:
valid_dataloader = FakeDataloader(None)
if args.block_lm_ratio > 0.0:
if mpu.get_model_parallel_rank() == 0:
train_block_dataset, valid_block_dataset = train_valid_datasets_provider(args, tokenizer,
pattern_text=True)
train_block_dataloader = make_data_loader(train_block_dataset, tokenizer,
args.batch_size * mpu.get_data_parallel_world_size(),
args.train_iters, args, shuffle=True,
block_collate=True)
valid_block_dataloader = make_data_loader(valid_block_dataset, tokenizer,
args.batch_size * mpu.get_data_parallel_world_size(), (
args.train_iters // args.eval_interval + 1) * args.eval_iters,
args, shuffle=True, block_collate=True)
else:
train_block_dataloader = FakeDataloader(args.train_iters)
valid_block_dataloader = FakeDataloader(None)
train_block_dataloader, valid_block_dataloader = iter(train_block_dataloader), iter(valid_block_dataloader)
    timers('train/valid/test dataset/dataloader').stop()
    # Build callback function.
timers('callback function').start()
end_of_epoch_callback, end_of_train_callback = None, None
if end_of_epoch_callback_provider is not None:
if train_valid_datasets_provider is not None and args.epochs > 0 and not args.no_validation:
end_of_epoch_callback = end_of_epoch_callback_provider(args, tokenizer, is_test=False)
end_of_train_callback = end_of_epoch_callback_provider(args, tokenizer, is_test=True)
timers('callback function').stop()
# Build model, optimizer and learning rate scheduler.
timers('model and optimizer').start()
model, optimizer, lr_scheduler = setup_model_and_optimizer(args, **model_kwargs)
timers('model and optimizer').stop()
# If pretrained checkpoint is provided and we have not trained for
# any iteration (i.e., iteration is zero), then load the pretrained
# checkpoint.
timers('pretrained checkpoint').start()
if args.load_pretrained is not None and not args.pretrained_bert:
task_tokens = None
if args.continuous_prompt and args.prompt_init:
if mpu.get_model_parallel_rank() == 0:
dataset = train_dataloader.dataset
processor, pvp = dataset.processor, dataset.pvp
task_tokens = []
for label in processor.get_labels():
verbalizer = pvp.verbalize(label)[0]
verbalizer_ids = tokenizer.EncodeAsIds(verbalizer).tokenization
task_tokens += verbalizer_ids
print_rank_0("Task tokens: " + tokenizer.DecodeIds(task_tokens))
num_task_tokens = len(task_tokens)
else:
num_task_tokens, task_tokens = 0, []
num_task_tokens = torch.cuda.LongTensor([num_task_tokens])
torch.distributed.broadcast(num_task_tokens, mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
num_task_tokens = num_task_tokens.item()
if num_task_tokens > 0:
if mpu.get_model_parallel_rank() == 0:
task_tokens = torch.cuda.LongTensor(task_tokens)
else:
task_tokens = torch.empty(num_task_tokens, device=torch.cuda.current_device(), dtype=torch.long)
torch.distributed.broadcast(task_tokens, mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
task_tokens = task_tokens.tolist()
with FileLock(os.path.join(pathlib.Path.home(), "checkpoint_lock"), timeout=-1):
load_pretrained(model, args.load_pretrained, args, task_tokens=task_tokens)
# This is critical when only model is loaded. We should make sure
# master parameters are also updated.
if args.fp16 and optimizer is not None:
if args.deepspeed:
optimizer.refresh_fp32_params()
else:
optimizer._model_params_to_master_params()
if args.load is not None:
with FileLock(os.path.join(pathlib.Path.home(), "checkpoint_lock"), timeout=-1):
load_checkpoint(model, optimizer, lr_scheduler, args, no_deepspeed=args.no_deepspeed_load)
# This is critical when only model is loaded. We should make sure
# master parameters are also updated.
if args.fp16 and optimizer is not None:
if args.deepspeed:
optimizer.refresh_fp32_params()
else:
optimizer._model_params_to_master_params()
torch.distributed.barrier()
timers('pretrained checkpoint').stop()
args.iteration = 0
summary_writer = None
if torch.distributed.get_rank() == 0:
args.log_dir = get_log_dir(base=args.summary_dir, name=args.experiment_name)
if os.path.exists(os.path.join(args.log_dir, "test_results.json")) and args.load is None and not args.overwrite:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.log_dir))
summary_writer = get_sample_writer(log_dir=args.log_dir, iteration=args.iteration)
print_and_save_args(args, verbose=True, log_dir=args.log_dir)
# Print setup timing.
print_rank_0('done with setups ...')
    timers.log(['train/valid/test dataset/dataloader', 'callback function',
'model and optimizer', 'pretrained checkpoint'])
print_rank_0('training ...')
# Finetune the model.
score_dict = None
if train_dataloader is not None and args.epochs > 0:
if args.block_lm_ratio > 0.0:
forward_step = mix_forward_step
best_iteration = _train(model, optimizer, lr_scheduler, forward_step,
(train_dataloader, train_block_dataloader), (valid_dataloader, valid_block_dataloader),
end_of_epoch_callback, args, timers,
summary_writer=summary_writer)
if end_of_train_callback is not None and best_iteration is not None:
with FileLock(os.path.join(pathlib.Path.home(), "checkpoint_lock"), timeout=-1):
args.load = os.path.join(args.save, "best")
load_checkpoint(model, optimizer, lr_scheduler, args, no_load_optim=True, no_deepspeed=True)
args.load = None
torch.distributed.barrier()
if end_of_train_callback is not None:
score_dict = end_of_train_callback(model, epoch=-1, output_predictions=True)
# Or just evaluate.
else:
if end_of_train_callback is not None:
print_rank_0('evaluation only mode, setting epoch to -1')
score_dict = end_of_train_callback(model, epoch=-1, output_predictions=True)
if score_dict is not None and torch.distributed.get_rank() == 0:
score_dict.update({"type": "test"})
with open(os.path.join(args.log_dir, "test_results.json"), "w") as output:
output.write(json.dumps(score_dict) + "\n")
print_rank_0('done :-)')
|
Main finetune function used across all tasks.
|
finetune
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
def add(self, hyp: torch.LongTensor, sum_logprobs: float, mems=None):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / (max(hyp.shape[-1], 1) ** self.length_penalty)
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp, mems))
if len(self) > self.num_beams:
sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)])
del self.beams[sorted_next_scores[0][1]]
self.worst_score = sorted_next_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
|
Add a new hypothesis to the list.
|
add
|
python
|
THUDM/GLM
|
generation_utils.py
|
https://github.com/THUDM/GLM/blob/master/generation_utils.py
|
MIT
|
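A small numeric illustration of the length-penalized score that `add` assigns to a finished hypothesis; the token ids, log-probability, and penalty are made-up values.

import torch

hyp = torch.tensor([15, 42, 7, 3])   # 4 generated token ids (illustrative)
sum_logprobs = -6.0                   # accumulated log-probability of the hypothesis
length_penalty = 0.7

score = sum_logprobs / (max(hyp.shape[-1], 1) ** length_penalty)
print(score)   # ~ -2.27; a higher (less negative) score is a better hypothesis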
def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool:
"""
        If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
        one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
|
If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
one in the heap, then we are done with this sentence.
|
is_done
|
python
|
THUDM/GLM
|
generation_utils.py
|
https://github.com/THUDM/GLM/blob/master/generation_utils.py
|
MIT
|
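A quick numeric check of the stopping test in `is_done` for the non-early-stopping branch; all values are made up.

# Assume the heap already holds num_beams finished hypotheses.
worst_score = -2.0           # worst finished hypothesis kept so far
best_sum_logprobs = -9.0     # best running (unfinished) candidate
cur_len = 5
length_penalty = 1.0

cur_score = best_sum_logprobs / cur_len ** length_penalty   # -1.8
print(worst_score >= cur_score)   # False: -2.0 < -1.8, so generation continues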