code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_sliceable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_sliceable_dims(module)
num_sliceable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_sliceable_layers * [1]
slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i, size in enumerate(slice_size):
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
|
set_attention_slice
|
python
|
memoavatar/memo
|
memo/models/unet_2d_condition.py
|
https://github.com/memoavatar/memo/blob/master/memo/models/unet_2d_condition.py
|
Apache-2.0
|
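To make the slicing rules above concrete, here is a small stand-alone sketch (not repository code) of how `slice_size` resolves to per-layer values; the head dims `[8, 16, 16]` and the helper name `resolve_slice_sizes` are assumptions for illustration.

# Hypothetical helper mirroring the branching in set_attention_slice above.
def resolve_slice_sizes(slice_size, sliceable_head_dims):
    if slice_size == "auto":
        # halve every head dim: attention runs in two slices per layer
        return [dim // 2 for dim in sliceable_head_dims]
    if slice_size == "max":
        # one slice row at a time: maximum memory savings
        return [1] * len(sliceable_head_dims)
    if isinstance(slice_size, int):
        return [slice_size] * len(sliceable_head_dims)
    return list(slice_size)

print(resolve_slice_sizes("auto", [8, 16, 16]))  # [4, 8, 8]
print(resolve_slice_sizes("max", [8, 16, 16]))   # [1, 1, 1]
print(resolve_slice_sizes(4, [8, 16, 16]))       # [4, 4, 4]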
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
cond_tensor: torch.FloatTensor = None,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
post_process: bool = False,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
Additional residuals to be added to the UNet long skip connections from down blocks to up blocks, for
example from ControlNet side model(s).
mid_block_additional_residual (`torch.Tensor`, *optional*):
Additional residual to be added to the UNet mid block output, for example from a ControlNet side model.
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
Additional residuals to be added within the UNet down blocks, for example from T2I-Adapter side model(s).
encoder_attention_mask (`torch.Tensor`, *optional*):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. The mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
a `tuple` is returned where the first element is the sample tensor.
"""
# By default samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
aug_emb = None
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "text_image":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image'"
"which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
elif self.config.addition_embed_type == "text_time":
# SDXL - style
if "text_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time'"
"which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time'"
"which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(emb.dtype)
aug_emb = self.add_embedding(add_embeds)
elif self.config.addition_embed_type == "image":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image'"
"which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
aug_emb = self.add_embedding(image_embs)
elif self.config.addition_embed_type == "image_hint":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint'"
"which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
hint = added_cond_kwargs.get("hint")
aug_emb, hint = self.add_embedding(image_embs, hint)
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj'"
"which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj'"
"which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj'"
"which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
image_embeds = self.encoder_hid_proj(image_embeds).to(encoder_hidden_states.dtype)
encoder_hidden_states = torch.cat([encoder_hidden_states, image_embeds], dim=1)
# 2. pre-process
sample = self.conv_in(sample)
if cond_tensor is not None:
sample = sample + cond_tensor
# 2.5 GLIGEN position net
if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
gligen_args = cross_attention_kwargs.pop("gligen")
cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
# 3. down
lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
ref_features = {"down": [], "mid": [], "up": []}
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
sample, res_samples, ref_feature_list = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
**additional_residuals,
)
else:
sample, res_samples, ref_feature_list = downsample_block(
hidden_states=sample, temb=emb, scale=lora_scale
)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
ref_features["down"].append(ref_feature_list)
down_block_res_samples += res_samples
if is_controlnet:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
sample, ref_feature_list = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
ref_features["mid"].append(ref_feature_list)
else:
sample = self.mid_block(sample, emb)
# To support T2I-Adapter-XL
if (
is_adapter
and len(down_intrablock_additional_residuals) > 0
and sample.shape == down_intrablock_additional_residuals[0].shape
):
sample += down_intrablock_additional_residuals.pop(0)
if is_controlnet:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample, ref_feature_list = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample, ref_feature_list = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
scale=lora_scale,
)
ref_features["up"].append(ref_feature_list)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return ref_features
return UNet2DConditionOutput(ref_features=ref_features)
|
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
Additional residuals to be added to the UNet long skip connections from down blocks to up blocks, for
example from ControlNet side model(s).
mid_block_additional_residual (`torch.Tensor`, *optional*):
Additional residual to be added to the UNet mid block output, for example from a ControlNet side model.
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
Additional residuals to be added within the UNet down blocks, for example from T2I-Adapter side model(s).
encoder_attention_mask (`torch.Tensor`, *optional*):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. The mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
a `tuple` is returned where the first element is the sample tensor.
|
forward
|
python
|
memoavatar/memo
|
memo/models/unet_2d_condition.py
|
https://github.com/memoavatar/memo/blob/master/memo/models/unet_2d_condition.py
|
Apache-2.0
|
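The 0/1 mask handling inside `forward` is easier to see in isolation. A minimal sketch, assuming a toy key-padding mask and fp16 compute:

import torch

# assumed key-padding mask: 1 = keep, 0 = discard      [batch, key_tokens]
attention_mask = torch.tensor([[1, 1, 1, 0]])
bias = (1 - attention_mask.to(torch.float16)) * -10000.0
bias = bias.unsqueeze(1)                                # [batch, 1, key_tokens]
print(bias)  # kept tokens get bias 0, discarded tokens get -10000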
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(
name: str,
module: torch.nn.Module,
processors: Dict[str, AttentionProcessor],
):
if hasattr(module, "set_processor"):
processors[f"{name}.processor"] = module.processor
for sub_name, child in module.named_children():
if "temporal_transformer" not in sub_name:
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
if "temporal_transformer" not in name:
fn_recursive_add_processors(name, module, processors)
return processors
|
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
|
attn_processors
|
python
|
memoavatar/memo
|
memo/models/unet_3d.py
|
https://github.com/memoavatar/memo/blob/master/memo/models/unet_3d.py
|
Apache-2.0
|
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i, size in enumerate(slice_size):
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
|
set_attention_slice
|
python
|
memoavatar/memo
|
memo/models/unet_3d.py
|
https://github.com/memoavatar/memo/blob/master/memo/models/unet_3d.py
|
Apache-2.0
|
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
if "temporal_transformer" not in sub_name:
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
if "temporal_transformer" not in name:
fn_recursive_attn_processor(name, module, processor)
|
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
|
set_attn_processor
|
python
|
memoavatar/memo
|
memo/models/unet_3d.py
|
https://github.com/memoavatar/memo/blob/master/memo/models/unet_3d.py
|
Apache-2.0
|
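A hedged usage sketch for `set_attn_processor`, assuming an already constructed `unet` of this class and the `AttnProcessor` class shipped with diffusers; either form should be accepted by the method above:

from diffusers.models.attention_processor import AttnProcessor

# one processor instance shared by every attention layer ...
unet.set_attn_processor(AttnProcessor())
# ... or a dict keyed by the names reported by `unet.attn_processors`
unet.set_attn_processor({name: AttnProcessor() for name in unet.attn_processors.keys()})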
def preprocess_audio(
wav_path: str,
fps: int,
wav2vec_model: str,
vocal_separator_model: str = None,
cache_dir: str = "",
device: str = "cuda",
sample_rate: int = 16000,
num_generated_frames_per_clip: int = -1,
):
"""
Preprocess the audio file and extract audio embeddings.
Args:
wav_path (str): Path to the input audio file.
fps (int): Frames per second for the audio processing.
wav2vec_model (str): Path to the pretrained Wav2Vec model.
vocal_separator_model (str, optional): Path to the vocal separator model. Defaults to None.
cache_dir (str, optional): Directory for cached files. Defaults to "".
device (str, optional): Device to use ('cuda' or 'cpu'). Defaults to "cuda".
sample_rate (int, optional): Sampling rate for audio processing. Defaults to 16000.
num_generated_frames_per_clip (int, optional): Number of generated frames per clip for padding. Defaults to -1.
Returns:
tuple: A tuple containing:
- audio_emb (torch.Tensor): The processed audio embeddings.
- audio_length (int): The length of the audio in frames.
"""
# Initialize Wav2Vec model
audio_encoder = Wav2VecModel.from_pretrained(wav2vec_model).to(device=device)
audio_encoder.feature_extractor._freeze_parameters()
# Initialize Wav2Vec feature extractor
wav2vec_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(wav2vec_model)
# Initialize vocal separator if provided
vocal_separator = None
if vocal_separator_model is not None:
os.makedirs(cache_dir, exist_ok=True)
vocal_separator = Separator(
output_dir=cache_dir,
output_single_stem="vocals",
model_file_dir=os.path.dirname(vocal_separator_model),
)
vocal_separator.load_model(os.path.basename(vocal_separator_model))
assert vocal_separator.model_instance is not None, "Failed to load audio separation model."
# Perform vocal separation if applicable
if vocal_separator is not None:
outputs = vocal_separator.separate(wav_path)
assert len(outputs) > 0, "Audio separation failed."
vocal_audio_file = outputs[0]
vocal_audio_name, _ = os.path.splitext(vocal_audio_file)
vocal_audio_file = os.path.join(vocal_separator.output_dir, vocal_audio_file)
vocal_audio_file = resample_audio(
vocal_audio_file,
os.path.join(vocal_separator.output_dir, f"{vocal_audio_name}-16k.wav"),
sample_rate,
)
else:
vocal_audio_file = wav_path
# Load audio and extract Wav2Vec features
speech_array, sampling_rate = librosa.load(vocal_audio_file, sr=sample_rate)
audio_feature = np.squeeze(wav2vec_feature_extractor(speech_array, sampling_rate=sampling_rate).input_values)
audio_length = math.ceil(len(audio_feature) / sample_rate * fps)
audio_feature = torch.from_numpy(audio_feature).float().to(device=device)
# Pad audio features to match the required length
if num_generated_frames_per_clip > 0 and audio_length % num_generated_frames_per_clip != 0:
audio_feature = torch.nn.functional.pad(
audio_feature,
(
0,
(num_generated_frames_per_clip - audio_length % num_generated_frames_per_clip) * (sample_rate // fps),
),
"constant",
0.0,
)
audio_length += num_generated_frames_per_clip - audio_length % num_generated_frames_per_clip
audio_feature = audio_feature.unsqueeze(0)
# Extract audio embeddings
with torch.no_grad():
embeddings = audio_encoder(audio_feature, seq_len=audio_length, output_hidden_states=True)
assert len(embeddings) > 0, "Failed to extract audio embeddings."
audio_emb = torch.stack(embeddings.hidden_states[1:], dim=1).squeeze(0)
audio_emb = rearrange(audio_emb, "b s d -> s b d")
# Concatenate embeddings with surrounding frames
audio_emb = audio_emb.cpu().detach()
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)] for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
if vocal_separator is not None:
del vocal_separator
del audio_encoder
return audio_emb, audio_length
|
Preprocess the audio file and extract audio embeddings.
Args:
wav_path (str): Path to the input audio file.
fps (int): Frames per second for the audio processing.
wav2vec_model (str): Path to the pretrained Wav2Vec model.
vocal_separator_model (str, optional): Path to the vocal separator model. Defaults to None.
cache_dir (str, optional): Directory for cached files. Defaults to "".
device (str, optional): Device to use ('cuda' or 'cpu'). Defaults to "cuda".
sample_rate (int, optional): Sampling rate for audio processing. Defaults to 16000.
num_generated_frames_per_clip (int, optional): Number of generated frames per clip for padding. Defaults to -1.
Returns:
tuple: A tuple containing:
- audio_emb (torch.Tensor): The processed audio embeddings.
- audio_length (int): The length of the audio in frames.
|
preprocess_audio
|
python
|
memoavatar/memo
|
memo/utils/audio_utils.py
|
https://github.com/memoavatar/memo/blob/master/memo/utils/audio_utils.py
|
Apache-2.0
|
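The clip-alignment padding in `preprocess_audio` reduces to simple arithmetic. A sketch with assumed numbers (16 kHz audio, 30 fps, 16 frames per clip, 70 raw frames):

sample_rate = 16000
fps = 30
num_generated_frames_per_clip = 16
audio_length = 70                                         # hypothetical frame count

remainder = audio_length % num_generated_frames_per_clip  # 6
pad_frames = num_generated_frames_per_clip - remainder    # 10
pad_samples = pad_frames * (sample_rate // fps)           # 10 * 533 = 5330 samples of zeros
audio_length += pad_frames                                # 80, now divisible by 16
print(pad_frames, pad_samples, audio_length)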
def extract_audio_emotion_labels(
model: str,
wav_path: str,
emotion2vec_model: str,
audio_length: int,
sample_rate: int = 16000,
device: str = "cuda",
):
"""
Extract audio emotion labels from an audio file.
Args:
model (str): Path to the MEMO model.
wav_path (str): Path to the input audio file.
emotion2vec_model (str): Path to the Emotion2vec model.
audio_length (int): Target length for interpolated emotion labels.
sample_rate (int, optional): Sample rate of the input audio. Default is 16000.
device (str, optional): Device to use ('cuda' or 'cpu'). Default is "cuda".
Returns:
torch.Tensor: Processed emotion labels with shape matching the target audio length.
"""
# Load models
logger.info("Downloading emotion2vec models from modelscope")
kwargs = download_model(model=emotion2vec_model)
kwargs["tokenizer"] = None
kwargs["input_size"] = None
kwargs["frontend"] = None
emotion_model = Emotion2vec(**kwargs, vocab_size=-1).to(device)
init_param = kwargs.get("init_param", None)
load_emotion2vec_model(
model=emotion_model,
path=init_param,
ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
oss_bucket=kwargs.get("oss_bucket", None),
scope_map=kwargs.get("scope_map", []),
)
emotion_model.eval()
classifier = AudioEmotionClassifierModel.from_pretrained(
model,
subfolder="misc/audio_emotion_classifier",
use_safetensors=True,
).to(device=device)
classifier.eval()
# Load audio
wav, sr = torchaudio.load(wav_path)
if sr != sample_rate:
wav = torchaudio.functional.resample(wav, sr, sample_rate)
wav = wav.view(-1) if wav.dim() == 1 else wav[0].view(-1)
emotion_labels = torch.full_like(wav, -1, dtype=torch.int32)
def extract_emotion(x):
"""
Extract emotion for a given audio segment.
"""
x = x.to(device=device)
x = F.layer_norm(x, x.shape).view(1, -1)
feats = emotion_model.extract_features(x)
x = feats["x"].mean(dim=1) # average across frames
x = classifier(x)
x = torch.softmax(x, dim=-1)
return torch.argmax(x, dim=-1)
# Process start, middle, and end segments
start_label = extract_emotion(wav[: sample_rate * 2]).item()
emotion_labels[:sample_rate] = start_label
for i in range(sample_rate, len(wav) - sample_rate, sample_rate):
mid_wav = wav[i - sample_rate : i - sample_rate + sample_rate * 3]
mid_label = extract_emotion(mid_wav).item()
emotion_labels[i : i + sample_rate] = mid_label
end_label = extract_emotion(wav[-sample_rate * 2 :]).item()
emotion_labels[-sample_rate:] = end_label
# Interpolate to match the target audio length
emotion_labels = emotion_labels.unsqueeze(0).unsqueeze(0).float()
emotion_labels = F.interpolate(emotion_labels, size=audio_length, mode="nearest").squeeze(0).squeeze(0).int()
num_emotion_classes = classifier.num_emotion_classes
del emotion_model
del classifier
return emotion_labels, num_emotion_classes
|
Extract audio emotion labels from an audio file.
Args:
model (str): Path to the MEMO model.
wav_path (str): Path to the input audio file.
emotion2vec_model (str): Path to the Emotion2vec model.
audio_length (int): Target length for interpolated emotion labels.
sample_rate (int, optional): Sample rate of the input audio. Default is 16000.
device (str, optional): Device to use ('cuda' or 'cpu'). Default is "cuda".
Returns:
torch.Tensor: Processed emotion labels with shape matching the target audio length.
|
extract_audio_emotion_labels
|
python
|
memoavatar/memo
|
memo/utils/audio_utils.py
|
https://github.com/memoavatar/memo/blob/master/memo/utils/audio_utils.py
|
Apache-2.0
|
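The final resampling step of `extract_audio_emotion_labels` can be checked on its own. A minimal sketch with assumed labels and a hypothetical target length of 4 frames:

import torch
import torch.nn.functional as F

emotion_labels = torch.tensor([0, 0, 0, 2, 2, 2, 2, 1], dtype=torch.int32)
audio_length = 4  # hypothetical number of video frames
resized = F.interpolate(
    emotion_labels.unsqueeze(0).unsqueeze(0).float(),  # [1, 1, 8] for 1D interpolation
    size=audio_length,
    mode="nearest",
).squeeze().int()
print(resized)  # tensor([0, 0, 2, 2], dtype=torch.int32)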
def extract_emotion(x):
"""
Extract emotion for a given audio segment.
"""
x = x.to(device=device)
x = F.layer_norm(x, x.shape).view(1, -1)
feats = emotion_model.extract_features(x)
x = feats["x"].mean(dim=1) # average across frames
x = classifier(x)
x = torch.softmax(x, dim=-1)
return torch.argmax(x, dim=-1)
|
Extract emotion for a given audio segment.
|
extract_emotion
|
python
|
memoavatar/memo
|
memo/utils/audio_utils.py
|
https://github.com/memoavatar/memo/blob/master/memo/utils/audio_utils.py
|
Apache-2.0
|
def tensor_to_video(tensor, output_video_path, input_audio_path, fps=30):
"""
Converts a Tensor with shape [c, f, h, w] into a video and adds an audio track from the specified audio file.
Args:
tensor (Tensor): The Tensor to be converted, shaped [c, f, h, w].
output_video_path (str): The file path where the output video will be saved.
input_audio_path (str): The path to the audio file (WAV file) that contains the audio track to be added.
fps (int): The frame rate of the output video. Default is 30 fps.
"""
tensor = tensor.permute(1, 2, 3, 0).cpu().numpy() # convert to [f, h, w, c]
tensor = np.clip(tensor * 255, 0, 255).astype(np.uint8) # to [0, 255]
def make_frame(t):
frame_index = min(int(t * fps), tensor.shape[0] - 1)
return tensor[frame_index]
video_duration = tensor.shape[0] / fps
audio_clip = AudioFileClip(input_audio_path)
audio_duration = audio_clip.duration
final_duration = min(video_duration, audio_duration)
audio_clip = audio_clip.subclip(0, final_duration)
new_video_clip = VideoClip(make_frame, duration=final_duration)
new_video_clip = new_video_clip.set_audio(audio_clip)
new_video_clip.write_videofile(output_video_path, fps=fps, audio_codec="aac")
|
Converts a Tensor with shape [c, f, h, w] into a video and adds an audio track from the specified audio file.
Args:
tensor (Tensor): The Tensor to be converted, shaped [c, f, h, w].
output_video_path (str): The file path where the output video will be saved.
input_audio_path (str): The path to the audio file (WAV file) that contains the audio track to be added.
fps (int): The frame rate of the output video. Default is 30 fps.
|
tensor_to_video
|
python
|
memoavatar/memo
|
memo/utils/vision_utils.py
|
https://github.com/memoavatar/memo/blob/master/memo/utils/vision_utils.py
|
Apache-2.0
|
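The tensor-to-frames conversion in `tensor_to_video` is just a permute, scale, and cast. A sketch with an assumed random tensor in `[0, 1]`:

import numpy as np
import torch

tensor = torch.rand(3, 8, 64, 64)                  # c, f, h, w
frames = tensor.permute(1, 2, 3, 0).cpu().numpy()  # f, h, w, c
frames = np.clip(frames * 255, 0, 255).astype(np.uint8)
print(frames.shape, frames.dtype)                  # (8, 64, 64, 3) uint8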
def preprocess_image(face_analysis_model: str, image_path: str, image_size: int = 512):
"""
Preprocess the image and extract face embedding.
Args:
face_analysis_model (str): Path to the FaceAnalysis model directory.
image_path (str): Path to the image file.
image_size (int, optional): Target size for resizing the image. Default is 512.
Returns:
tuple: A tuple containing:
- pixel_values (torch.Tensor): Tensor of the preprocessed image.
- face_emb (torch.Tensor): Tensor of the face embedding.
"""
# Define the image transformation
transform = transforms.Compose(
[
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
# Initialize the FaceAnalysis model
face_analysis = FaceAnalysis(
name="",
root=face_analysis_model,
providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)
face_analysis.prepare(ctx_id=0, det_size=(640, 640))
# Load and preprocess the image
image = Image.open(image_path).convert("RGB")
pixel_values = transform(image)
pixel_values = pixel_values.unsqueeze(0)
# Detect faces and extract the face embedding
image_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
faces = face_analysis.get(image_bgr)
if not faces:
logger.warning("No faces detected in the image. Using a zero vector as the face embedding.")
face_emb = np.zeros(512)
else:
# Sort faces by size and select the largest one
faces_sorted = sorted(
faces,
key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]),
reverse=True,
)
face_emb = faces_sorted[0]["embedding"]
# Convert face embedding to a PyTorch tensor
face_emb = face_emb.reshape(1, -1)
face_emb = torch.tensor(face_emb)
del face_analysis
return pixel_values, face_emb
|
Preprocess the image and extract face embedding.
Args:
face_analysis_model (str): Path to the FaceAnalysis model directory.
image_path (str): Path to the image file.
image_size (int, optional): Target size for resizing the image. Default is 512.
Returns:
tuple: A tuple containing:
- pixel_values (torch.Tensor): Tensor of the preprocessed image.
- face_emb (torch.Tensor): Tensor of the face embedding.
|
preprocess_image
|
python
|
memoavatar/memo
|
memo/utils/vision_utils.py
|
https://github.com/memoavatar/memo/blob/master/memo/utils/vision_utils.py
|
Apache-2.0
|
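The face-selection rule in `preprocess_image` keeps the detection with the largest bounding-box area. A toy sketch with assumed detections (strings stand in for the real embedding arrays):

faces = [
    {"bbox": [0, 0, 50, 80], "embedding": "small-face-embedding"},
    {"bbox": [10, 10, 200, 250], "embedding": "large-face-embedding"},
]
# area = (x2 - x1) * (y2 - y1); pick the biggest face
largest = max(faces, key=lambda f: (f["bbox"][2] - f["bbox"][0]) * (f["bbox"][3] - f["bbox"][1]))
print(largest["embedding"])  # "large-face-embedding"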
def get_video_duration(file_path):
"""Use ffmpeg to get the video duration in seconds."""
global global_counter
result = subprocess.run(["ffmpeg", "-i", file_path], stderr=subprocess.PIPE, text=True)
for line in result.stderr.split("\n"):
if "Duration" in line:
duration = line.split("Duration: ")[1].split(",")[0]
h, m, s = map(float, duration.split(":"))
global_counter += 1
return int(h * 3600 + m * 60 + s)
print("fail path", file_path)
return 0
|
Use ffmpeg to get the video duration in seconds.
|
get_video_duration
|
python
|
memoavatar/memo
|
scripts/calculate_durations.py
|
https://github.com/memoavatar/memo/blob/master/scripts/calculate_durations.py
|
Apache-2.0
|
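The stderr parsing in `get_video_duration` can be exercised without running ffmpeg by feeding it a typical `Duration:` line; the example line below is an assumption, not captured output:

line = "  Duration: 00:01:23.45, start: 0.000000, bitrate: 1205 kb/s"
duration = line.split("Duration: ")[1].split(",")[0]  # "00:01:23.45"
h, m, s = map(float, duration.split(":"))
print(int(h * 3600 + m * 60 + s))                     # 83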
def update_progress(duration):
"""Update the progress bar and count."""
nonlocal progress_count
with progress_lock:
progress_count += 1
percent = int((100 * progress_count) / total)
bar = "#" * (percent // 2)
sys.stdout.write(f"\r[{bar:<50}] {percent}%")
sys.stdout.flush()
return duration
|
Update the progress bar and count.
|
update_progress
|
python
|
memoavatar/memo
|
scripts/calculate_durations.py
|
https://github.com/memoavatar/memo/blob/master/scripts/calculate_durations.py
|
Apache-2.0
|
def convert_audio_emb_to_vocals_path(audio_emb_path):
"""
Convert audio embedding path to the corresponding original vocals path.
"""
path_parts = Path(audio_emb_path).parts
filename = path_parts[-1]
filename_base = filename.replace(".pt", "")
new_filename = f"{filename_base}-raw_(Vocals)_Kim_Vocal_2.wav"
new_path = Path(*path_parts[:-2], "vocals", new_filename)
return new_path
|
Convert audio embedding path to the corresponding original vocals path.
|
convert_audio_emb_to_vocals_path
|
python
|
memoavatar/memo
|
scripts/prepare_data.py
|
https://github.com/memoavatar/memo/blob/master/scripts/prepare_data.py
|
Apache-2.0
|
def extract_emotion(x):
"""
Extract emotion for a given audio segment.
"""
x = x.to(device=args.device)
x = F.layer_norm(x, x.shape).view(1, -1)
feats = emotion_model.extract_features(x)
x = feats["x"].mean(dim=1) # average across frames
x = emotion_classifier(x)
x = torch.softmax(x, dim=-1)
return torch.argmax(x, dim=-1)
|
Extract emotion for a given audio segment.
|
extract_emotion
|
python
|
memoavatar/memo
|
scripts/prepare_data.py
|
https://github.com/memoavatar/memo/blob/master/scripts/prepare_data.py
|
Apache-2.0
|
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
|
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
|
make_closing
|
python
|
hankcs/pyhanlp
|
pyhanlp/util.py
|
https://github.com/hankcs/pyhanlp/blob/master/pyhanlp/util.py
|
Apache-2.0
|
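A hedged usage sketch for `make_closing`, assuming the function above is in scope and a hypothetical path `data.txt.gz`:

import gzip

ClosingGzipFile = make_closing(gzip.GzipFile)        # make_closing as defined above
with ClosingGzipFile("data.txt.gz", "rb") as fin:    # hypothetical path
    payload = fin.read()                             # close() is guaranteed on exit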
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode) to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
|
Convert a string (bytestring in `encoding` or unicode) to unicode.
|
any2unicode
|
python
|
hankcs/pyhanlp
|
pyhanlp/util.py
|
https://github.com/hankcs/pyhanlp/blob/master/pyhanlp/util.py
|
Apache-2.0
|
def newline(p1, p2, color=None, marker=None):
"""
https://stackoverflow.com/questions/36470343/how-to-draw-a-line-with-matplotlib
:param p1:
:param p2:
:return:
"""
ax = plt.gca()
xmin, xmax = ax.get_xbound()
if (p2[0] == p1[0]):
xmin = xmax = p1[0]
ymin, ymax = ax.get_ybound()
else:
ymax = p1[1] + (p2[1] - p1[1]) / (p2[0] - p1[0]) * (xmax - p1[0])
ymin = p1[1] + (p2[1] - p1[1]) / (p2[0] - p1[0]) * (xmin - p1[0])
l = mlines.Line2D([xmin, xmax], [ymin, ymax], color=color, marker=marker)
ax.add_line(l)
return l
|
https://stackoverflow.com/questions/36470343/how-to-draw-a-line-with-matplotlib
:param p1:
:param p2:
:return:
|
newline
|
python
|
hankcs/pyhanlp
|
tests/book/ch05/plot_name.py
|
https://github.com/hankcs/pyhanlp/blob/master/tests/book/ch05/plot_name.py
|
Apache-2.0
|
def estimate_mfu(self, fwdbwd_per_iter, dt):
""" estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
# first estimate the number of flops we do per iteration.
# see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
N = sum(p.numel() for p in self.parameters())
cfg = self.params
L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim//cfg.n_heads, cfg.max_seq_len
flops_per_token = 6*N + 12*L*H*Q*T
flops_per_fwdbwd = flops_per_token * T
flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
# express our flops throughput as ratio of A100 bfloat16 peak flops
flops_achieved = flops_per_iter * (1.0/dt) # per second
flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
mfu = flops_achieved / flops_promised
return mfu
|
estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS
|
estimate_mfu
|
python
|
DLLXW/baby-llama2-chinese
|
model.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/model.py
|
MIT
|
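A worked example of the MFU estimate above, using assumed GPT-2-small-like numbers rather than this repository's actual config:

# Assumed sizes: N ≈ 124M params, 12 layers, 12 heads, head dim 64, context 1024.
N, L, H, Q, T = 124_000_000, 12, 12, 64, 1024
flops_per_token = 6 * N + 12 * L * H * Q * T     # PaLM Appendix B estimate
flops_per_fwdbwd = flops_per_token * T
fwdbwd_per_iter, dt = 16, 0.5                    # hypothetical batch count and step time (s)
flops_achieved = flops_per_fwdbwd * fwdbwd_per_iter / dt
mfu = flops_achieved / 312e12                    # A100 bfloat16 peak FLOPS
print(f"{mfu:.1%}")                              # about 9% for these numbers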
def generate(self, idx, eos, max_new_tokens, temperature=1.0, top_k=None):
"""
Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
the sequence max_new_tokens times, feeding the predictions back into the model each time.
Most likely you'll want to make sure to be in model.eval() mode of operation for this.
Also note this is a super inefficient version of sampling with no key/value cache.
"""
for _ in range(max_new_tokens):
# if the sequence context is growing too long we must crop it at block_size
idx_cond = idx if idx.size(1) <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]
# forward the model to get the logits for the index in the sequence
logits = self(idx_cond)
logits = logits[:, -1, :] # crop to just the final time step
if temperature == 0.0:
# "sample" the single most likely index
_, idx_next = torch.topk(logits, k=1, dim=-1)
else:
# pluck the logits at the final step and scale by desired temperature
logits = logits / temperature
# optionally crop the logits to only the top k options
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
logits[logits < v[:, [-1]]] = -float('Inf')
# apply softmax to convert logits to (normalized) probabilities
probs = F.softmax(logits, dim=-1)
idx_next = torch.multinomial(probs, num_samples=1)
# append sampled index to the running sequence and continue
idx = torch.cat((idx, idx_next), dim=1)
if idx_next==eos:
break
return idx
|
Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
the sequence max_new_tokens times, feeding the predictions back into the model each time.
Most likely you'll want to make sure to be in model.eval() mode of operation for this.
Also note this is a super inefficient version of sampling with no key/value cache.
|
generate
|
python
|
DLLXW/baby-llama2-chinese
|
model.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/model.py
|
MIT
|
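The top-k step of `generate` can be illustrated in isolation; the logits below are assumed values:

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
top_k = 2
# everything outside the top k logits is pushed to -inf before softmax,
# so sampling can only pick from the k most likely tokens
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
logits[logits < v[:, [-1]]] = -float("Inf")
probs = F.softmax(logits, dim=-1)
print(probs)  # probability mass only on the two largest logits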
def export(self, filepath='model.bin'):
"""export the model weights in fp32 into .bin file to be read from C"""
f = open(filepath, 'wb')
def serialize(t):
d = t.detach().cpu().view(-1).numpy().astype(np.float32)
b = struct.pack(f'{len(d)}f', *d)
f.write(b)
# first write out the header
hidden_dim = self.layers[0].feed_forward.w1.weight.shape[0]
p = self.params
n_kv_heads = p.n_heads if p.n_kv_heads is None else p.n_kv_heads
header = struct.pack('iiiiiii', p.dim, hidden_dim, p.n_layers, p.n_heads,
n_kv_heads, p.vocab_size, p.max_seq_len)
f.write(header)
# next write out the embedding weights
serialize(self.tok_embeddings.weight)
# now all the layers
# attention weights
for layer in self.layers:
serialize(layer.attention_norm.weight)
for layer in self.layers:
serialize(layer.attention.wq.weight)
for layer in self.layers:
serialize(layer.attention.wk.weight)
for layer in self.layers:
serialize(layer.attention.wv.weight)
for layer in self.layers:
serialize(layer.attention.wo.weight)
# ffn weights
for layer in self.layers:
serialize(layer.ffn_norm.weight)
for layer in self.layers:
serialize(layer.feed_forward.w1.weight)
for layer in self.layers:
serialize(layer.feed_forward.w2.weight)
for layer in self.layers:
serialize(layer.feed_forward.w3.weight)
# final rmsnorm
serialize(self.norm.weight)
# note: no need to write final classifier weights due to weight sharing
# freqs_cis
serialize(self.freqs_cos[:p.max_seq_len])
serialize(self.freqs_sin[:p.max_seq_len])
# write to binary file
f.close()
print(f"wrote {filepath}")
|
export the model weights in fp32 into .bin file to be read from C
|
export
|
python
|
DLLXW/baby-llama2-chinese
|
model.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/model.py
|
MIT
|
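A sketch of reading back the 7-integer header written by `export`, assuming a file produced by the function above at a hypothetical path `model.bin`:

import struct

with open("model.bin", "rb") as f:
    # same 'iiiiiii' layout as the struct.pack call in export()
    dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, max_seq_len = struct.unpack(
        "iiiiiii", f.read(7 * 4)
    )
print(dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, max_seq_len)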
def convert_token_to_id(self, token):
""" Converts a token (str) into an id using the vocab. """
if token in self.special_tokens:
return self.special_tokens[token]
return self.sp_model.PieceToId(token)
|
Converts a token (str) into an id using the vocab.
|
convert_token_to_id
|
python
|
DLLXW/baby-llama2-chinese
|
chatglm_tokenizer/tokenization_chatglm.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py
|
MIT
|
def convert_id_to_token(self, index):
"""Converts an index (integer) into a token (str) using the vocab."""
if index in self.index_special_tokens or index in [self.eos_id, self.bos_id, self.pad_id] or index < 0:
return ""
return self.sp_model.IdToPiece(index)
|
Converts an index (integer) into a token (str) using the vocab.
|
convert_id_to_token
|
python
|
DLLXW/baby-llama2-chinese
|
chatglm_tokenizer/tokenization_chatglm.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py
|
MIT
|
def save_vocabulary(self, save_directory, filename_prefix=None):
"""
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the names of the saved files.
Returns:
`Tuple(str)`: Paths to the files saved.
"""
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, self.vocab_files_names["vocab_file"]
)
else:
vocab_file = save_directory
with open(self.vocab_file, 'rb') as fin:
proto_str = fin.read()
with open(vocab_file, "wb") as writer:
writer.write(proto_str)
return (vocab_file,)
|
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the names of the saved files.
Returns:
`Tuple(str)`: Paths to the files saved.
|
save_vocabulary
|
python
|
DLLXW/baby-llama2-chinese
|
chatglm_tokenizer/tokenization_chatglm.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py
|
MIT
|
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
prefix_tokens = self.get_prefix_tokens()
token_ids_0 = prefix_tokens + token_ids_0
if token_ids_1 is not None:
token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")]
return token_ids_0
|
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
build_inputs_with_special_tokens
|
python
|
DLLXW/baby-llama2-chinese
|
chatglm_tokenizer/tokenization_chatglm.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py
|
MIT
|
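A toy illustration of what `build_inputs_with_special_tokens` produces; the prefix and `<eos>` ids below are assumptions for the sketch, not necessarily the real ChatGLM vocabulary:

prefix_tokens = [64790, 64792]   # assumed prefix ids (e.g. [gMASK], sop)
eos_id = 2                       # assumed <eos> id

def build_inputs(token_ids_0, token_ids_1=None):
    ids = prefix_tokens + token_ids_0
    if token_ids_1 is not None:
        ids = ids + token_ids_1 + [eos_id]
    return ids

print(build_inputs([10, 11, 12]))        # [64790, 64792, 10, 11, 12]
print(build_inputs([10, 11], [20, 21]))  # [64790, 64792, 10, 11, 20, 21, 2]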
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
assert self.padding_side == "left"
required_input = encoded_inputs[self.model_input_names[0]]
seq_length = len(required_input)
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * seq_length
if "position_ids" not in encoded_inputs:
encoded_inputs["position_ids"] = list(range(seq_length))
if needs_to_be_padded:
difference = max_length - len(required_input)
if "attention_mask" in encoded_inputs:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "position_ids" in encoded_inputs:
encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
return encoded_inputs
|
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
|
_pad
|
python
|
DLLXW/baby-llama2-chinese
|
chatglm_tokenizer/tokenization_chatglm.py
|
https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py
|
MIT
|
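A standalone sketch (not from the repo) that mirrors the left-padding logic above for a single example: pad ids are prepended, the attention mask gets leading zeros, and position ids are zero-filled on the left. The helper name `left_pad` is hypothetical.

# Simplified re-statement of the left-padding behaviour, for illustration only.
def left_pad(input_ids, max_length, pad_token_id=0):
    seq_length = len(input_ids)
    attention_mask = [1] * seq_length
    position_ids = list(range(seq_length))
    difference = max_length - seq_length
    if difference > 0:
        attention_mask = [0] * difference + attention_mask
        position_ids = [0] * difference + position_ids
        input_ids = [pad_token_id] * difference + input_ids
    return {"input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids}

print(left_pad([5, 6, 7], max_length=6))
# {'input_ids': [0, 0, 0, 5, 6, 7], 'attention_mask': [0, 0, 0, 1, 1, 1], 'position_ids': [0, 0, 0, 0, 1, 2]}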
def fill_homoglyphs():
"""
Use http://dev.networkerror.org/utf8/?start=0&end=255&cols=10&show_uni_hex=on
with the stupid table width forced to auto.
This dataset is for ASCII characters mapped to UTF-8 homoglyphs (some approximate).
Some of the entries are also selected from the results of search(), below.
Forward entries should stand a reasonable chance of rendering to look like their ASCII equivalent on most common
fonts and systems.
Reverse entries should exist for anything that could possibly be confused with an ASCII char, even if it doesn't
render on some fonts and systems.
If a character is deemed unprintable on some systems, don't delete it - move it from the fwd string to rev.
"""
from collections import namedtuple
Hgs = namedtuple('Hgs', ('ascii', 'fwd', 'rev'))
all_hgs.extend(Hgs(*t) for t in (
(' ', u'\u00A0\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F', u'\u3000'),
('!', u'\uFF01\u01C3\u2D51\uFE15\uFE57', u'\u119D'),
('"', u'\uFF02', u'\u030E\u17C9'),
('#', u'\uFF03\uFE5F', u''),
('$', u'\uFF04\uFE69', u''),
('%', u'\uFF05\u066A\u2052\uFE6A', u''),
('&', u'\uFF06\uFE60', u''),
("'", u'\uFF07\u02B9\u0374', u'\u030D'),
('(', u'\uFF08\uFE59', u'\u207D\u208D\u27EE'),
(')', u'\uFF09\uFE5A', u'\u207E\u208E\u27EF'),
('*', u'\uFF0A\u22C6\uFE61', u''),
('+', u'\uFF0B\u16ED\uFE62', u'\u207A\u208A'),
(',', u'\uFF0C\u02CF\u16E7\u201A', u'\uFE10\uFE50\u0317\u0326'),
('-', u'\uFF0D\u02D7\u2212\u23BC\u2574\uFE63',
u'\u207B\u208B\u0335\u1680\u174D\u1806\u1C7C\u23AF\u2CBB\u30FC\u3127'),
('.', u'\uFF0E\u2024', u'\uFE52\u0323'),
('/', u'\uFF0F\u1735\u2044\u2215\u29F8', u'\u0338\u2CC6\u3033'),
('0', u'', u'\u2070\u2080\u24EA\uFF10\u1C50'),
('1', u'', u'\u00B9\u2081\u2460\uFF11'),
('2', u'\u14BF', u'\u00B2\u2082\u2461\uFF12'),
('3', u'\u01B7\u2128', u'\u00B3\u2083\u2462\uFF13\u1883\u2CC4\u2CCC\u2CCD'),
('4', u'\u13CE', u'\u2074\u2084\u2463\uFF14'),
('5', u'', u'\u2075\u2085\u2464\uFF15'),
('6', u'\u13EE', u'\u2076\u2086\u2465\uFF16'),
('7', u'', u'\u2077\u2087\u2466\uFF17'),
('8', u'', u'\u2078\u2088\u2467\uFF18'),
('9', u'\u13ED', u'\u2079\u2089\u2468\uFF19'),
(':', u'\uFF1A\u02D0\u02F8\u0589\u1361\u16EC\u205A\u2236\u2806\uFE13\uFE55',
u'\u05C3\u17C7\u17C8\u1804\u1C7A\uFE30'),
(';', u'\uFF1B\u037E\uFE14\uFE54', u''),
('<', u'\uFF1C\u02C2\u2039\u227A\u276E\u2D66\uFE64', u'\u3031\u3111'),
('=', u'\uFF1D\u2550\u268C\uFE66', u'\u207C\u208C\u30A0'),
('>', u'\uFF1E\u02C3\u203A\u227B\u276F\uFE65', u''),
('?', u'\uFF1F\uFE16\uFE56', u''),
('@', u'\uFF20\uFE6B', u''),
('A', u'\u0391\u0410\u13AA', u'\u1D2C\u24B6\uFF21'),
('B', u'\u0392\u0412\u13F4\u15F7\u2C82', u'\u1D2E\u212C\u24B7\uFF22'),
('C', u'\u03F9\u0421\u13DF\u216D\u2CA4', u'\u2102\u212D\u24B8\uFF23'),
('D', u'\u13A0\u15EA\u216E', u'\u1D30\u2145\u24B9\uFF24'),
('E', u'\u0395\u0415\u13AC', u'\u1D31\u2130\u24BA\uFF25'),
('F', u'\u15B4', u'\u2131\u24BB\uFF26'),
('G', u'\u050C\u13C0', u'\u1D33\u24BC\uFF27'),
('H', u'\u0397\u041D\u12D8\u13BB\u157C\u2C8E', u'\u1D34\u210B\u210C\u210D\u24BD\uFF28'),
('I', u'\u0399\u0406\u2160', u'\u1D35\u2110\u2111\u24BE\uFF29'),
('J', u'\u0408\u13AB\u148D', u'\u1D36\u24BF\uFF2A'),
('K', u'\u039A\u13E6\u16D5\u212A\u2C94', u'\u1D37\u24C0\uFF2B'),
('L', u'\u13DE\u14AA\u216C', u'\u1D38\u2112\u24C1\uFF2C\u2CD0\u31C4'),
('M', u'\u039C\u03FA\u041C\u13B7\u216F', u'\u1D39\u2133\u24C2\uFF2D'),
('N', u'\u039D\u2C9A', u'\u1D3A\u2115\u24C3\uFF2E'),
('O', u'\u039F\u041E\u2C9E', u'\u1D3C\u24C4\uFF2F\u1C5B'),
('P', u'\u03A1\u0420\u13E2\u2CA2', u'\u1D3E\u2119\u24C5\uFF30'),
('Q', u'\u051A\u2D55', u'\u211A\u24C6\uFF31\u10B3'),
('R', u'\u13A1\u13D2\u1587', u'\u1D3F\u211B\u211C\u211D\u24C7\uFF32'),
('S', u'\u0405\u13DA', u'\u24C8\uFF33\u10BD'),
('T', u'\u03A4\u0422\u13A2', u'\u1D40\u24C9\uFF34'),
('U', u'', u'\u1D41\u24CA\uFF35'),
('V', u'\u13D9\u2164', u'\u24CB\uFF36'),
('W', u'\u13B3\u13D4', u'\u1D42\u24CC\uFF37'),
('X', u'\u03A7\u0425\u2169\u2CAC', u'\u24CD\uFF38'),
('Y', u'\u03A5\u2CA8', u'\u03D2\u24CE\uFF39'),
('Z', u'\u0396\u13C3', u'\u2124\u24CF\uFF3A'),
('[', u'\uFF3B', u''),
('\\', u'\uFF3C\u2216\u29F5\u29F9\uFE68', u'\u3035'),
(']', u'\uFF3D', u''),
('^', u'\uFF3E\u02C4\u02C6\u1DBA\u2303', u'\u0302'),
('_', u'\uFF3F\u02CD\u268A', u'\u0331\u0320\uFE58'),
('`', u'\uFF40\u02CB\u1FEF\u2035', u'\u0300'),
('a', u'\u0251\u0430', u'\u00AA\u1D43\u1D45\u2090\u24D0\uFF41'),
('b', u'', u'\u1D47\u24D1\uFF42'),
('c', u'\u03F2\u0441\u217D', u'\u1D9C\u24D2\uFF43'),
('d', u'\u0501\u217E', u'\u1D48\u2146\u24D3\uFF44'),
('e', u'\u0435\u1971', u'\u1D49\u2091\u212F\u2147\u24D4\uFF45\u19C9'),
('f', u'', u'\u1DA0\u24D5\uFF46'),
('g', u'\u0261', u'\u1D4D\u1DA2\u210A\u24D6\uFF47'),
('h', u'\u04BB', u'\u02B0\u210E\u24D7\uFF48'),
('i', u'\u0456\u2170', u'\u1D62\u2071\u2139\u2148\u24D8\uFF49'),
('j', u'\u03F3\u0458', u'\u02B2\u2149\u24D9\u2C7C\uFF4A'),
('k', u'', u'\u1D4F\u24DA\uFF4B'),
('l', u'\u217C', u'\u02E1\u2113\u24DB\uFF4C'),
('m', u'\u217F', u'\u1D50\u24DC\uFF4D'),
('n', u'\u1952', u'\u207F\u24DD\uFF4E'),
('o', u'\u03BF\u043E\u0D20\u2C9F', u'\u00BA\u1D52\u2092\u2134\u24DE\uFF4F\u0CE6\u0D66\u199E\u19D0'),
('p', u'\u0440\u2CA3', u'\u1D56\u24DF\uFF50'),
('q', u'', u'\u24E0\uFF51'),
('r', u'', u'\u02B3\u1D63\u24E1\uFF52'),
('s', u'\u0455', u'\u02E2\u24E2\uFF53'),
('t', u'', u'\u1D57\u24E3\uFF54'),
('u', u'\u1959\u222A', u'\u1D58\u1D64\u24E4\uFF55'),
('v', u'\u1D20\u2174\u2228\u22C1', u'\u1D5B\u1D65\u24E5\u2C7D\uFF56'),
('w', u'\u1D21', u'\u02B7\u24E6\uFF57'),
('x', u'\u0445\u2179\u2CAD', u'\u02E3\u2093\u24E7\uFF58'),
('y', u'\u0443\u1EFF', u'\u02B8\u24E8\uFF59'),
('z', u'\u1D22', u'\u1DBB\u24E9\uFF5A\u1901'),
('{', u'\uFF5B\uFE5B', u''),
('|', u'\uFF5C\u01C0\u16C1\u239C\u239F\u23A2\u23A5\u23AA\u23AE\uFFE8',
u'\uFE33\u0846\u1175\u20D2\u2F01\u3021\u4E28\uFE31'),
('}', u'\uFF5D\uFE5C', u''),
('~', u'\uFF5E\u02DC\u2053\u223C', u'\u301C')
))
hg_index.update({c: hgs for hgs in all_hgs for c in hgs.ascii + hgs.fwd + hgs.rev})
|
Use http://dev.networkerror.org/utf8/?start=0&end=255&cols=10&show_uni_hex=on
with the stupid table width forced to auto.
This dataset is for ASCII characters mapped to UTF-8 homoglyphs (some approximate).
Some of the entries are also selected from the results of search(), below.
Forward entries should stand a reasonable chance of rendering to look like their ASCII equivalent on most common
fonts and systems.
Reverse entries should exist for anything that could possibly be confused with an ASCII char, even if it doesn't
render on some fonts and systems.
If a character is deemed unprintable on some systems, don't delete it - move it from the fwd string to rev.
|
fill_homoglyphs
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
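A small lookup sketch, assuming the module-level `all_hgs` list and `hg_index` dict exist and `fill_homoglyphs()` has already run, as in the original module:

# Hypothetical usage after the table has been filled.
hgs = hg_index['a']           # namedtuple Hgs(ascii, fwd, rev) for the letter 'a'
print(hgs.fwd)                # forward homoglyphs, e.g. u'\u0251\u0430'
print(u'\u0430' in hg_index)  # True: Cyrillic 'а' maps back to the same entry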
def get_writer():
"""
:return: A codec writer for stdout. Necessary for output piping to work.
"""
from codecs import getwriter
from sys import stdout
if version_info >= (3,):
return stdout
return getwriter(stdout.encoding or 'utf-8')(stdout)
|
:return: A codec writer for stdout. Necessary for output piping to work.
|
get_writer
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
def listing():
"""
Show a list of all known homoglyphs
"""
out = get_writer()
for hgs in all_hgs:
out.write(hgs.ascii + ':')
if hgs.fwd:
out.write(' fwd ')
for c in hgs.fwd:
out.write(field + c)
out.write(field)
if hgs.rev:
out.write(' rev ')
for c in hgs.rev:
out.write(field + c)
out.write(field)
out.write('\n')
|
Show a list of all known homoglyphs
|
listing
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
def explain(char):
"""
Show an explanation of all known homoglyphs for the given ASCII char
:param char: An ASCII char to explain
"""
if char not in hg_index:
print('No homoglyphs.')
return
try:
import unicodedata
except ImportError:
print('Install docutils.')
return
out = get_writer()
out.write('Char\t%6s %20s %11s Cat Name\n' % ('Point', 'Normal', 'Rev/fwd/asc'))
hgs = hg_index[char]
for hg in hgs.ascii + hgs.fwd + hgs.rev:
norms = ''
for form in ('NFC', 'NFKC', 'NFD', 'NFKD'):
h = unicodedata.normalize(form, hg)
if h == hgs.ascii:
if norms:
norms += ' '
norms += form
fwd_rev = ''
if hg not in hgs.rev:
fwd_rev += 'F'
if hg in hgs.ascii:
fwd_rev += 'A'
else:
fwd_rev += 'R'
out.write(' %(field)c%(hg)c%(field)c\t'
'U+%(point)04X %(norms)20s %(fwdrev)11s %(cat)3s %(name)s\n' % {
'field': field,
'hg': hg,
'point': ord(hg),
'norms': norms,
'fwdrev': fwd_rev,
'cat': unicodedata.category(hg),
'name': unicodedata.name(hg, '<unnamed>')
})
|
Show an explanation of all known homoglyphs for the given ASCII char
:param char: An ASCII char to explain
|
explain
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
def search():
"""
(Not useful to the user) Troll the unicode DB for normalization matches, which are potentially homoglyphs.
"""
try:
import unicodedata
except ImportError:
print('Install docutils.')
return
out = get_writer()
for point in xrange(ord('~') + 1, 0x10000):
u = unichr(point)
for form in ('NFC', 'NFKC', 'NFD', 'NFKD'):
if u in hg_index:
continue
h = unicodedata.normalize(form, u)
if len(h) == 1 and ord(h) != ord(u) and h in hg_index:
out.write('%(ascii)c %(form)s-> %(hg)c\tU+%(point)04X %(cat)s/%(name)s\n' % {
'ascii': h,
'form': form,
'hg': u,
'point': ord(u),
'cat': unicodedata.category(u),
'name': unicodedata.name(u, '<unnamed>')
})
break
|
(Not useful to the user) Troll the unicode DB for normalization matches, which are potentially homoglyphs.
|
search
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
def pipe(replace):
"""
Pipe from input to output
End with ctrl+C or EOF
:param replace: A function to replace each char
"""
out = get_writer()
# "for line in stdin" works for piped input but not keyboard input
while True:
try:
line = read_line()
except EOFError:
return
for c in line:
out.write(replace(c))
out.write('\n')
|
Pipe from input to output
End with ctrl+C or EOF
:param replace: A function to replace each char
|
pipe
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
def pipe_mimic(hardness):
"""
Pipe from input to output, replacing chars with homoglyphs
:param hardness: Percent probability to replace a char
"""
from itertools import chain
from random import random, randrange
def replace(c):
if random() > hardness / 100. or c not in hg_index:
return c
hms = hg_index[c]
# hms contains the current character. We've already decided, above, that this character should be replaced, so
# we need to try and avoid that. Loop through starting at a random index.
fwd = hms.ascii + hms.fwd
start = randrange(len(fwd))
for index in chain(xrange(start, len(fwd)), xrange(0, start)):
if fwd[index] != c:
return fwd[index]
return c
pipe(replace)
|
Pipe from input to output, replacing chars with homoglyphs
:param hardness: Percent probability to replace a char
|
pipe_mimic
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
def replace_check(c):
"""
Replace non-ASCII chars with their code point
"""
if ord(c) <= ord('~'):
return c
return '<%(orig)c:U+%(point)04X>' % {
'orig': c,
'point': ord(c)
}
|
Replace non-ASCII chars with their code point
|
replace_check
|
python
|
reinderien/mimic
|
mimic/__init__.py
|
https://github.com/reinderien/mimic/blob/master/mimic/__init__.py
|
MIT
|
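For example, ASCII passes through unchanged while any non-ASCII char is annotated with its code point:

print(replace_check('a'))        # 'a'
print(replace_check(u'\u00E9'))  # '<é:U+00E9>'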
def build(class_cfg):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
ag_type = class_cfg.WhichOneof('anchor_generator')
if ag_type == 'anchor_generator_stride':
config = class_cfg.anchor_generator_stride
ag = AnchorGeneratorStride(
sizes=list(config.sizes),
anchor_strides=list(config.strides),
anchor_offsets=list(config.offsets),
rotations=list(config.rotations),
match_threshold=class_cfg.matched_threshold,
unmatch_threshold=class_cfg.unmatched_threshold,
class_name=class_cfg.class_name,
custom_values=list(config.custom_values))
return ag
elif ag_type == 'anchor_generator_range':
config = class_cfg.anchor_generator_range
ag = AnchorGeneratorRange(
sizes=list(config.sizes),
anchor_ranges=list(config.anchor_ranges),
rotations=list(config.rotations),
match_threshold=class_cfg.matched_threshold,
unmatch_threshold=class_cfg.unmatched_threshold,
class_name=class_cfg.class_name,
custom_values=list(config.custom_values))
return ag
elif ag_type == 'no_anchor':
return None
else:
raise ValueError(" unknown anchor generator type")
|
Create an anchor generator based on config.
    Args:
        class_cfg: a per-class anchor generator config proto message.
    Returns:
        An anchor generator, or None if `no_anchor` is configured.
    Raises:
        ValueError: when using an unsupported anchor generator type.
|
build
|
python
|
traveller59/second.pytorch
|
second/builder/anchor_generator_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/builder/anchor_generator_builder.py
|
MIT
|
def build(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner,
multi_gpu=False):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
prep_cfg = input_reader_config.preprocess
dataset_cfg = input_reader_config.dataset
num_point_features = model_config.num_point_features
out_size_factor = get_downsample_factor(model_config)
assert out_size_factor > 0
cfg = input_reader_config
db_sampler_cfg = prep_cfg.database_sampler
db_sampler = None
if len(db_sampler_cfg.sample_groups) > 0 or db_sampler_cfg.database_info_path != "": # enable sample
db_sampler = dbsampler_builder.build(db_sampler_cfg)
grid_size = voxel_generator.grid_size
feature_map_size = grid_size[:2] // out_size_factor
feature_map_size = [*feature_map_size, 1][::-1]
print("feature_map_size", feature_map_size)
assert all([n != '' for n in target_assigner.classes]), "you must specify class_name in anchor_generators."
dataset_cls = get_dataset_class(dataset_cfg.dataset_class_name)
assert dataset_cls.NumPointFeatures >= 3, "you must set this to correct value"
assert dataset_cls.NumPointFeatures == num_point_features, "currently you need keep them same"
prep_func = partial(
prep_pointcloud,
root_path=dataset_cfg.kitti_root_path,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
training=training,
max_voxels=prep_cfg.max_number_of_voxels,
remove_outside_points=False,
remove_unknown=prep_cfg.remove_unknown_examples,
create_targets=training,
shuffle_points=prep_cfg.shuffle_points,
gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise),
gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std),
global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise),
global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise),
global_random_rot_range=list(
prep_cfg.global_random_rotation_range_per_object),
global_translate_noise_std=list(prep_cfg.global_translate_noise_std),
db_sampler=db_sampler,
num_point_features=dataset_cls.NumPointFeatures,
anchor_area_threshold=prep_cfg.anchor_area_threshold,
gt_points_drop=prep_cfg.groundtruth_points_drop_percentage,
gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points,
remove_points_after_sample=prep_cfg.remove_points_after_sample,
remove_environment=prep_cfg.remove_environment,
use_group_id=prep_cfg.use_group_id,
out_size_factor=out_size_factor,
multi_gpu=multi_gpu,
min_points_in_gt=prep_cfg.min_num_of_points_in_gt,
random_flip_x=prep_cfg.random_flip_x,
random_flip_y=prep_cfg.random_flip_y,
sample_importance=prep_cfg.sample_importance)
ret = target_assigner.generate_anchors(feature_map_size)
class_names = target_assigner.classes
anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
anchors_list = []
for k, v in anchors_dict.items():
anchors_list.append(v["anchors"])
# anchors = ret["anchors"]
anchors = np.concatenate(anchors_list, axis=0)
anchors = anchors.reshape([-1, target_assigner.box_ndim])
assert np.allclose(anchors, ret["anchors"].reshape(-1, target_assigner.box_ndim))
matched_thresholds = ret["matched_thresholds"]
unmatched_thresholds = ret["unmatched_thresholds"]
anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
anchors[:, [0, 1, 3, 4, 6]])
anchor_cache = {
"anchors": anchors,
"anchors_bv": anchors_bv,
"matched_thresholds": matched_thresholds,
"unmatched_thresholds": unmatched_thresholds,
"anchors_dict": anchors_dict,
}
prep_func = partial(prep_func, anchor_cache=anchor_cache)
dataset = dataset_cls(
info_path=dataset_cfg.kitti_info_path,
root_path=dataset_cfg.kitti_root_path,
class_names=class_names,
prep_func=prep_func)
return dataset
|
Builds a dataset based on the InputReader config.
    Args:
        input_reader_config: an input_reader_pb2.InputReader object.
    Returns:
        A dataset built from the input_reader_config.
    Raises:
        ValueError: On invalid input reader proto.
|
build
|
python
|
traveller59/second.pytorch
|
second/builder/dataset_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/builder/dataset_builder.py
|
MIT
|
def build(similarity_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
similarity_type = similarity_config.WhichOneof('region_similarity')
if similarity_type == 'rotate_iou_similarity':
return region_similarity.RotateIouSimilarity()
elif similarity_type == 'nearest_iou_similarity':
return region_similarity.NearestIouSimilarity()
elif similarity_type == 'distance_similarity':
cfg = similarity_config.distance_similarity
return region_similarity.DistanceSimilarity(
distance_norm=cfg.distance_norm,
with_rotation=cfg.with_rotation,
rotation_alpha=cfg.rotation_alpha)
else:
raise ValueError("unknown similarity type")
|
Create a region similarity calculator based on config.
    Args:
        similarity_config: a region similarity config proto message.
    Returns:
        A region similarity calculator.
    Raises:
        ValueError: when using an unsupported similarity type.
|
build
|
python
|
traveller59/second.pytorch
|
second/builder/similarity_calculator_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/builder/similarity_calculator_builder.py
|
MIT
|
def build(target_assigner_config, bv_range, box_coder):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(target_assigner_config, (target_pb2.TargetAssigner)):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
classes_cfg = target_assigner_config.class_settings
anchor_generators = []
classes = []
feature_map_sizes = []
for class_setting in classes_cfg:
anchor_generator = anchor_generator_builder.build(class_setting)
if anchor_generator is not None:
anchor_generators.append(anchor_generator)
else:
assert target_assigner_config.assign_per_class is False
classes.append(class_setting.class_name)
feature_map_sizes.append(class_setting.feature_map_size)
similarity_calcs = []
for class_setting in classes_cfg:
similarity_calcs.append(similarity_calculator_builder.build(
class_setting.region_similarity_calculator))
positive_fraction = target_assigner_config.sample_positive_fraction
if positive_fraction < 0:
positive_fraction = None
target_assigner = TargetAssigner(
box_coder=box_coder,
anchor_generators=anchor_generators,
feature_map_sizes=feature_map_sizes,
positive_fraction=positive_fraction,
sample_size=target_assigner_config.sample_size,
region_similarity_calculators=similarity_calcs,
classes=classes,
assign_per_class=target_assigner_config.assign_per_class)
return target_assigner
|
Builds a target assigner based on the TargetAssigner config.
    Args:
        target_assigner_config: a target_pb2.TargetAssigner object.
        box_coder: a box coder instance forwarded to the TargetAssigner.
    Returns:
        A TargetAssigner built from the config.
    Raises:
        ValueError: On invalid target assigner proto.
|
build
|
python
|
traveller59/second.pytorch
|
second/builder/target_assigner_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/builder/target_assigner_builder.py
|
MIT
|
def build(voxel_config):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(voxel_config, (voxel_generator_pb2.VoxelGenerator)):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
voxel_generator = VoxelGeneratorV2(
voxel_size=list(voxel_config.voxel_size),
point_cloud_range=list(voxel_config.point_cloud_range),
max_num_points=voxel_config.max_number_of_points_per_voxel,
max_voxels=20000,
full_mean=voxel_config.full_empty_part_with_mean,
block_filtering=voxel_config.block_filtering,
block_factor=voxel_config.block_factor,
block_size=voxel_config.block_size,
height_threshold=voxel_config.height_threshold)
return voxel_generator
|
Builds a voxel generator based on the VoxelGenerator config.
    Args:
        voxel_config: a voxel_generator_pb2.VoxelGenerator object.
    Returns:
        A VoxelGeneratorV2 built from the config.
    Raises:
        ValueError: On invalid voxel generator proto.
|
build
|
python
|
traveller59/second.pytorch
|
second/builder/voxel_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/builder/voxel_builder.py
|
MIT
|
def second_box_encode(boxes,
anchors,
encode_angle_to_vector=False,
smooth_dim=False,
cylindrical=False):
"""box encode for VoxelNet in lidar
Args:
boxes ([N, 7 + ?] Tensor): normal boxes: x, y, z, w, l, h, r, custom values
anchors ([N, 7] Tensor): anchors
"""
# need to convert boxes to z-center format
box_ndim = anchors.shape[-1]
cas, cgs = [], []
if box_ndim > 7:
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=1)
xg, yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=1)
else:
xa, ya, za, wa, la, ha, ra = np.split(anchors, box_ndim, axis=1)
xg, yg, zg, wg, lg, hg, rg = np.split(boxes, box_ndim, axis=1)
diagonal = np.sqrt(la**2 + wa**2) # 4.3
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha # 1.6
    cts = [g - a for g, a in zip(cgs, cas)]
if smooth_dim:
lt = lg / la - 1
wt = wg / wa - 1
ht = hg / ha - 1
else:
lt = np.log(lg / la)
wt = np.log(wg / wa)
ht = np.log(hg / ha)
if encode_angle_to_vector:
rgx = np.cos(rg)
rgy = np.sin(rg)
rax = np.cos(ra)
ray = np.sin(ra)
rtx = rgx - rax
rty = rgy - ray
return np.concatenate([xt, yt, zt, wt, lt, ht, rtx, rty, *cts], axis=1)
else:
rt = rg - ra
return np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts], axis=1)
|
box encode for VoxelNet in lidar
Args:
boxes ([N, 7 + ?] Tensor): normal boxes: x, y, z, w, l, h, r, custom values
anchors ([N, 7] Tensor): anchors
|
second_box_encode
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def second_box_decode(box_encodings,
anchors,
encode_angle_to_vector=False,
smooth_dim=False):
"""box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
"""
# need to convert box_encodings to z-bottom format
box_ndim = anchors.shape[-1]
cas, cts = [], []
if box_ndim > 7:
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)
if encode_angle_to_vector:
xt, yt, zt, wt, lt, ht, rtx, rty, *cts = np.split(box_encodings, box_ndim + 1, axis=-1)
else:
xt, yt, zt, wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1)
else:
xa, ya, za, wa, la, ha, ra = np.split(anchors, box_ndim, axis=-1)
if encode_angle_to_vector:
xt, yt, zt, wt, lt, ht, rtx, rty = np.split(box_encodings, box_ndim + 1, axis=-1)
else:
xt, yt, zt, wt, lt, ht, rt = np.split(box_encodings, box_ndim, axis=-1)
diagonal = np.sqrt(la**2 + wa**2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
if smooth_dim:
lg = (lt + 1) * la
wg = (wt + 1) * wa
hg = (ht + 1) * ha
else:
lg = np.exp(lt) * la
wg = np.exp(wt) * wa
hg = np.exp(ht) * ha
if encode_angle_to_vector:
rax = np.cos(ra)
ray = np.sin(ra)
rgx = rtx + rax
rgy = rty + ray
rg = np.arctan2(rgy, rgx)
else:
rg = rt + ra
cgs = [t + a for t, a in zip(cts, cas)]
return np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs], axis=-1)
|
box decode for VoxelNet in lidar
    Args:
        box_encodings ([N, 7] Tensor): encoded boxes: x, y, z, w, l, h, r
        anchors ([N, 7] Tensor): anchors
|
second_box_decode
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
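A quick round-trip sketch, assuming both functions are importable (e.g. from second.core.box_np_ops): decoding the encoded offsets against the same anchors recovers the original boxes.

import numpy as np

anchors = np.array([[0.0, 0.0, -1.0, 1.6, 3.9, 1.56, 0.0]])   # x, y, z, w, l, h, r
boxes   = np.array([[0.5, 1.0, -0.8, 1.7, 4.0, 1.50, 0.3]])
deltas = second_box_encode(boxes, anchors)      # shape (1, 7) regression targets
decoded = second_box_decode(deltas, anchors)    # inverse transform
assert np.allclose(decoded, boxes)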
def bev_box_encode(boxes,
anchors,
encode_angle_to_vector=False,
smooth_dim=False):
"""box encode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
encode_angle_to_vector: bool. increase aos performance,
decrease other performance.
"""
# need to convert boxes to z-center format
xa, ya, wa, la, ra = np.split(anchors, 5, axis=-1)
xg, yg, wg, lg, rg = np.split(boxes, 5, axis=-1)
diagonal = np.sqrt(la**2 + wa**2) # 4.3
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
if smooth_dim:
lt = lg / la - 1
wt = wg / wa - 1
else:
lt = np.log(lg / la)
wt = np.log(wg / wa)
if encode_angle_to_vector:
rgx = np.cos(rg)
rgy = np.sin(rg)
rax = np.cos(ra)
ray = np.sin(ra)
rtx = rgx - rax
rty = rgy - ray
return np.concatenate([xt, yt, wt, lt, rtx, rty], axis=-1)
else:
rt = rg - ra
return np.concatenate([xt, yt, wt, lt, rt], axis=-1)
|
bev box encode for VoxelNet in lidar
    Args:
        boxes ([N, 5] Tensor): normal boxes: x, y, w, l, r
        anchors ([N, 5] Tensor): anchors
        encode_angle_to_vector: bool. Improves AOS performance but may
            decrease other metrics.
|
bev_box_encode
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def bev_box_decode(box_encodings,
anchors,
encode_angle_to_vector=False,
smooth_dim=False):
"""box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
"""
# need to convert box_encodings to z-bottom format
xa, ya, wa, la, ra = np.split(anchors, 5, axis=-1)
if encode_angle_to_vector:
xt, yt, wt, lt, rtx, rty = np.split(box_encodings, 6, axis=-1)
else:
xt, yt, wt, lt, rt = np.split(box_encodings, 5, axis=-1)
diagonal = np.sqrt(la**2 + wa**2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
if smooth_dim:
lg = (lt + 1) * la
wg = (wt + 1) * wa
else:
lg = np.exp(lt) * la
wg = np.exp(wt) * wa
if encode_angle_to_vector:
rax = np.cos(ra)
ray = np.sin(ra)
rgx = rtx + rax
rgy = rty + ray
rg = np.arctan2(rgy, rgx)
else:
rg = rt + ra
return np.concatenate([xg, yg, wg, lg, rg], axis=-1)
|
bev box decode for VoxelNet in lidar
    Args:
        box_encodings ([N, 5] Tensor): encoded boxes: x, y, w, l, r
        anchors ([N, 5] Tensor): anchors
|
bev_box_decode
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def corners_nd(dims, origin=0.5):
"""generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relate to smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
"""
ndim = int(dims.shape[1])
corners_norm = np.stack(
np.unravel_index(np.arange(2**ndim), [2] * ndim),
axis=1).astype(dims.dtype)
# now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
# (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
# so need to convert to a format which is convenient to do other computing.
# for 2d boxes, format is clockwise start with minimum point
# for 3d boxes, please draw lines by your hand.
if ndim == 2:
# generate clockwise box corners
corners_norm = corners_norm[[0, 1, 3, 2]]
elif ndim == 3:
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape(
[1, 2**ndim, ndim])
return corners
|
generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relate to smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
|
corners_nd
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
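For a single 2D box with dims (2, 4) and the default centered origin (assuming corners_nd is in scope), the corners come out in the documented clockwise order:

import numpy as np

dims = np.array([[2.0, 4.0]], dtype=np.float32)
print(corners_nd(dims))          # shape (1, 4, 2)
# [[[-1., -2.], [-1.,  2.], [ 1.,  2.], [ 1., -2.]]]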
def rbbox2d_to_near_bbox(rbboxes):
"""convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
"""
rots = rbboxes[..., -1]
rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]
bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
return bboxes
|
convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
|
rbbox2d_to_near_bbox
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def rotation_2d(points, angles):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])
return np.einsum('aij,jka->aik', points, rot_mat_T)
|
rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
|
rotation_2d
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
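A one-point check of the clockwise convention, assuming rotation_2d is in scope: rotating (1, 0) by +90° lands on (0, -1).

import numpy as np

points = np.array([[[1.0, 0.0]]])        # [N=1, point_size=1, 2]
angles = np.array([np.pi / 2])
print(np.round(rotation_2d(points, angles), 6))   # [[[ 0. -1.]]]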
def rotation_box(box_corners, angle):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T = np.array([[rot_cos, -rot_sin], [rot_sin, rot_cos]],
dtype=box_corners.dtype)
return box_corners @ rot_mat_T
|
rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
|
rotation_box
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def center_to_corner_box3d(centers,
dims,
angles=None,
origin=(0.5, 0.5, 0.5),
axis=2):
"""convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or array or float): origin point relate to smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
[type]: [description]
"""
# 'length' in kitti format is in x axis.
# yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 8, 3]
if angles is not None:
corners = rotation_3d_in_axis(corners, angles, axis=axis)
corners += centers.reshape([-1, 1, 3])
return corners
|
convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or array or float): origin point relate to smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
[type]: [description]
|
center_to_corner_box3d
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
"""convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
[type]: [description]
"""
# 'length' in kitti format is in x axis.
# xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 4, 2]
if angles is not None:
corners = rotation_2d(corners, angles)
corners += centers.reshape([-1, 1, 2])
return corners
|
convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
[type]: [description]
|
center_to_corner_box2d
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
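Composing the two helpers above (assuming they are in scope): a 2x4 box centred at the origin and rotated by +90° (clockwise) swaps its extents.

import numpy as np

centers = np.array([[0.0, 0.0]])
dims    = np.array([[2.0, 4.0]])
angles  = np.array([np.pi / 2])
corners = center_to_corner_box2d(centers, dims, angles)
print(np.round(corners, 6))
# [[[-2.  1.], [ 2.  1.], [ 2. -1.], [-2. -1.]]]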
def create_anchors_3d_stride(feature_size,
sizes=[1.6, 3.9, 1.56],
anchor_strides=[0.4, 0.4, 0.0],
anchor_offsets=[0.2, -39.8, -1.78],
rotations=[0, np.pi / 2],
dtype=np.float32):
"""
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
"""
# almost 2x faster than v1
x_stride, y_stride, z_stride = anchor_strides
x_offset, y_offset, z_offset = anchor_offsets
z_centers = np.arange(feature_size[0], dtype=dtype)
y_centers = np.arange(feature_size[1], dtype=dtype)
x_centers = np.arange(feature_size[2], dtype=dtype)
z_centers = z_centers * z_stride + z_offset
y_centers = y_centers * y_stride + y_offset
x_centers = x_centers * x_stride + x_offset
sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
rotations = np.array(rotations, dtype=dtype)
rets = np.meshgrid(
x_centers, y_centers, z_centers, rotations, indexing='ij')
tile_shape = [1] * 5
tile_shape[-2] = int(sizes.shape[0])
for i in range(len(rets)):
rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
rets[i] = rets[i][..., np.newaxis] # for concat
sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
sizes = np.tile(sizes, tile_size_shape)
rets.insert(3, sizes)
ret = np.concatenate(rets, axis=-1)
return np.transpose(ret, [2, 1, 0, 3, 4, 5])
|
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
|
create_anchors_3d_stride
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def create_anchors_3d_range(feature_size,
anchor_range,
sizes=[1.6, 3.9, 1.56],
rotations=[0, np.pi / 2],
dtype=np.float32):
"""
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
"""
anchor_range = np.array(anchor_range, dtype)
z_centers = np.linspace(
anchor_range[2], anchor_range[5], feature_size[0], dtype=dtype)
y_centers = np.linspace(
anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype)
x_centers = np.linspace(
anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype)
sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
rotations = np.array(rotations, dtype=dtype)
rets = np.meshgrid(
x_centers, y_centers, z_centers, rotations, indexing='ij')
tile_shape = [1] * 5
tile_shape[-2] = int(sizes.shape[0])
for i in range(len(rets)):
rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
rets[i] = rets[i][..., np.newaxis] # for concat
sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
sizes = np.tile(sizes, tile_size_shape)
rets.insert(3, sizes)
ret = np.concatenate(rets, axis=-1)
res = np.transpose(ret, [2, 1, 0, 3, 4, 5])
return res
|
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
|
create_anchors_3d_range
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
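A shape sketch, assuming the function is in scope: a 1x2x2 feature map with the default single size and two rotations yields 8 anchors of dimension 7.

import numpy as np

anchors = create_anchors_3d_range(
    feature_size=[1, 2, 2],                                  # [D, H, W]
    anchor_range=[0, -40.0, -1.78, 70.4, 40.0, -1.78])
print(anchors.shape)                  # (1, 2, 2, 1, 2, 7)
print(anchors.reshape(-1, 7).shape)   # (8, 7)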
def iou_jit(boxes, query_boxes, eps=1.0):
"""calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + eps) *
(query_boxes[k, 3] - query_boxes[k, 1] + eps))
for n in range(N):
iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
boxes[n, 0], query_boxes[k, 0]) + eps)
if iw > 0:
ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
boxes[n, 1], query_boxes[k, 1]) + eps)
if ih > 0:
ua = (
(boxes[n, 2] - boxes[n, 0] + eps) *
(boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih)
overlaps[n, k] = iw * ih / ua
return overlaps
|
calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
|
iou_jit
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
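A two-box sanity check, assuming iou_jit is in scope (in the repo it is typically numba-jitted, but the pure-Python body gives the same result). With eps set to 0.0 the overlap of [0,0,10,10] and [5,5,15,15] is 25 / 175 ≈ 0.143.

import numpy as np

boxes = np.array([[0.0, 0.0, 10.0, 10.0]])
query = np.array([[5.0, 5.0, 15.0, 15.0]])
print(iou_jit(boxes, query, 0.0))   # eps=0.0 -> [[0.14285714]]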
def corner_to_surfaces_3d(corners):
"""convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
surfaces = np.array([
[corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],
[corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],
[corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],
[corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],
[corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],
[corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],
]).transpose([2, 0, 1, 3])
return surfaces
|
convert 3d box corners from the corner functions above
    to surfaces whose normal vectors all point toward the box interior.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
|
corner_to_surfaces_3d
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def corner_to_surfaces_3d_jit(corners):
"""convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array([
0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7
]).reshape(6, 4)
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
return surfaces
|
convert 3d box corners from the corner functions above
    to surfaces whose normal vectors all point toward the box interior.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
|
corner_to_surfaces_3d_jit
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_centers = voxel_origins + voxel_size * 0.5
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3] - voxel_size * 0.5,
gt_boxes[:, 3:6] + voxel_size,
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
return np.any(ret, axis=1).astype(np.int64)
|
assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
|
assign_label_to_voxel
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_maxes = voxel_origins + voxel_size
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
voxel_corners = minmax_to_corner_3d(voxel_minmax)
gt_box_corners = center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=[0.5, 0.5, 0.5],
axis=2)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
voxel_corners_flat = voxel_corners.reshape([-1, 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([-1, 8, ret.shape[-1]])
return ret.any(-1).any(-1).astype(np.int64)
|
assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
|
assign_label_to_voxel_v3
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def image_box_region_area(img_cumsum, bbox):
"""check a 2d voxel is contained by a box. used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
| | |
|------A---------B
| | |
| | |
|----- C---------D
Iabcd = ID-IB-IC+IA
Args:
img_cumsum: [M, H, W](yx) cumsumed image.
bbox: [N, 4](xyxy) bounding box,
"""
N = bbox.shape[0]
M = img_cumsum.shape[0]
ret = np.zeros([N, M], dtype=img_cumsum.dtype)
ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
ret = ID - IB - IC + IA
return ret
|
check a 2d voxel is contained by a box. used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
| | |
|------A---------B
| | |
| | |
|----- C---------D
Iabcd = ID-IB-IC+IA
Args:
img_cumsum: [M, H, W](yx) cumsumed image.
bbox: [N, 4](xyxy) bounding box,
|
image_box_region_area
|
python
|
traveller59/second.pytorch
|
second/core/box_np_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py
|
MIT
|
def is_line_segment_intersection_jit(lines1, lines2):
"""check if line segments1 and line segments2 have cross point
Args:
lines1 (float, [N, 2, 2]): [description]
lines2 (float, [M, 2, 2]): [description]
Returns:
[type]: [description]
"""
# Return true if line segments AB and CD intersect
N = lines1.shape[0]
M = lines2.shape[0]
ret = np.zeros((N, M), dtype=np.bool_)
for i in range(N):
for j in range(M):
A = lines1[i, 0]
B = lines1[i, 1]
C = lines2[j, 0]
D = lines2[j, 1]
acd = (D[1] - A[1]) * (C[0] - A[0]) > (C[1] - A[1]) * (D[0] - A[0])
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])
if acd != bcd:
abc = (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])
abd = (D[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (D[0] - A[0])
if abc != abd:
ret[i, j] = True
return ret
|
check whether each segment in lines1 intersects each segment in lines2.
Args:
lines1 (float, [N, 2, 2]): [description]
lines2 (float, [M, 2, 2]): [description]
Returns:
[type]: [description]
|
is_line_segment_intersection_jit
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def points_in_convex_polygon_3d_jit_v1(points,
polygon_surfaces,
num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
if num_surfaces is None:
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d_jit(polygon_surfaces[:, :, :3, :])
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
ret = np.ones((num_points, num_polygons), dtype=np.bool_)
sign = 0.0
for i in range(num_points):
for j in range(num_polygons):
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = points[i, 0] * normal_vec[j, k, 0] \
+ points[i, 1] * normal_vec[j, k, 1] \
+ points[i, 2] * normal_vec[j, k, 2] + d[j, k]
if sign >= 0:
ret[i, j] = False
break
return ret
|
check whether points lie inside 3d convex polygons.
    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3] array. All surface normal vectors
            must point toward the polygon interior, and
            max_num_points_of_surface must be at least 3.
        num_surfaces: [num_polygon] array. Indicates how many surfaces
            each polygon contains.
    Returns:
        [num_points, num_polygon] bool array.
|
points_in_convex_polygon_3d_jit_v1
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def points_in_convex_polygon_3d_jit(points,
polygon_surfaces,
num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
if num_surfaces is None:
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d_jitv2(polygon_surfaces[:, :, :3, :])
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
return _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, num_surfaces)
|
check whether points lie inside 3d convex polygons.
    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3] array. All surface normal vectors
            must point toward the polygon interior, and
            max_num_points_of_surface must be at least 3.
        num_surfaces: [num_polygon] array. Indicates how many surfaces
            each polygon contains.
    Returns:
        [num_points, num_polygon] bool array.
|
points_in_convex_polygon_3d_jit
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def points_count_convex_polygon_3d_jit(points,
polygon_surfaces,
num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_polygon] array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
if num_surfaces is None:
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d_jitv2(polygon_surfaces[:, :, :3, :])
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
return _points_count_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, num_surfaces)
|
count points inside 3d convex polygons.
    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3] array. All surface normal vectors
            must point toward the polygon interior, and
            max_num_points_of_surface must be at least 3.
        num_surfaces: [num_polygon] array. Indicates how many surfaces
            each polygon contains.
    Returns:
        [num_polygon] array.
|
points_count_convex_polygon_3d_jit
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def _points_in_convex_polygon_3d_jit(points,
polygon_surfaces,
normal_vec, d,
num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
ret = np.ones((num_points, num_polygons), dtype=np.bool_)
sign = 0.0
for i in range(num_points):
for j in range(num_polygons):
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = points[i, 0] * normal_vec[j, k, 0] \
+ points[i, 1] * normal_vec[j, k, 1] \
+ points[i, 2] * normal_vec[j, k, 2] + d[j, k]
if sign >= 0:
ret[i, j] = False
break
return ret
|
check whether points lie inside 3d convex polygons.
    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3] array. All surface normal vectors
            must point toward the polygon interior, and
            max_num_points_of_surface must be at least 3.
        num_surfaces: [num_polygon] array. Indicates how many surfaces
            each polygon contains.
    Returns:
        [num_points, num_polygon] bool array.
|
_points_in_convex_polygon_3d_jit
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def _points_count_convex_polygon_3d_jit(points,
polygon_surfaces,
normal_vec, d,
num_surfaces=None):
"""count points in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_polygon] array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
ret = np.full((num_polygons,), num_points, dtype=np.int64)
sign = 0.0
for i in range(num_points):
for j in range(num_polygons):
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = points[i, 0] * normal_vec[j, k, 0] \
+ points[i, 1] * normal_vec[j, k, 1] \
+ points[i, 2] * normal_vec[j, k, 2] + d[j, k]
if sign >= 0:
ret[j] -= 1
break
return ret
|
Count points inside 3D convex polygons.
Args:
    points: [num_points, 3] array.
    polygon_surfaces: [num_polygon, max_num_surfaces,
        max_num_points_of_surface, 3] array.
        All surface normal vectors must point toward the polygon interior.
        max_num_points_of_surface must be at least 3.
    num_surfaces: [num_polygon] array. Indicates how many surfaces
        each polygon contains.
Returns:
    [num_polygon] int array of per-polygon point counts.
|
_points_count_convex_polygon_3d_jit
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def points_in_convex_polygon_jit(points, polygon, clockwise=True):
"""check points is in 2d convex polygons. True when point in polygon
Args:
points: [num_points, 2] array.
polygon: [num_polygon, num_points_of_polygon, 2] array.
clockwise: bool. indicate polygon is clockwise.
Returns:
[num_points, num_polygon] bool array.
"""
# first convert polygon to directed lines
num_points_of_polygon = polygon.shape[1]
num_points = points.shape[0]
num_polygons = polygon.shape[0]
if clockwise:
vec1 = polygon - polygon[:, [num_points_of_polygon - 1] +
list(range(num_points_of_polygon - 1)), :]
else:
vec1 = polygon[:, [num_points_of_polygon - 1] +
list(range(num_points_of_polygon - 1)), :] - polygon
# vec1: [num_polygon, num_points_of_polygon, 2]
ret = np.zeros((num_points, num_polygons), dtype=np.bool_)
success = True
cross = 0.0
for i in range(num_points):
for j in range(num_polygons):
success = True
for k in range(num_points_of_polygon):
cross = vec1[j, k, 1] * (polygon[j, k, 0] - points[i, 0])
cross -= vec1[j, k, 0] * (polygon[j, k, 1] - points[i, 1])
if cross >= 0:
success = False
break
ret[i, j] = success
return ret
|
Check whether points lie inside 2D convex polygons. An entry is True
when the point is inside the polygon.
Args:
    points: [num_points, 2] array.
    polygon: [num_polygon, num_points_of_polygon, 2] array.
    clockwise: bool. Indicates the polygon vertices are ordered clockwise.
Returns:
    [num_points, num_polygon] bool array.
|
points_in_convex_polygon_jit
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def points_in_convex_polygon(points, polygon, clockwise=True):
"""check points is in convex polygons. may run 2x faster when write in
cython(don't need to calculate all cross-product between edge and point)
Args:
points: [num_points, 2] array.
polygon: [num_polygon, num_points_of_polygon, 2] array.
clockwise: bool. indicate polygon is clockwise.
Returns:
[num_points, num_polygon] bool array.
"""
# first convert polygon to directed lines
num_lines = polygon.shape[1]
polygon_next = polygon[:, [num_lines - 1] + list(range(num_lines - 1)), :]
if clockwise:
vec1 = (polygon - polygon_next)[np.newaxis, ...]
else:
vec1 = (polygon_next - polygon)[np.newaxis, ...]
vec2 = polygon[np.newaxis, ...] - points[:, np.newaxis, np.newaxis, :]
# [num_points, num_polygon, num_points_of_polygon, 2]
cross = np.cross(vec1, vec2)
return np.all(cross > 0, axis=2)
|
Check whether points lie inside convex polygons. May run about 2x faster
if rewritten in Cython (it would not need every edge/point cross product).
Args:
    points: [num_points, 2] array.
    polygon: [num_polygon, num_points_of_polygon, 2] array.
    clockwise: bool. Indicates the polygon vertices are ordered clockwise.
Returns:
    [num_points, num_polygon] bool array.
|
points_in_convex_polygon
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
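A minimal, self-contained check of points_in_convex_polygon above. It assumes second.core.geometry is importable; the unit square and the two test points are made-up illustration data.

import numpy as np
from second.core.geometry import points_in_convex_polygon

# One clockwise unit square, shape [num_polygon=1, num_points_of_polygon=4, 2].
square = np.array([[[0., 0.], [0., 1.], [1., 1.], [1., 0.]]])
pts = np.array([[0.5, 0.5],   # inside
                [2.0, 0.5]])  # outside
print(points_in_convex_polygon(pts, square))
# -> [[ True]
#     [False]]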
def points_in_convex_polygon_3d_jit_v2(points,
surfaces,
num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
num_polygon = surfaces.shape[0]
max_num_surfaces = surfaces.shape[1]
num_points = points.shape[0]
normal_vec = np.zeros((num_polygon, max_num_surfaces, 3), dtype=surfaces.dtype)
d = np.zeros((num_polygon, max_num_surfaces), dtype=surfaces.dtype)
sv0 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
sv1 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
ret = np.ones((num_points, num_polygon), dtype=np.bool_)
for i in range(num_polygon):
for j in range(max_num_surfaces):
sv0[0] = surfaces[i, j, 0, 0] - surfaces[i, j, 1, 0]
sv0[1] = surfaces[i, j, 0, 1] - surfaces[i, j, 1, 1]
sv0[2] = surfaces[i, j, 0, 2] - surfaces[i, j, 1, 2]
sv1[0] = surfaces[i, j, 1, 0] - surfaces[i, j, 2, 0]
sv1[1] = surfaces[i, j, 1, 1] - surfaces[i, j, 2, 1]
sv1[2] = surfaces[i, j, 1, 2] - surfaces[i, j, 2, 2]
normal_vec[i, j, 0] = (sv0[1] * sv1[2] - sv0[2] * sv1[1])
normal_vec[i, j, 1] = (sv0[2] * sv1[0] - sv0[0] * sv1[2])
normal_vec[i, j, 2] = (sv0[0] * sv1[1] - sv0[1] * sv1[0])
d[i, j] = -surfaces[i, j, 0, 0] * normal_vec[i, j, 0] - \
surfaces[i, j, 0, 1] * normal_vec[i, j, 1] - \
surfaces[i, j, 0, 2] * normal_vec[i, j, 2]
sign = 0.0
for i in range(num_points):
for j in range(num_polygon):
for k in range(max_num_surfaces):
sign = points[i, 0] * normal_vec[j, k, 0] \
+ points[i, 1] * normal_vec[j, k, 1] \
+ points[i, 2] * normal_vec[j, k, 2] + d[j, k]
if sign >= 0:
ret[i, j] = False
break
return ret
|
Check whether points lie inside 3D convex polygons.
Args:
    surfaces: [num_polygon, max_num_surfaces,
        max_num_points_of_surface, 3] array.
        All surface normal vectors must point toward the polygon interior.
        max_num_points_of_surface must be at least 3.
    points: [num_points, 3] array.
    num_surfaces: [num_polygon] array. Indicates how many surfaces
        each polygon contains.
Returns:
    [num_points, num_polygon] bool array.
|
points_in_convex_polygon_3d_jit_v2
|
python
|
traveller59/second.pytorch
|
second/core/geometry.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py
|
MIT
|
def noise_per_object_v3_(gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=5,
group_ids=None):
"""random rotate or remove each groundtrutn independently.
use kitti viewer to test this function points_transform_
Args:
gt_boxes: [N, 7+?], gt box in lidar.points_transform_
points: [M, 3+], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [
-global_random_rot_range, global_random_rot_range
]
enable_grot = np.abs(global_random_rot_range[0] -
global_random_rot_range[1]) >= 1e-3
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [
center_noise_std, center_noise_std, center_noise_std
]
if all([c == 0 for c in center_noise_std]) and all([c == 0 for c in rotation_perturb]) and not enable_grot:
return
if valid_mask is None:
valid_mask = np.ones((num_boxes, ), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(
scale=center_noise_std, size=[num_boxes, num_try, 3])
# loc_noises = np.random.uniform(
# -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(
rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(
grot_lowers[..., np.newaxis],
grot_uppers[..., np.newaxis],
size=[num_boxes, num_try])
if group_ids is not None:
if enable_grot:
set_group_noise_same_v2_(loc_noises, rot_noises, global_rot_noises,
group_ids)
else:
set_group_noise_same_(loc_noises, rot_noises, group_ids)
group_centers, group_id_num_dict = get_group_center(
gt_boxes[:, :3], group_ids)
if enable_grot:
group_transform_v2_(loc_noises, rot_noises, gt_boxes[:, :3],
gt_boxes[:, 6], group_centers,
global_rot_noises, valid_mask)
else:
group_transform_(loc_noises, rot_noises, gt_boxes[:, :3],
gt_boxes[:, 6], group_centers, valid_mask)
group_nums = np.array(list(group_id_num_dict.values()), dtype=np.int64)
origin = [0.5, 0.5, 0.5]
gt_box_corners = box_np_ops.center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=origin,
axis=2)
if group_ids is not None:
if not enable_grot:
selected_noise = noise_per_box_group(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises,
rot_noises, group_nums)
else:
selected_noise = noise_per_box_group_v2_(
gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises,
rot_noises, group_nums, global_rot_noises)
else:
if not enable_grot:
selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises)
else:
selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises,
rot_noises, global_rot_noises)
loc_transforms = _select_transform(loc_noises, selected_noise)
rot_transforms = _select_transform(rot_noises, selected_noise)
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
if points is not None:
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms,
rot_transforms, valid_mask)
box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
|
Randomly rotate or move each ground truth independently.
Use the KITTI viewer to test this function and points_transform_.
Args:
    gt_boxes: [N, 7+?], gt boxes in lidar coordinates.
    points: [M, 3+], point cloud in lidar coordinates.
|
noise_per_object_v3_
|
python
|
traveller59/second.pytorch
|
second/core/preprocess.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/preprocess.py
|
MIT
|
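A hedged sketch of a typical noise_per_object_v3_ call, mirroring the call made inside prep_pointcloud further down; gt_boxes ([N, 7]), points ([M, 3+]) and the boolean gt_boxes_mask are assumed to come from your own dataset pipeline.

import numpy as np
from second.core import preprocess as prep

prep.noise_per_object_v3_(
    gt_boxes, points, gt_boxes_mask,
    rotation_perturb=(-np.pi / 3, np.pi / 3),
    center_noise_std=(1.0, 1.0, 1.0),
    global_random_rot_range=(0.0, 0.0),  # zero range disables global rotation noise
    num_try=100)
# gt_boxes and points are modified in place.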
def noise_per_object_v2_(gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=100):
"""random rotate or remove each groundtrutn independently.
use kitti viewer to test this function points_transform_
Args:
gt_boxes: [N, 7+?], gt box in lidar.points_transform_
points: [M, 3+], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [
-global_random_rot_range, global_random_rot_range
]
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [
center_noise_std, center_noise_std, center_noise_std
]
if valid_mask is None:
valid_mask = np.ones((num_boxes, ), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(
scale=center_noise_std, size=[num_boxes, num_try, 3])
# loc_noises = np.random.uniform(
# -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(
rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(
grot_lowers[..., np.newaxis],
grot_uppers[..., np.newaxis],
size=[num_boxes, num_try])
origin = [0.5, 0.5, 0]
gt_box_corners = box_np_ops.center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=origin,
axis=2)
if np.abs(global_random_rot_range[0] - global_random_rot_range[1]) < 1e-3:
selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises)
else:
selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises,
global_rot_noises)
loc_transforms = _select_transform(loc_noises, selected_noise)
rot_transforms = _select_transform(rot_noises, selected_noise)
if points is not None:
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms,
rot_transforms, valid_mask)
box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
|
Randomly rotate or move each ground truth independently.
Use the KITTI viewer to test this function and points_transform_.
Args:
    gt_boxes: [N, 7+?], gt boxes in lidar coordinates.
    points: [M, 3+], point cloud in lidar coordinates.
|
noise_per_object_v2_
|
python
|
traveller59/second.pytorch
|
second/core/preprocess.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/preprocess.py
|
MIT
|
def global_translate_(gt_boxes, points, noise_translate_std):
"""
Apply global translation to gt_boxes and points.
"""
if not isinstance(noise_translate_std, (list, tuple, np.ndarray)):
noise_translate_std = np.array([noise_translate_std, noise_translate_std, noise_translate_std])
if all([e == 0 for e in noise_translate_std]):
return gt_boxes, points
    noise_translate = np.array([np.random.normal(0, noise_translate_std[0], 1),
                                np.random.normal(0, noise_translate_std[1], 1),
                                np.random.normal(0, noise_translate_std[2], 1)]).T
points[:, :3] += noise_translate
gt_boxes[:, :3] += noise_translate
|
Apply global translation to gt_boxes and points.
|
global_translate_
|
python
|
traveller59/second.pytorch
|
second/core/preprocess.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/preprocess.py
|
MIT
|
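A tiny illustration of global_translate_ above, which translates boxes and points in place; the arrays are toy data and second.core.preprocess is assumed to be importable.

import numpy as np
from second.core.preprocess import global_translate_

gt_boxes = np.zeros((2, 7), dtype=np.float32)
points = np.zeros((5, 4), dtype=np.float32)
global_translate_(gt_boxes, points, noise_translate_std=0.2)
# The same random (x, y, z) offset has now been added to points[:, :3]
# and gt_boxes[:, :3].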
def _compare(self, boxes1, boxes2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
boxes1_bv = box_np_ops.rbbox2d_to_near_bbox(boxes1)
boxes2_bv = box_np_ops.rbbox2d_to_near_bbox(boxes2)
ret = box_np_ops.iou_jit(boxes1_bv, boxes2_bv, eps=0.0)
return ret
|
Compute a matrix of pairwise IoU between nearest (axis-aligned) BEV boxes.
Args:
    boxes1: [N, 5] array of rotated BEV boxes.
    boxes2: [M, 5] array of rotated BEV boxes.
Returns:
    An array with shape [N, M] of pairwise IoU scores.
|
_compare
|
python
|
traveller59/second.pytorch
|
second/core/region_similarity.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/region_similarity.py
|
MIT
|
def _compare(self, boxes1, boxes2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
return box_np_ops.distance_similarity(
boxes1[..., [0, 1, -1]],
boxes2[..., [0, 1, -1]],
dist_norm=self._distance_norm,
with_rotation=self._with_rotation,
rot_alpha=self._rotation_alpha)
|
Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
|
_compare
|
python
|
traveller59/second.pytorch
|
second/core/region_similarity.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/region_similarity.py
|
MIT
|
def assign_per_class(self,
anchors_dict,
gt_boxes,
anchors_mask=None,
gt_classes=None,
gt_names=None,
importance=None):
"""this function assign target individally for each class.
recommend for multi-class network.
"""
def box_encoding_fn(boxes, anchors):
return self._box_coder.encode(boxes, anchors)
targets_list = []
anchor_loc_idx = 0
anchor_gene_idx = 0
for class_name, anchor_dict in anchors_dict.items():
def similarity_fn(anchors, gt_boxes):
anchors_rbv = anchors[:, [0, 1, 3, 4, 6]]
gt_boxes_rbv = gt_boxes[:, [0, 1, 3, 4, 6]]
return self._sim_calcs[anchor_gene_idx].compare(
anchors_rbv, gt_boxes_rbv)
mask = np.array([c == class_name for c in gt_names],
dtype=np.bool_)
feature_map_size = anchor_dict["anchors"].shape[:3]
num_loc = anchor_dict["anchors"].shape[-2]
if anchors_mask is not None:
anchors_mask = anchors_mask.reshape(-1)
a_range = self.anchors_range(class_name)
anchors_mask_class = anchors_mask[a_range[0]:a_range[1]].reshape(-1)
prune_anchor_fn = lambda _: np.where(anchors_mask_class)[0]
else:
prune_anchor_fn = None
# print(f"num of {class_name}:", np.sum(mask))
targets = create_target_np(
anchor_dict["anchors"].reshape(-1, self.box_ndim),
gt_boxes[mask],
similarity_fn,
box_encoding_fn,
prune_anchor_fn=prune_anchor_fn,
gt_classes=gt_classes[mask],
matched_threshold=anchor_dict["matched_thresholds"],
unmatched_threshold=anchor_dict["unmatched_thresholds"],
positive_fraction=self._positive_fraction,
rpn_batch_size=self._sample_size,
norm_by_num_examples=False,
box_code_size=self.box_coder.code_size,
gt_importance=importance)
# print(f"num of positive:", np.sum(targets["labels"] == self.classes.index(class_name) + 1))
anchor_loc_idx += num_loc
targets_list.append(targets)
anchor_gene_idx += 1
targets_dict = {
"labels": [t["labels"] for t in targets_list],
"bbox_targets": [t["bbox_targets"] for t in targets_list],
"importance": [t["importance"] for t in targets_list],
}
targets_dict["bbox_targets"] = np.concatenate([
v.reshape(-1, self.box_coder.code_size)
for v in targets_dict["bbox_targets"]
],
axis=0)
targets_dict["bbox_targets"] = targets_dict["bbox_targets"].reshape(
-1, self.box_coder.code_size)
targets_dict["labels"] = np.concatenate(
[v.reshape(-1) for v in targets_dict["labels"]],
axis=0)
targets_dict["importance"] = np.concatenate(
[v.reshape(-1) for v in targets_dict["importance"]],
axis=0)
targets_dict["labels"] = targets_dict["labels"].reshape(-1)
targets_dict["importance"] = targets_dict["importance"].reshape(-1)
return targets_dict
|
Assign targets individually for each class.
Recommended for multi-class networks.
|
assign_per_class
|
python
|
traveller59/second.pytorch
|
second/core/target_assigner.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/target_assigner.py
|
MIT
|
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of
size count)"""
if count == len(inds):
return data
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=data.dtype)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=data.dtype)
ret.fill(fill)
ret[inds, :] = data
return ret
|
Unmap a subset of items (data) back to the original set of items (of
size count).
|
unmap
|
python
|
traveller59/second.pytorch
|
second/core/target_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/target_ops.py
|
MIT
|
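A minimal example of unmap above: scatter per-inside-anchor labels back onto the full anchor set. The values are toy data, and second.core.target_ops is assumed to be importable.

import numpy as np
from second.core.target_ops import unmap

labels_inside = np.array([1, 0], dtype=np.int32)  # labels for anchors 1 and 3 only
inds_inside = np.array([1, 3])
print(unmap(labels_inside, count=5, inds=inds_inside, fill=-1))
# -> [-1  1 -1  0 -1]   (untouched anchors get the fill value -1, i.e. "ignore")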
def create_target_np(all_anchors,
gt_boxes,
similarity_fn,
box_encoding_fn,
prune_anchor_fn=None,
gt_classes=None,
matched_threshold=0.6,
unmatched_threshold=0.45,
bbox_inside_weight=None,
positive_fraction=None,
rpn_batch_size=300,
norm_by_num_examples=False,
gt_importance=None,
box_code_size=7):
"""Modified from FAIR detectron.
Args:
all_anchors: [num_of_anchors, box_ndim] float tensor.
gt_boxes: [num_gt_boxes, box_ndim] float tensor.
        similarity_fn: a function that accepts anchors and gt_boxes and returns
            a similarity matrix (such as IoU).
        box_encoding_fn: a function that accepts gt_boxes and anchors and returns
            box encodings (offsets).
        prune_anchor_fn: a function that accepts anchors and returns indices of
            valid anchors.
        gt_classes: [num_gt_boxes] int tensor. Ground-truth class ids, which
            must start at 1.
        matched_threshold: float. IoU greater than matched_threshold is
            treated as positive.
        unmatched_threshold: float. IoU smaller than unmatched_threshold is
            treated as negative.
        bbox_inside_weight: unused.
        positive_fraction: [0-1] float or None. If not None, sampling tries to
            keep the pos/neg ratio equal to positive_fraction; if there are not
            enough positives, the rest is filled with negatives.
        rpn_batch_size: int. sample size.
        norm_by_num_examples: bool. Normalize box weights by the number of
            examples (recommended to do this outside instead).
gt_importance: 1d array. loss weight per gt.
Returns:
labels, bbox_targets, bbox_outside_weights
"""
total_anchors = all_anchors.shape[0]
if prune_anchor_fn is not None:
inds_inside = prune_anchor_fn(all_anchors)
anchors = all_anchors[inds_inside, :]
if not isinstance(matched_threshold, float):
matched_threshold = matched_threshold[inds_inside]
if not isinstance(unmatched_threshold, float):
unmatched_threshold = unmatched_threshold[inds_inside]
else:
anchors = all_anchors
inds_inside = None
num_inside = len(inds_inside) if inds_inside is not None else total_anchors
box_ndim = all_anchors.shape[1]
logger.debug('total_anchors: {}'.format(total_anchors))
logger.debug('inds_inside: {}'.format(num_inside))
logger.debug('anchors.shape: {}'.format(anchors.shape))
if gt_classes is None:
gt_classes = np.ones([gt_boxes.shape[0]], dtype=np.int32)
if gt_importance is None:
gt_importance = np.ones([gt_boxes.shape[0]], dtype=np.float32)
# Compute anchor labels:
# label=1 is positive, 0 is negative, -1 is don't care (ignore)
labels = np.empty((num_inside, ), dtype=np.int32)
gt_ids = np.empty((num_inside, ), dtype=np.int32)
labels.fill(-1)
gt_ids.fill(-1)
importance = np.empty((num_inside, ), dtype=np.float32)
importance.fill(1)
if len(gt_boxes) > 0:
# Compute overlaps between the anchors and the gt boxes overlaps
anchor_by_gt_overlap = similarity_fn(anchors, gt_boxes)
# Map from anchor to gt box that has highest overlap
anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
# For each anchor, amount of overlap with most overlapping gt box
anchor_to_gt_max = anchor_by_gt_overlap[np.arange(num_inside),
anchor_to_gt_argmax] #
# Map from gt box to an anchor that has highest overlap
gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
# For each gt box, amount of overlap with most overlapping anchor
gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax,
np.arange(anchor_by_gt_overlap.
shape[1])]
# must remove gt which doesn't match any anchor.
empty_gt_mask = gt_to_anchor_max == 0
gt_to_anchor_max[empty_gt_mask] = -1
"""
if not np.all(empty_gt_mask):
gt_to_anchor_max = gt_to_anchor_max[empty_gt_mask]
anchor_by_gt_overlap = anchor_by_gt_overlap[:, empty_gt_mask]
gt_classes = gt_classes[empty_gt_mask]
gt_boxes = gt_boxes[empty_gt_mask]
"""
# Find all anchors that share the max overlap amount
# (this includes many ties)
anchors_with_max_overlap = np.where(
anchor_by_gt_overlap == gt_to_anchor_max)[0]
# Fg label: for each gt use anchors with highest overlap
# (including ties)
gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
gt_ids[anchors_with_max_overlap] = gt_inds_force
# Fg label: above threshold IOU
pos_inds = anchor_to_gt_max >= matched_threshold
gt_inds = anchor_to_gt_argmax[pos_inds]
labels[pos_inds] = gt_classes[gt_inds]
gt_ids[pos_inds] = gt_inds
bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]
importance[pos_inds] = gt_importance[gt_inds]
else:
# labels[:] = 0
bg_inds = np.arange(num_inside)
fg_inds = np.where(labels > 0)[0]
fg_max_overlap = None
if len(gt_boxes) > 0:
fg_max_overlap = anchor_to_gt_max[fg_inds]
gt_pos_ids = gt_ids[fg_inds]
# bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]
# bg_inds = np.where(labels == 0)[0]
# subsample positive labels if we have too many
if positive_fraction is not None:
num_fg = int(positive_fraction * rpn_batch_size)
if len(fg_inds) > num_fg:
disable_inds = npr.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
fg_inds = np.where(labels > 0)[0]
# subsample negative labels if we have too many
# (samples with replacement, but since the set of bg inds is large most
# samples will not have repeats)
num_bg = rpn_batch_size - np.sum(labels > 0)
# print(num_fg, num_bg, len(bg_inds) )
if len(bg_inds) > num_bg:
enable_inds = bg_inds[npr.randint(len(bg_inds), size=num_bg)]
labels[enable_inds] = 0
bg_inds = np.where(labels == 0)[0]
else:
if len(gt_boxes) == 0:
labels[:] = 0
else:
labels[bg_inds] = 0
# re-enable anchors_with_max_overlap
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
bbox_targets = np.zeros((num_inside, box_code_size),
dtype=all_anchors.dtype)
if len(gt_boxes) > 0:
# print(anchors[fg_inds, :].shape, gt_boxes[anchor_to_gt_argmax[fg_inds], :].shape)
# bbox_targets[fg_inds, :] = box_encoding_fn(
# anchors[fg_inds, :], gt_boxes[anchor_to_gt_argmax[fg_inds], :])
bbox_targets[fg_inds, :] = box_encoding_fn(
gt_boxes[anchor_to_gt_argmax[fg_inds], :], anchors[fg_inds, :])
# Bbox regression loss has the form:
# loss(x) = weight_outside * L(weight_inside * x)
# Inside weights allow us to set zero loss on an element-wise basis
# Bbox regression is only trained on positive examples so we set their
# weights to 1.0 (or otherwise if config is different) and 0 otherwise
# NOTE: we don't need bbox_inside_weights, remove it.
# bbox_inside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)
# bbox_inside_weights[labels == 1, :] = [1.0] * box_ndim
# The bbox regression loss only averages by the number of images in the
# mini-batch, whereas we need to average by the total number of example
# anchors selected
# Outside weights are used to scale each element-wise loss so the final
# average over the mini-batch is correct
# bbox_outside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)
bbox_outside_weights = np.zeros((num_inside, ), dtype=all_anchors.dtype)
# uniform weighting of examples (given non-uniform sampling)
if norm_by_num_examples:
num_examples = np.sum(labels >= 0) # neg + pos
num_examples = np.maximum(1.0, num_examples)
bbox_outside_weights[labels > 0] = 1.0 / num_examples
else:
bbox_outside_weights[labels > 0] = 1.0
# bbox_outside_weights[labels == 0, :] = 1.0 / num_examples
# Map up to original set of anchors
if inds_inside is not None:
labels = unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = unmap(bbox_targets, total_anchors, inds_inside, fill=0)
# bbox_inside_weights = unmap(
# bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = unmap(
bbox_outside_weights, total_anchors, inds_inside, fill=0)
importance = unmap(importance, total_anchors, inds_inside, fill=0)
# return labels, bbox_targets, bbox_outside_weights
ret = {
"labels": labels,
"bbox_targets": bbox_targets,
"bbox_outside_weights": bbox_outside_weights,
"assigned_anchors_overlap": fg_max_overlap,
"positive_gt_id": gt_pos_ids,
"importance": importance,
}
if inds_inside is not None:
ret["assigned_anchors_inds"] = inds_inside[fg_inds]
else:
ret["assigned_anchors_inds"] = fg_inds
return ret
|
Modified from FAIR detectron.
Args:
all_anchors: [num_of_anchors, box_ndim] float tensor.
gt_boxes: [num_gt_boxes, box_ndim] float tensor.
    similarity_fn: a function that accepts anchors and gt_boxes and returns
        a similarity matrix (such as IoU).
    box_encoding_fn: a function that accepts gt_boxes and anchors and returns
        box encodings (offsets).
    prune_anchor_fn: a function that accepts anchors and returns indices of
        valid anchors.
    gt_classes: [num_gt_boxes] int tensor. Ground-truth class ids, which
        must start at 1.
    matched_threshold: float. IoU greater than matched_threshold is
        treated as positive.
    unmatched_threshold: float. IoU smaller than unmatched_threshold is
        treated as negative.
    bbox_inside_weight: unused.
    positive_fraction: [0-1] float or None. If not None, sampling tries to
        keep the pos/neg ratio equal to positive_fraction; if there are not
        enough positives, the rest is filled with negatives.
    rpn_batch_size: int. sample size.
    norm_by_num_examples: bool. Normalize box weights by the number of
        examples (recommended to do this outside instead).
gt_importance: 1d array. loss weight per gt.
Returns:
labels, bbox_targets, bbox_outside_weights
|
create_target_np
|
python
|
traveller59/second.pytorch
|
second/core/target_ops.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/target_ops.py
|
MIT
|
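A toy sketch of create_target_np's label convention (label = class id for positives, 0 for negatives, -1 for ignored anchors). It assumes second.core.target_ops is importable; the similarity and encoding functions below are simple stand-ins for illustration, not the ones used in training.

import numpy as np
from second.core.target_ops import create_target_np

anchors = np.array([[0., 0., 0., 1., 1., 1., 0.],
                    [5., 5., 0., 1., 1., 1., 0.]], dtype=np.float32)
gt_boxes = np.array([[0.1, 0.1, 0., 1., 1., 1., 0.]], dtype=np.float32)

# Stand-in similarity: 1 / (1 + BEV center distance), in (0, 1].
def center_similarity(anchors, gts):
    d = np.linalg.norm(anchors[:, None, :2] - gts[None, :, :2], axis=-1)
    return 1.0 / (1.0 + d)

# Stand-in encoding: plain residuals gt - anchor.
def residual_encoding(gts, anchors):
    return gts - anchors

ret = create_target_np(anchors, gt_boxes,
                       similarity_fn=center_similarity,
                       box_encoding_fn=residual_encoding,
                       matched_threshold=0.6,
                       unmatched_threshold=0.45,
                       box_code_size=7)
print(ret["labels"])        # [1 0]: anchor 0 positive (class 1), anchor 1 negative
print(ret["bbox_targets"])  # residuals are non-zero only for the positive anchor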
def nms_gpu(dets, nms_overlap_thresh, device_id=0):
"""nms in gpu.
Args:
dets ([type]): [description]
nms_overlap_thresh ([type]): [description]
device_id ([type], optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
boxes_num = dets.shape[0]
keep_out = np.zeros([boxes_num], dtype=np.int32)
scores = dets[:, 4]
order = scores.argsort()[::-1].astype(np.int32)
boxes_host = dets[order, :]
threadsPerBlock = 8 * 8
col_blocks = div_up(boxes_num, threadsPerBlock)
cuda.select_device(device_id)
mask_host = np.zeros((boxes_num * col_blocks, ), dtype=np.uint64)
blockspergrid = (div_up(boxes_num, threadsPerBlock),
div_up(boxes_num, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream)
mask_dev = cuda.to_device(mask_host, stream)
nms_kernel[blockspergrid, threadsPerBlock, stream](
boxes_num, nms_overlap_thresh, boxes_dev, mask_dev)
mask_dev.copy_to_host(mask_host, stream=stream)
# stream.synchronize()
num_out = nms_postprocess(keep_out, mask_host, boxes_num)
keep = keep_out[:num_out]
return list(order[keep])
|
NMS on GPU.
Args:
    dets (np.ndarray): [N, 5] boxes; column 4 holds the score.
    nms_overlap_thresh (float): IoU threshold above which boxes are suppressed.
    device_id (int, optional): CUDA device id. Defaults to 0.
Returns:
    list[int]: indices of the kept boxes.
|
nms_gpu
|
python
|
traveller59/second.pytorch
|
second/core/non_max_suppression/nms_gpu.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py
|
MIT
|
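A hedged sketch of calling nms_gpu above; it requires a CUDA-capable GPU with numba.cuda available, and the box values are made up.

import numpy as np
from second.core.non_max_suppression.nms_gpu import nms_gpu

dets = np.array([[ 0.,  0., 10., 10., 0.9],   # [x1, y1, x2, y2, score]
                 [ 1.,  1., 11., 11., 0.8],   # overlaps box 0 heavily
                 [50., 50., 60., 60., 0.7]], dtype=np.float32)
keep = nms_gpu(dets, nms_overlap_thresh=0.5)
# Expected to keep the first and third boxes (indices 0 and 2).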
def rotate_nms_gpu(dets, nms_overlap_thresh, device_id=0):
"""nms in gpu. WARNING: this function can provide right result
but its performance isn't be tested
Args:
dets ([type]): [description]
nms_overlap_thresh ([type]): [description]
device_id ([type], optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
dets = dets.astype(np.float32)
boxes_num = dets.shape[0]
keep_out = np.zeros([boxes_num], dtype=np.int32)
scores = dets[:, 5]
order = scores.argsort()[::-1].astype(np.int32)
boxes_host = dets[order, :]
threadsPerBlock = 8 * 8
col_blocks = div_up(boxes_num, threadsPerBlock)
cuda.select_device(device_id)
# mask_host shape: boxes_num * col_blocks * sizeof(np.uint64)
mask_host = np.zeros((boxes_num * col_blocks, ), dtype=np.uint64)
blockspergrid = (div_up(boxes_num, threadsPerBlock),
div_up(boxes_num, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream)
mask_dev = cuda.to_device(mask_host, stream)
rotate_nms_kernel[blockspergrid, threadsPerBlock, stream](
boxes_num, nms_overlap_thresh, boxes_dev, mask_dev)
mask_dev.copy_to_host(mask_host, stream=stream)
num_out = nms_postprocess(keep_out, mask_host, boxes_num)
keep = keep_out[:num_out]
return list(order[keep])
|
Rotated NMS on GPU. WARNING: this function can produce correct results,
but its performance has not been tested.
Args:
    dets (np.ndarray): [N, 6] rotated boxes; column 5 holds the score.
    nms_overlap_thresh (float): IoU threshold above which boxes are suppressed.
    device_id (int, optional): CUDA device id. Defaults to 0.
Returns:
    list[int]: indices of the kept boxes.
|
rotate_nms_gpu
|
python
|
traveller59/second.pytorch
|
second/core/non_max_suppression/nms_gpu.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py
|
MIT
|
def rotate_iou_gpu(boxes, query_boxes, device_id=0):
"""rotated box iou running in gpu. 500x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
angles(clockwise when positive)
query_boxes (float tensor: [K, 5]): [description]
device_id (int, optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
box_dtype = boxes.dtype
boxes = boxes.astype(np.float32)
query_boxes = query_boxes.astype(np.float32)
N = boxes.shape[0]
K = query_boxes.shape[0]
iou = np.zeros((N, K), dtype=np.float32)
if N == 0 or K == 0:
return iou
threadsPerBlock = 8 * 8
cuda.select_device(device_id)
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
iou_dev = cuda.to_device(iou.reshape([-1]), stream)
rotate_iou_kernel[blockspergrid, threadsPerBlock, stream](
N, K, boxes_dev, query_boxes_dev, iou_dev)
iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
return iou.astype(boxes.dtype)
|
Rotated box IoU running on GPU. About 500x faster than the CPU version
(takes about 5 ms in one example with numba.cuda code).
Converted from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
    boxes (float tensor: [N, 5]): rbboxes. Format: centers, dims,
        angles (clockwise when positive).
    query_boxes (float tensor: [K, 5]): rbboxes in the same format.
    device_id (int, optional): CUDA device id. Defaults to 0.
Returns:
    [N, K] float array of pairwise rotated IoU.
|
rotate_iou_gpu
|
python
|
traveller59/second.pytorch
|
second/core/non_max_suppression/nms_gpu.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py
|
MIT
|
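A hedged sketch for rotate_iou_gpu above (requires a CUDA device); the (x, y, w, l, angle) boxes are toy values.

import numpy as np
from second.core.non_max_suppression.nms_gpu import rotate_iou_gpu

boxes = np.array([[0., 0., 4., 2., 0.]], dtype=np.float32)
query = np.array([[ 0.,  0., 4., 2., 0. ],    # identical box -> IoU 1.0
                  [10., 10., 4., 2., 0.5]],   # far away      -> IoU 0.0
                 dtype=np.float32)
iou = rotate_iou_gpu(boxes, query)  # shape [1, 2], roughly [[1.0, 0.0]]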
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
"""rotated box iou running in gpu. 8x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
angles(clockwise when positive)
query_boxes (float tensor: [K, 5]): [description]
device_id (int, optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
box_dtype = boxes.dtype
boxes = boxes.astype(np.float32)
query_boxes = query_boxes.astype(np.float32)
N = boxes.shape[0]
K = query_boxes.shape[0]
iou = np.zeros((N, K), dtype=np.float32)
if N == 0 or K == 0:
return iou
threadsPerBlock = 8 * 8
cuda.select_device(device_id)
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
iou_dev = cuda.to_device(iou.reshape([-1]), stream)
rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
return iou.astype(boxes.dtype)
|
Rotated box IoU running on GPU. About 8x faster than the CPU version
(takes about 5 ms in one example with numba.cuda code).
Converted from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
    boxes (float tensor: [N, 5]): rbboxes. Format: centers, dims,
        angles (clockwise when positive).
    query_boxes (float tensor: [K, 5]): rbboxes in the same format.
    criterion (int, optional): overlap criterion passed to the kernel.
        Defaults to -1 (standard IoU).
    device_id (int, optional): CUDA device id. Defaults to 0.
Returns:
    [N, K] float array of pairwise overlaps.
|
rotate_iou_gpu_eval
|
python
|
traveller59/second.pytorch
|
second/core/non_max_suppression/nms_gpu.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py
|
MIT
|
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
else:
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
|
Computes area of boxes.
Args:
    boxes: Numpy array with shape [N, 4] holding N boxes.
    add1: bool. If True, use the pixel-inclusive convention (+1 on widths
        and heights).
Returns:
    a numpy array with shape [N] representing box areas
|
area
|
python
|
traveller59/second.pytorch
|
second/data/kitti_common.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py
|
MIT
|
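A quick numeric check of area above, using the [ymin, xmin, ymax, xmax] layout implied by the sibling intersection helper; second.data.kitti_common is assumed to be importable.

import numpy as np
from second.data.kitti_common import area

boxes = np.array([[0., 0., 2., 3.],
                  [1., 1., 4., 5.]])
print(area(boxes))             # [6., 12.]
print(area(boxes, add1=True))  # [12., 20.] with the +1 pixel-inclusive convention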
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
|
Compute pairwise intersection areas between boxes.
Args:
    boxes1: a numpy array with shape [N, 4] holding N boxes
    boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
|
intersection
|
python
|
traveller59/second.pytorch
|
second/data/kitti_common.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py
|
MIT
|
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
|
Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
    boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
|
iou
|
python
|
traveller59/second.pytorch
|
second/data/kitti_common.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py
|
MIT
|
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
# image_infos = []
"""
KITTI annotation format version 2:
{
[optional]points: [N, 3+] point cloud
[optional, for kitti]image: {
image_idx: ...
image_path: ...
image_shape: ...
}
point_cloud: {
num_features: 4
velodyne_path: ...
}
[optional, for kitti]calib: {
R0_rect: ...
Tr_velo_to_cam: ...
P2: ...
}
annos: {
location: [num_gt, 3] array
dimensions: [num_gt, 3] array
rotation_y: [num_gt] angle array
name: [num_gt] ground truth name array
[optional]difficulty: kitti difficulty
[optional]group_ids: used for multi-part object
}
}
"""
root_path = pathlib.Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
info = {}
pc_info = {'num_features': 4}
calib_info = {}
image_info = {'image_idx': idx}
annotations = None
if velodyne:
pc_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['image_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['image_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['image_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
info["image"] = image_info
info["point_cloud"] = pc_info
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array(
[float(info) for info in lines[0].split(' ')[1:13]]).reshape(
[3, 4])
P1 = np.array(
[float(info) for info in lines[1].split(' ')[1:13]]).reshape(
[3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
P3 = np.array(
[float(info) for info in lines[3].split(' ')[1:13]]).reshape(
[3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
calib_info['P0'] = P0
calib_info['P1'] = P1
calib_info['P2'] = P2
calib_info['P3'] = P3
calib_info['R0_rect'] = rect_4x4
calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam
calib_info['Tr_imu_to_velo'] = Tr_imu_to_velo
info["calib"] = calib_info
if annotations is not None:
info['annos'] = annotations
add_difficulty_to_annos(info)
return info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
|
KITTI annotation format version 2:
{
[optional]points: [N, 3+] point cloud
[optional, for kitti]image: {
image_idx: ...
image_path: ...
image_shape: ...
}
point_cloud: {
num_features: 4
velodyne_path: ...
}
[optional, for kitti]calib: {
R0_rect: ...
Tr_velo_to_cam: ...
P2: ...
}
annos: {
location: [num_gt, 3] array
dimensions: [num_gt, 3] array
rotation_y: [num_gt] angle array
name: [num_gt] ground truth name array
[optional]difficulty: kitti difficulty
[optional]group_ids: used for multi-part object
}
}
|
get_kitti_image_info
|
python
|
traveller59/second.pytorch
|
second/data/kitti_common.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py
|
MIT
|
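A hedged sketch of building KITTI infos with get_kitti_image_info above; "/path/to/KITTI" is a placeholder for your dataset root, and only a handful of frames are read for a quick smoke test.

from second.data.kitti_common import get_kitti_image_info

infos = get_kitti_image_info(
    "/path/to/KITTI",            # placeholder dataset root
    training=True,
    label_info=True,
    velodyne=True,
    calib=True,
    image_ids=list(range(10)),   # just the first 10 frames
    relative_path=True)
print(infos[0]["image"]["image_shape"], infos[0]["calib"]["P2"].shape)  # e.g. [375 1242] (4, 4)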
def evaluation(self, detections, output_dir):
"""
        Evaluate detections.
        When you want to evaluate your own dataset, you MUST set the z axis
        and the box z center correctly.
        If you want to evaluate with this KITTI eval function, you must
        provide annotations in the correct format.
        ground_truth_annotations format:
        {
            bbox: [N, 4]; if you fill in fake data, the boxes MUST be more
                than 25 pixels high.
            alpha: [N], you can use -10 to ignore it.
            occluded: [N], you can use zero.
            truncated: [N], you can use zero.
            name: [N]
            location: [N, 3] center of the 3d box.
            dimensions: [N, 3] dimensions of the 3d box.
            rotation_y: [N] angle.
        }
        All fields must be filled, but some fields can be filled with zeros.
"""
if "annos" not in self._kitti_infos[0]:
return None
gt_annos = [info["annos"] for info in self._kitti_infos]
dt_annos = self.convert_detection_to_kitti_annos(detections)
# firstly convert standard detection to kitti-format dt annos
z_axis = 1 # KITTI camera format use y as regular "z" axis.
z_center = 1.0 # KITTI camera box's center is [0.5, 1, 0.5]
# for regular raw lidar data, z_axis = 2, z_center = 0.5.
result_official_dict = get_official_eval_result(
gt_annos,
dt_annos,
self._class_names,
z_axis=z_axis,
z_center=z_center)
result_coco = get_coco_eval_result(
gt_annos,
dt_annos,
self._class_names,
z_axis=z_axis,
z_center=z_center)
return {
"results": {
"official": result_official_dict["result"],
"coco": result_coco["result"],
},
"detail": {
"eval.kitti": {
"official": result_official_dict["detail"],
"coco": result_coco["detail"]
}
},
}
|
Evaluate detections.
When you want to evaluate your own dataset, you MUST set the z axis
and the box z center correctly.
If you want to evaluate with this KITTI eval function, you must
provide annotations in the correct format.
ground_truth_annotations format:
{
    bbox: [N, 4]; if you fill in fake data, the boxes MUST be more
        than 25 pixels high.
    alpha: [N], you can use -10 to ignore it.
    occluded: [N], you can use zero.
    truncated: [N], you can use zero.
    name: [N]
    location: [N, 3] center of the 3d box.
    dimensions: [N, 3] dimensions of the 3d box.
    rotation_y: [N] angle.
}
All fields must be filled, but some fields can be filled with zeros.
|
evaluation
|
python
|
traveller59/second.pytorch
|
second/data/kitti_dataset.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_dataset.py
|
MIT
|
def convert_to_kitti_info_version2(info):
"""convert kitti info v1 to v2 if possible.
"""
if "image" not in info or "calib" not in info or "point_cloud" not in info:
info["image"] = {
'image_shape': info["img_shape"],
'image_idx': info['image_idx'],
'image_path': info['img_path'],
}
info["calib"] = {
"R0_rect": info['calib/R0_rect'],
"Tr_velo_to_cam": info['calib/Tr_velo_to_cam'],
"P2": info['calib/P2'],
}
info["point_cloud"] = {
"velodyne_path": info['velodyne_path'],
}
|
convert kitti info v1 to v2 if possible.
|
convert_to_kitti_info_version2
|
python
|
traveller59/second.pytorch
|
second/data/kitti_dataset.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_dataset.py
|
MIT
|
def evaluation_kitti(self, detections, output_dir):
"""eval by kitti evaluation tool.
I use num_lidar_pts to set easy, mod, hard.
easy: num>15, mod: num>7, hard: num>0.
"""
print("++++++++NuScenes KITTI unofficial Evaluation:")
print(
"++++++++easy: num_lidar_pts>15, mod: num_lidar_pts>7, hard: num_lidar_pts>0"
)
print("++++++++The bbox AP is invalid. Don't forget to ignore it.")
class_names = self._class_names
gt_annos = self.ground_truth_annotations
if gt_annos is None:
return None
gt_annos = deepcopy(gt_annos)
detections = deepcopy(detections)
dt_annos = []
for det in detections:
final_box_preds = det["box3d_lidar"].detach().cpu().numpy()
label_preds = det["label_preds"].detach().cpu().numpy()
scores = det["scores"].detach().cpu().numpy()
anno = kitti.get_start_result_anno()
num_example = 0
box3d_lidar = final_box_preds
for j in range(box3d_lidar.shape[0]):
anno["bbox"].append(np.array([0, 0, 50, 50]))
anno["alpha"].append(-10)
anno["dimensions"].append(box3d_lidar[j, 3:6])
anno["location"].append(box3d_lidar[j, :3])
anno["rotation_y"].append(box3d_lidar[j, 6])
anno["name"].append(class_names[int(label_preds[j])])
anno["truncated"].append(0.0)
anno["occluded"].append(0)
anno["score"].append(scores[j])
num_example += 1
if num_example != 0:
anno = {n: np.stack(v) for n, v in anno.items()}
dt_annos.append(anno)
else:
dt_annos.append(kitti.empty_result_anno())
num_example = dt_annos[-1]["name"].shape[0]
dt_annos[-1]["metadata"] = det["metadata"]
for anno in gt_annos:
names = anno["name"].tolist()
mapped_names = []
for n in names:
if n in self.NameMapping:
mapped_names.append(self.NameMapping[n])
else:
mapped_names.append(n)
anno["name"] = np.array(mapped_names)
for anno in dt_annos:
names = anno["name"].tolist()
mapped_names = []
for n in names:
if n in self.NameMapping:
mapped_names.append(self.NameMapping[n])
else:
mapped_names.append(n)
anno["name"] = np.array(mapped_names)
mapped_class_names = []
for n in self._class_names:
if n in self.NameMapping:
mapped_class_names.append(self.NameMapping[n])
else:
mapped_class_names.append(n)
z_axis = 2
z_center = 0.5
# for regular raw lidar data, z_axis = 2, z_center = 0.5.
result_official_dict = get_official_eval_result(
gt_annos,
dt_annos,
mapped_class_names,
z_axis=z_axis,
z_center=z_center)
result_coco = get_coco_eval_result(
gt_annos,
dt_annos,
mapped_class_names,
z_axis=z_axis,
z_center=z_center)
return {
"results": {
"official": result_official_dict["result"],
"coco": result_coco["result"],
},
"detail": {
"official": result_official_dict["detail"],
"coco": result_coco["detail"],
},
}
|
eval by kitti evaluation tool.
I use num_lidar_pts to set easy, mod, hard.
easy: num>15, mod: num>7, hard: num>0.
|
evaluation_kitti
|
python
|
traveller59/second.pytorch
|
second/data/nuscenes_dataset.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/nuscenes_dataset.py
|
MIT
|
def evaluation(self, detections, output_dir):
"""kitti evaluation is very slow, remove it.
"""
# res_kitti = self.evaluation_kitti(detections, output_dir)
res_nusc = self.evaluation_nusc(detections, output_dir)
res = {
"results": {
"nusc": res_nusc["results"]["nusc"],
# "kitti.official": res_kitti["results"]["official"],
# "kitti.coco": res_kitti["results"]["coco"],
},
"detail": {
"eval.nusc": res_nusc["detail"]["nusc"],
# "eval.kitti": {
# "official": res_kitti["detail"]["official"],
# "coco": res_kitti["detail"]["coco"],
# },
},
}
return res
|
KITTI evaluation is very slow, so it is removed here.
|
evaluation
|
python
|
traveller59/second.pytorch
|
second/data/nuscenes_dataset.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/nuscenes_dataset.py
|
MIT
|
def prep_pointcloud(input_dict,
root_path,
voxel_generator,
target_assigner,
db_sampler=None,
max_voxels=20000,
remove_outside_points=False,
training=True,
create_targets=True,
shuffle_points=False,
remove_unknown=False,
gt_rotation_noise=(-np.pi / 3, np.pi / 3),
gt_loc_noise_std=(1.0, 1.0, 1.0),
global_rotation_noise=(-np.pi / 4, np.pi / 4),
global_scaling_noise=(0.95, 1.05),
global_random_rot_range=(0.78, 2.35),
global_translate_noise_std=(0, 0, 0),
num_point_features=4,
anchor_area_threshold=1,
gt_points_drop=0.0,
gt_drop_max_keep=10,
remove_points_after_sample=True,
anchor_cache=None,
remove_environment=False,
random_crop=False,
reference_detections=None,
out_size_factor=2,
use_group_id=False,
multi_gpu=False,
min_points_in_gt=-1,
random_flip_x=True,
random_flip_y=True,
sample_importance=1.0,
out_dtype=np.float32):
"""convert point cloud to voxels, create targets if ground truths
exists.
input_dict format: dataset.get_sensor_data format
"""
t = time.time()
class_names = target_assigner.classes
points = input_dict["lidar"]["points"]
if training:
anno_dict = input_dict["lidar"]["annotations"]
gt_dict = {
"gt_boxes": anno_dict["boxes"],
"gt_names": anno_dict["names"],
"gt_importance": np.ones([anno_dict["boxes"].shape[0]], dtype=anno_dict["boxes"].dtype),
}
if "difficulty" not in anno_dict:
difficulty = np.zeros([anno_dict["boxes"].shape[0]],
dtype=np.int32)
gt_dict["difficulty"] = difficulty
else:
gt_dict["difficulty"] = anno_dict["difficulty"]
if use_group_id and "group_ids" in anno_dict:
group_ids = anno_dict["group_ids"]
gt_dict["group_ids"] = group_ids
calib = None
if "calib" in input_dict:
calib = input_dict["calib"]
if reference_detections is not None:
assert calib is not None and "image" in input_dict
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(calib["P2"])
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, calib["rect"], calib["Trv2c"])
surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
masks = points_in_convex_polygon_3d_jit(points, surfaces)
points = points[masks.any(-1)]
if remove_outside_points:
assert calib is not None
image_shape = input_dict["image"]["image_shape"]
points = box_np_ops.remove_outside_points(
points, calib["rect"], calib["Trv2c"], calib["P2"], image_shape)
if remove_environment is True and training:
        selected = kitti.keep_arrays_by_name(gt_dict["gt_names"], target_assigner.classes)
_dict_select(gt_dict, selected)
masks = box_np_ops.points_in_rbbox(points, gt_dict["gt_boxes"])
points = points[masks.any(-1)]
metrics = {}
if training:
"""
boxes_lidar = gt_dict["gt_boxes"]
bev_map = simplevis.nuscene_vis(points, boxes_lidar)
cv2.imshow('pre-noise', bev_map)
"""
selected = kitti.drop_arrays_by_name(gt_dict["gt_names"], ["DontCare"])
_dict_select(gt_dict, selected)
if remove_unknown:
remove_mask = gt_dict["difficulty"] == -1
"""
gt_boxes_remove = gt_boxes[remove_mask]
gt_boxes_remove[:, 3:6] += 0.25
points = prep.remove_points_in_boxes(points, gt_boxes_remove)
"""
keep_mask = np.logical_not(remove_mask)
_dict_select(gt_dict, keep_mask)
gt_dict.pop("difficulty")
if min_points_in_gt > 0:
# points_count_rbbox takes 10ms with 10 sweeps nuscenes data
point_counts = box_np_ops.points_count_rbbox(points, gt_dict["gt_boxes"])
mask = point_counts >= min_points_in_gt
_dict_select(gt_dict, mask)
gt_boxes_mask = np.array(
[n in class_names for n in gt_dict["gt_names"]], dtype=np.bool_)
if db_sampler is not None:
group_ids = None
if "group_ids" in gt_dict:
group_ids = gt_dict["group_ids"]
sampled_dict = db_sampler.sample_all(
root_path,
gt_dict["gt_boxes"],
gt_dict["gt_names"],
num_point_features,
random_crop,
gt_group_ids=group_ids,
calib=calib)
if sampled_dict is not None:
sampled_gt_names = sampled_dict["gt_names"]
sampled_gt_boxes = sampled_dict["gt_boxes"]
sampled_points = sampled_dict["points"]
sampled_gt_masks = sampled_dict["gt_masks"]
gt_dict["gt_names"] = np.concatenate(
[gt_dict["gt_names"], sampled_gt_names], axis=0)
gt_dict["gt_boxes"] = np.concatenate(
[gt_dict["gt_boxes"], sampled_gt_boxes])
gt_boxes_mask = np.concatenate(
[gt_boxes_mask, sampled_gt_masks], axis=0)
sampled_gt_importance = np.full([sampled_gt_boxes.shape[0]], sample_importance, dtype=sampled_gt_boxes.dtype)
gt_dict["gt_importance"] = np.concatenate(
[gt_dict["gt_importance"], sampled_gt_importance])
if group_ids is not None:
sampled_group_ids = sampled_dict["group_ids"]
gt_dict["group_ids"] = np.concatenate(
[gt_dict["group_ids"], sampled_group_ids])
if remove_points_after_sample:
masks = box_np_ops.points_in_rbbox(points,
sampled_gt_boxes)
points = points[np.logical_not(masks.any(-1))]
points = np.concatenate([sampled_points, points], axis=0)
pc_range = voxel_generator.point_cloud_range
group_ids = None
if "group_ids" in gt_dict:
group_ids = gt_dict["group_ids"]
prep.noise_per_object_v3_(
gt_dict["gt_boxes"],
points,
gt_boxes_mask,
rotation_perturb=gt_rotation_noise,
center_noise_std=gt_loc_noise_std,
global_random_rot_range=global_random_rot_range,
group_ids=group_ids,
num_try=100)
# should remove unrelated objects after noise per object
# for k, v in gt_dict.items():
# print(k, v.shape)
_dict_select(gt_dict, gt_boxes_mask)
gt_classes = np.array(
[class_names.index(n) + 1 for n in gt_dict["gt_names"]],
dtype=np.int32)
gt_dict["gt_classes"] = gt_classes
gt_dict["gt_boxes"], points = prep.random_flip(gt_dict["gt_boxes"],
points, 0.5, random_flip_x, random_flip_y)
gt_dict["gt_boxes"], points = prep.global_rotation_v2(
gt_dict["gt_boxes"], points, *global_rotation_noise)
gt_dict["gt_boxes"], points = prep.global_scaling_v2(
gt_dict["gt_boxes"], points, *global_scaling_noise)
prep.global_translate_(gt_dict["gt_boxes"], points, global_translate_noise_std)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
mask = prep.filter_gt_box_outside_range_by_center(gt_dict["gt_boxes"], bv_range)
_dict_select(gt_dict, mask)
# limit rad to [-pi, pi]
gt_dict["gt_boxes"][:, 6] = box_np_ops.limit_period(
gt_dict["gt_boxes"][:, 6], offset=0.5, period=2 * np.pi)
# boxes_lidar = gt_dict["gt_boxes"]
# bev_map = simplevis.nuscene_vis(points, boxes_lidar)
# cv2.imshow('post-noise', bev_map)
# cv2.waitKey(0)
if shuffle_points:
# shuffle is a little slow.
np.random.shuffle(points)
# [0, -40, -3, 70.4, 40, 1]
voxel_size = voxel_generator.voxel_size
pc_range = voxel_generator.point_cloud_range
grid_size = voxel_generator.grid_size
# [352, 400]
t1 = time.time()
if not multi_gpu:
res = voxel_generator.generate(
points, max_voxels)
voxels = res["voxels"]
coordinates = res["coordinates"]
num_points = res["num_points_per_voxel"]
num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
else:
res = voxel_generator.generate_multi_gpu(
points, max_voxels)
voxels = res["voxels"]
coordinates = res["coordinates"]
num_points = res["num_points_per_voxel"]
num_voxels = np.array([res["voxel_num"]], dtype=np.int64)
metrics["voxel_gene_time"] = time.time() - t1
example = {
'voxels': voxels,
'num_points': num_points,
'coordinates': coordinates,
"num_voxels": num_voxels,
"metrics": metrics,
}
if calib is not None:
example["calib"] = calib
feature_map_size = grid_size[:2] // out_size_factor
feature_map_size = [*feature_map_size, 1][::-1]
if anchor_cache is not None:
anchors = anchor_cache["anchors"]
anchors_bv = anchor_cache["anchors_bv"]
anchors_dict = anchor_cache["anchors_dict"]
matched_thresholds = anchor_cache["matched_thresholds"]
unmatched_thresholds = anchor_cache["unmatched_thresholds"]
else:
ret = target_assigner.generate_anchors(feature_map_size)
anchors = ret["anchors"]
anchors = anchors.reshape([-1, target_assigner.box_ndim])
anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
anchors[:, [0, 1, 3, 4, 6]])
matched_thresholds = ret["matched_thresholds"]
unmatched_thresholds = ret["unmatched_thresholds"]
example["anchors"] = anchors
anchors_mask = None
if anchor_area_threshold >= 0:
        # slow with high resolution; it is recommended to keep this disabled.
coors = coordinates
dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
coors, tuple(grid_size[::-1][1:]))
dense_voxel_map = dense_voxel_map.cumsum(0)
dense_voxel_map = dense_voxel_map.cumsum(1)
anchors_area = box_np_ops.fused_get_anchors_area(
dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
anchors_mask = anchors_area > anchor_area_threshold
# example['anchors_mask'] = anchors_mask.astype(np.uint8)
example['anchors_mask'] = anchors_mask
# print("prep time", time.time() - t)
metrics["prep_time"] = time.time() - t
if not training:
return example
example["gt_names"] = gt_dict["gt_names"]
# voxel_labels = box_np_ops.assign_label_to_voxel(gt_boxes, coordinates,
# voxel_size, coors_range)
if create_targets:
t1 = time.time()
targets_dict = target_assigner.assign(
anchors,
anchors_dict,
gt_dict["gt_boxes"],
anchors_mask,
gt_classes=gt_dict["gt_classes"],
gt_names=gt_dict["gt_names"],
matched_thresholds=matched_thresholds,
unmatched_thresholds=unmatched_thresholds,
importance=gt_dict["gt_importance"])
"""
boxes_lidar = gt_dict["gt_boxes"]
bev_map = simplevis.nuscene_vis(points, boxes_lidar, gt_dict["gt_names"])
assigned_anchors = anchors[targets_dict['labels'] > 0]
ignored_anchors = anchors[targets_dict['labels'] == -1]
bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], ignored_anchors, [128, 128, 128], 2)
bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], assigned_anchors, [255, 0, 0])
cv2.imshow('anchors', bev_map)
cv2.waitKey(0)
boxes_lidar = gt_dict["gt_boxes"]
pp_map = np.zeros(grid_size[:2], dtype=np.float32)
voxels_max = np.max(voxels[:, :, 2], axis=1, keepdims=False)
voxels_min = np.min(voxels[:, :, 2], axis=1, keepdims=False)
voxels_height = voxels_max - voxels_min
voxels_height = np.minimum(voxels_height, 4)
# sns.distplot(voxels_height)
# plt.show()
pp_map[coordinates[:, 1], coordinates[:, 2]] = voxels_height / 4
pp_map = (pp_map * 255).astype(np.uint8)
pp_map = cv2.cvtColor(pp_map, cv2.COLOR_GRAY2RGB)
pp_map = simplevis.draw_box_in_bev(pp_map, [-50, -50, 3, 50, 50, 1], boxes_lidar, [128, 0, 128], 1)
cv2.imshow('heights', pp_map)
cv2.waitKey(0)
"""
example.update({
'labels': targets_dict['labels'],
'reg_targets': targets_dict['bbox_targets'],
# 'reg_weights': targets_dict['bbox_outside_weights'],
'importance': targets_dict['importance'],
})
return example
|
Convert a point cloud to voxels and create training targets if ground truths exist.
input_dict format: dataset.get_sensor_data format
|
prep_pointcloud
|
python
|
traveller59/second.pytorch
|
second/data/preprocess.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/data/preprocess.py
|
MIT
|
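The angle-limiting step in prep_pointcloud calls box_np_ops.limit_period(..., offset=0.5, period=2 * np.pi) to wrap box yaw into [-pi, pi). A minimal standalone NumPy sketch of that step, assuming the conventional limit_period formula (treat the exact match with box_np_ops as an assumption):

import numpy as np

def limit_period(val, offset=0.5, period=2 * np.pi):
    # Shift val into [-offset * period, (1 - offset) * period); with the defaults this is [-pi, pi).
    return val - np.floor(val / period + offset) * period

yaw = np.array([3.5, -4.0, 0.2])  # illustrative yaw angles in radians
print(limit_period(yaw))          # every value ends up in [-pi, pi)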
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape,
b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(
np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
|
Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
|
assertAllEqual
|
python
|
traveller59/second.pytorch
|
second/framework/test.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/framework/test.py
|
MIT
|
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
      a: The expected numpy ndarray (or anything that can be converted to one), or
        dict of same. Must be a dict iff `b` is a dict.
      b: The actual numpy ndarray (or anything that can be converted to one), or
        dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a,
b))
if is_a_dict:
self.assertCountEqual(
a.keys(),
b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(),
b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k],
b[k],
rtol=rtol,
atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
|
Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
      a: The expected numpy ndarray (or anything that can be converted to one), or
        dict of same. Must be a dict iff `b` is a dict.
      b: The actual numpy ndarray (or anything that can be converted to one), or
        dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
|
assertAllClose
|
python
|
traveller59/second.pytorch
|
second/framework/test.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/framework/test.py
|
MIT
|
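These two helpers are thin wrappers around NumPy's testing utilities with extra diagnostics. A minimal standalone sketch of the equivalent direct NumPy calls (this bypasses the repo's test base class entirely):

import numpy as np

a = np.array([[1.0, 2.0], [3.0, np.nan]])
b = np.array([[1.0, 2.0], [3.0, np.nan]])

# Exact comparison; like assertAllEqual, NaNs in matching positions are accepted.
np.testing.assert_array_equal(a, b)

# Tolerance-based comparison, as in assertAllClose (equal_nan is True by default).
np.testing.assert_allclose(a, b + 1e-8, rtol=1e-6, atol=1e-6)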
def onColorPicker(self):
'''
Show color-picker dialog to select color.
Qt will use the native dialog by default.
'''
dlg = QColorDialog(self)
if self._color:
dlg.setCurrentColor(QColor(self._color))
if dlg.exec_():
# self.setColor(dlg.currentColor().name())
self.setColor(dlg.currentColor().rgba())
|
Show color-picker dialog to select color.
Qt will use the native dialog by default.
|
onColorPicker
|
python
|
traveller59/second.pytorch
|
second/kittiviewer/control_panel.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/kittiviewer/control_panel.py
|
MIT
|
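For reference, a standalone sketch of the same QColorDialog flow outside the control-panel widget. It assumes PyQt5 (which the kittiviewer uses); the initial color is purely illustrative:

import sys
from PyQt5.QtWidgets import QApplication, QColorDialog
from PyQt5.QtGui import QColor

app = QApplication(sys.argv)
dlg = QColorDialog()
dlg.setCurrentColor(QColor(255, 0, 0))  # illustrative initial color
if dlg.exec_():
    # rgba() packs the chosen color into an unsigned int, the same form onColorPicker stores
    print(hex(dlg.currentColor().rgba()))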
def train(config_path,
model_dir,
result_path=None,
create_folder=False,
display_step=50,
summary_step=5,
pretrained_path=None,
pretrained_include=None,
pretrained_exclude=None,
freeze_include=None,
freeze_exclude=None,
multi_gpu=False,
measure_time=False,
resume=False):
"""train a VoxelNet model specified by a config file.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_dir = str(Path(model_dir).resolve())
if create_folder:
if Path(model_dir).exists():
model_dir = torchplus.train.create_folder(model_dir)
model_dir = Path(model_dir)
if not resume and model_dir.exists():
raise ValueError("model dir exists and you don't specify resume.")
model_dir.mkdir(parents=True, exist_ok=True)
if result_path is None:
result_path = model_dir / 'results'
config_file_bkp = "pipeline.config"
if isinstance(config_path, str):
        # config_path is a path to a text proto file here; a config object
        # can also be passed directly, which is useful when you want to
        # train with several different parameter sets in one script.
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
else:
config = config_path
proto_str = text_format.MessageToString(config, indent=2)
with (model_dir / config_file_bkp).open("w") as f:
f.write(proto_str)
input_cfg = config.train_input_reader
eval_input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
net = build_network(model_cfg, measure_time).to(device)
# if train_cfg.enable_mixed_precision:
# net.half()
# net.metrics_to_float()
# net.convert_norm_to_float(net)
target_assigner = net.target_assigner
voxel_generator = net.voxel_generator
print("num parameters:", len(list(net.parameters())))
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
if pretrained_path is not None:
model_dict = net.state_dict()
pretrained_dict = torch.load(pretrained_path)
pretrained_dict = filter_param_dict(pretrained_dict, pretrained_include, pretrained_exclude)
new_pretrained_dict = {}
for k, v in pretrained_dict.items():
if k in model_dict and v.shape == model_dict[k].shape:
new_pretrained_dict[k] = v
print("Load pretrained parameters:")
for k, v in new_pretrained_dict.items():
print(k, v.shape)
model_dict.update(new_pretrained_dict)
net.load_state_dict(model_dict)
freeze_params_v2(dict(net.named_parameters()), freeze_include, freeze_exclude)
net.clear_global_step()
net.clear_metrics()
if multi_gpu:
net_parallel = torch.nn.DataParallel(net)
else:
net_parallel = net
optimizer_cfg = train_cfg.optimizer
loss_scale = train_cfg.loss_scale_factor
fastai_optimizer = optimizer_builder.build(
optimizer_cfg,
net,
mixed=False,
loss_scale=loss_scale)
if loss_scale < 0:
loss_scale = "dynamic"
if train_cfg.enable_mixed_precision:
max_num_voxels = input_cfg.preprocess.max_number_of_voxels * input_cfg.batch_size
assert max_num_voxels < 65535, "spconv fp16 training only support this"
from apex import amp
net, amp_optimizer = amp.initialize(net, fastai_optimizer,
opt_level="O2",
keep_batchnorm_fp32=True,
loss_scale=loss_scale
)
net.metrics_to_float()
else:
amp_optimizer = fastai_optimizer
torchplus.train.try_restore_latest_checkpoints(model_dir,
[fastai_optimizer])
lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, amp_optimizer,
train_cfg.steps)
if train_cfg.enable_mixed_precision:
float_dtype = torch.float16
else:
float_dtype = torch.float32
if multi_gpu:
num_gpu = torch.cuda.device_count()
print(f"MULTI-GPU: use {num_gpu} gpu")
collate_fn = merge_second_batch_multigpu
else:
collate_fn = merge_second_batch
num_gpu = 1
######################
# PREPARE INPUT
######################
dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=True,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
multi_gpu=multi_gpu)
eval_dataset = input_reader_builder.build(
eval_input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=input_cfg.batch_size * num_gpu,
shuffle=True,
num_workers=input_cfg.preprocess.num_workers * num_gpu,
pin_memory=False,
collate_fn=collate_fn,
worker_init_fn=_worker_init_fn,
drop_last=not multi_gpu)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
        batch_size=eval_input_cfg.batch_size,  # multi-GPU is only supported for training, so don't scale the eval batch size
shuffle=False,
num_workers=eval_input_cfg.preprocess.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
######################
# TRAINING
######################
model_logging = SimpleModelLog(model_dir)
model_logging.open()
model_logging.log_text(proto_str + "\n", 0, tag="config")
start_step = net.get_global_step()
total_step = train_cfg.steps
t = time.time()
steps_per_eval = train_cfg.steps_per_eval
clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch
amp_optimizer.zero_grad()
step_times = []
step = start_step
try:
while True:
if clear_metrics_every_epoch:
net.clear_metrics()
for example in dataloader:
lr_scheduler.step(net.get_global_step())
time_metrics = example["metrics"]
example.pop("metrics")
example_torch = example_convert_to_torch(example, float_dtype)
batch_size = example["anchors"].shape[0]
ret_dict = net_parallel(example_torch)
cls_preds = ret_dict["cls_preds"]
loss = ret_dict["loss"].mean()
cls_loss_reduced = ret_dict["cls_loss_reduced"].mean()
loc_loss_reduced = ret_dict["loc_loss_reduced"].mean()
cls_pos_loss = ret_dict["cls_pos_loss"].mean()
cls_neg_loss = ret_dict["cls_neg_loss"].mean()
loc_loss = ret_dict["loc_loss"]
cls_loss = ret_dict["cls_loss"]
cared = ret_dict["cared"]
labels = example_torch["labels"]
if train_cfg.enable_mixed_precision:
with amp.scale_loss(loss, amp_optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)
amp_optimizer.step()
amp_optimizer.zero_grad()
net.update_global_step()
net_metrics = net.update_metrics(cls_loss_reduced,
loc_loss_reduced, cls_preds,
labels, cared)
step_time = (time.time() - t)
step_times.append(step_time)
t = time.time()
metrics = {}
num_pos = int((labels > 0)[0].float().sum().cpu().numpy())
num_neg = int((labels == 0)[0].float().sum().cpu().numpy())
if 'anchors_mask' not in example_torch:
num_anchors = example_torch['anchors'].shape[1]
else:
num_anchors = int(example_torch['anchors_mask'][0].sum())
global_step = net.get_global_step()
if global_step % display_step == 0:
if measure_time:
for name, val in net.get_avg_time_dict().items():
print(f"avg {name} time = {val * 1000:.3f} ms")
loc_loss_elem = [
float(loc_loss[:, :, i].sum().detach().cpu().numpy() /
batch_size) for i in range(loc_loss.shape[-1])
]
metrics["runtime"] = {
"step": global_step,
"steptime": np.mean(step_times),
}
metrics["runtime"].update(time_metrics[0])
step_times = []
metrics.update(net_metrics)
metrics["loss"]["loc_elem"] = loc_loss_elem
metrics["loss"]["cls_pos_rt"] = float(
cls_pos_loss.detach().cpu().numpy())
metrics["loss"]["cls_neg_rt"] = float(
cls_neg_loss.detach().cpu().numpy())
if model_cfg.use_direction_classifier:
dir_loss_reduced = ret_dict["dir_loss_reduced"].mean()
metrics["loss"]["dir_rt"] = float(
dir_loss_reduced.detach().cpu().numpy())
metrics["misc"] = {
"num_vox": int(example_torch["voxels"].shape[0]),
"num_pos": int(num_pos),
"num_neg": int(num_neg),
"num_anchors": int(num_anchors),
"lr": float(amp_optimizer.lr),
"mem_usage": psutil.virtual_memory().percent,
}
model_logging.log_metrics(metrics, global_step)
if global_step % steps_per_eval == 0:
torchplus.train.save_models(model_dir, [net, amp_optimizer],
net.get_global_step())
net.eval()
result_path_step = result_path / f"step_{net.get_global_step()}"
result_path_step.mkdir(parents=True, exist_ok=True)
model_logging.log_text("#################################",
global_step)
model_logging.log_text("# EVAL", global_step)
model_logging.log_text("#################################",
global_step)
model_logging.log_text("Generate output labels...", global_step)
t = time.time()
detections = []
prog_bar = ProgressBar()
net.clear_timer()
prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1)
// eval_input_cfg.batch_size)
for example in iter(eval_dataloader):
example = example_convert_to_torch(example, float_dtype)
detections += net(example)
prog_bar.print_bar()
sec_per_ex = len(eval_dataset) / (time.time() - t)
model_logging.log_text(
f'generate label finished({sec_per_ex:.2f}/s). start eval:',
global_step)
result_dict = eval_dataset.dataset.evaluation(
detections, str(result_path_step))
for k, v in result_dict["results"].items():
model_logging.log_text("Evaluation {}".format(k), global_step)
model_logging.log_text(v, global_step)
model_logging.log_metrics(result_dict["detail"], global_step)
with open(result_path_step / "result.pkl", 'wb') as f:
pickle.dump(detections, f)
net.train()
step += 1
if step >= total_step:
break
if step >= total_step:
break
except Exception as e:
print(json.dumps(example["metadata"], indent=2))
model_logging.log_text(str(e), step)
model_logging.log_text(json.dumps(example["metadata"], indent=2), step)
torchplus.train.save_models(model_dir, [net, amp_optimizer],
step)
raise e
finally:
model_logging.close()
torchplus.train.save_models(model_dir, [net, amp_optimizer],
net.get_global_step())
|
Train a VoxelNet model specified by a config file.
|
train
|
python
|
traveller59/second.pytorch
|
second/pytorch/train.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/train.py
|
MIT
|
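A minimal usage sketch for train. It assumes the repo root is on PYTHONPATH so that second.pytorch.train is importable; the config path and model directory are placeholders, not recommendations:

from second.pytorch.train import train

# Point config_path at an existing pipeline config and model_dir at an empty directory.
train(config_path="second/configs/car.fhd.config",
      model_dir="/tmp/second_model",
      resume=False)

The same function is normally reached through train.py's fire-style command line, e.g. python second/pytorch/train.py train --config_path=... --model_dir=... (treat the exact CLI form as an assumption based on the repo's README).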
def evaluate(config_path,
model_dir=None,
result_path=None,
ckpt_path=None,
measure_time=False,
batch_size=None,
**kwargs):
"""Don't support pickle_result anymore. if you want to generate kitti label file,
please use kitti_anno_to_label_file and convert_detection_to_kitti_annos
in second.data.kitti_dataset.
"""
assert len(kwargs) == 0
model_dir = str(Path(model_dir).resolve())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
result_name = 'eval_results'
if result_path is None:
model_dir = Path(model_dir)
result_path = model_dir / result_name
else:
result_path = Path(result_path)
if isinstance(config_path, str):
        # config_path is a path to a text proto file here; a config object
        # can also be passed directly, which is useful when you want to
        # evaluate with several different parameter sets in one script.
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
else:
config = config_path
input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
net = build_network(model_cfg, measure_time=measure_time).to(device)
if train_cfg.enable_mixed_precision:
net.half()
print("half inference!")
net.metrics_to_float()
net.convert_norm_to_float(net)
target_assigner = net.target_assigner
voxel_generator = net.voxel_generator
if ckpt_path is None:
assert model_dir is not None
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
else:
torchplus.train.restore(ckpt_path, net)
batch_size = batch_size or input_cfg.batch_size
eval_dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=input_cfg.preprocess.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
if train_cfg.enable_mixed_precision:
float_dtype = torch.float16
else:
float_dtype = torch.float32
net.eval()
result_path_step = result_path / f"step_{net.get_global_step()}"
result_path_step.mkdir(parents=True, exist_ok=True)
t = time.time()
detections = []
print("Generate output labels...")
bar = ProgressBar()
bar.start((len(eval_dataset) + batch_size - 1) // batch_size)
prep_example_times = []
prep_times = []
t2 = time.time()
for example in iter(eval_dataloader):
if measure_time:
prep_times.append(time.time() - t2)
torch.cuda.synchronize()
t1 = time.time()
example = example_convert_to_torch(example, float_dtype)
if measure_time:
torch.cuda.synchronize()
prep_example_times.append(time.time() - t1)
with torch.no_grad():
detections += net(example)
bar.print_bar()
if measure_time:
t2 = time.time()
sec_per_example = len(eval_dataset) / (time.time() - t)
print(f'generate label finished({sec_per_example:.2f}/s). start eval:')
if measure_time:
print(
f"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms"
)
print(f"avg prep time: {np.mean(prep_times) * 1000:.3f} ms")
for name, val in net.get_avg_time_dict().items():
print(f"avg {name} time = {val * 1000:.3f} ms")
with open(result_path_step / "result.pkl", 'wb') as f:
pickle.dump(detections, f)
result_dict = eval_dataset.dataset.evaluation(detections,
str(result_path_step))
if result_dict is not None:
for k, v in result_dict["results"].items():
print("Evaluation {}".format(k))
print(v)
|
pickle_result is no longer supported. If you want to generate a KITTI label file,
please use kitti_anno_to_label_file and convert_detection_to_kitti_annos
in second.data.kitti_dataset.
|
evaluate
|
python
|
traveller59/second.pytorch
|
second/pytorch/train.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/train.py
|
MIT
|
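A matching usage sketch for evaluate, under the same assumptions (repo on PYTHONPATH, placeholder paths); ckpt_path=None restores the latest checkpoint found in model_dir:

from second.pytorch.train import evaluate

evaluate(config_path="second/configs/car.fhd.config",  # the config the model was trained with
         model_dir="/tmp/second_model",
         ckpt_path=None,      # None -> restore the latest checkpoint in model_dir
         measure_time=True,
         batch_size=1)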
def helper_tune_target_assigner(config_path, target_rate=None, update_freq=200, update_delta=0.01, num_tune_epoch=5):
"""get information of target assign to tune thresholds in anchor generator.
"""
if isinstance(config_path, str):
        # config_path is a path to a text proto file here; a config object
        # can also be passed directly, which is useful when you want to
        # tune with several different parameter sets in one script.
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
else:
config = config_path
proto_str = text_format.MessageToString(config, indent=2)
input_cfg = config.train_input_reader
eval_input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
net = build_network(model_cfg, False)
# if train_cfg.enable_mixed_precision:
# net.half()
# net.metrics_to_float()
# net.convert_norm_to_float(net)
target_assigner = net.target_assigner
voxel_generator = net.voxel_generator
dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=True,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
multi_gpu=False)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=0,
pin_memory=False,
collate_fn=merge_second_batch,
worker_init_fn=_worker_init_fn,
drop_last=False)
class_count = {}
anchor_count = {}
class_count_tune = {}
anchor_count_tune = {}
for c in target_assigner.classes:
class_count[c] = 0
anchor_count[c] = 0
class_count_tune[c] = 0
anchor_count_tune[c] = 0
step = 0
classes = target_assigner.classes
if target_rate is None:
num_tune_epoch = 0
for epoch in range(num_tune_epoch):
for example in dataloader:
gt_names = example["gt_names"]
for name in gt_names:
class_count_tune[name] += 1
labels = example['labels']
for i in range(1, len(classes) + 1):
anchor_count_tune[classes[i - 1]] += int(np.sum(labels == i))
if target_rate is not None:
for name, rate in target_rate.items():
if class_count_tune[name] > update_freq:
# calc rate
current_rate = anchor_count_tune[name] / class_count_tune[name]
if current_rate > rate:
target_assigner._anchor_generators[classes.index(name)].match_threshold += update_delta
target_assigner._anchor_generators[classes.index(name)].unmatch_threshold += update_delta
else:
target_assigner._anchor_generators[classes.index(name)].match_threshold -= update_delta
target_assigner._anchor_generators[classes.index(name)].unmatch_threshold -= update_delta
anchor_count_tune[name] = 0
class_count_tune[name] = 0
step += 1
for c in target_assigner.classes:
class_count[c] = 0
anchor_count[c] = 0
total_voxel_gene_time = 0
count = 0
for example in dataloader:
gt_names = example["gt_names"]
total_voxel_gene_time += example["metrics"][0]["voxel_gene_time"]
count += 1
for name in gt_names:
class_count[name] += 1
labels = example['labels']
for i in range(1, len(classes) + 1):
anchor_count[classes[i - 1]] += int(np.sum(labels == i))
print("avg voxel gene time", total_voxel_gene_time / count)
print(json.dumps(class_count, indent=2))
print(json.dumps(anchor_count, indent=2))
if target_rate is not None:
for ag in target_assigner._anchor_generators:
if ag.class_name in target_rate:
print(ag.class_name, ag.match_threshold, ag.unmatch_threshold)
|
Collect target-assignment statistics to tune the matching thresholds in the anchor generators.
|
helper_tune_target_assigner
|
python
|
traveller59/second.pytorch
|
second/pytorch/train.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/train.py
|
MIT
|
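A usage sketch for helper_tune_target_assigner, with the same import assumption. target_rate maps a class name (it must match the config's class names, e.g. "Car" for KITTI) to a desired number of positive anchors per ground-truth box; the value 20.0 is purely illustrative:

from second.pytorch.train import helper_tune_target_assigner

helper_tune_target_assigner("second/configs/car.fhd.config",
                            target_rate={"Car": 20.0},
                            update_freq=200,
                            update_delta=0.01,
                            num_tune_epoch=2)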
def build(box_coder_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
box_coder_type = box_coder_config.WhichOneof('box_coder')
if box_coder_type == 'ground_box3d_coder':
cfg = box_coder_config.ground_box3d_coder
return GroundBox3dCoderTorch(cfg.linear_dim, cfg.encode_angle_vector)
elif box_coder_type == 'bev_box_coder':
cfg = box_coder_config.bev_box_coder
return BevBoxCoderTorch(cfg.linear_dim, cfg.encode_angle_vector, cfg.z_fixed, cfg.h_fixed)
else:
raise ValueError("unknown box_coder type")
|
Create a box coder based on config.
Args:
    box_coder_config: A BoxCoder proto message.
Returns:
    A box coder object.
Raises:
    ValueError: when using an unsupported box coder type.
|
build
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/box_coder_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/box_coder_builder.py
|
MIT
|
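A sketch of driving this builder from a hand-written config. The module paths assume the repo's layout (second.protos.box_coder_pb2 with a BoxCoder message); treat them as assumptions, and the field values as illustrative:

from google.protobuf import text_format
from second.protos import box_coder_pb2
from second.pytorch.builder import box_coder_builder

cfg = box_coder_pb2.BoxCoder()
text_format.Merge("""
ground_box3d_coder {
  linear_dim: false
  encode_angle_vector: false
}
""", cfg)
box_coder = box_coder_builder.build(cfg)  # returns a GroundBox3dCoderTorch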
def build(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner=None,
multi_gpu=False) -> DatasetWrapper:
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
dataset = dataset_builder.build(
input_reader_config,
model_config,
training,
voxel_generator,
target_assigner,
multi_gpu=multi_gpu)
dataset = DatasetWrapper(dataset)
return dataset
|
Builds a dataset based on the InputReader config.
Args:
    input_reader_config: An input_reader_pb2.InputReader object.
Returns:
    A DatasetWrapper built from the input_reader_config.
Raises:
    ValueError: On invalid input reader proto.
    ValueError: If no input paths are specified.
|
build
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/input_reader_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/input_reader_builder.py
|
MIT
|
def build(loss_config):
"""Build losses based on the config.
Builds classification, localization losses and optionally a hard example miner
based on the config.
Args:
loss_config: A losses_pb2.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
hard_example_miner: Hard example miner object.
Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
"""
classification_loss = _build_classification_loss(
loss_config.classification_loss)
localization_loss = _build_localization_loss(
loss_config.localization_loss)
classification_weight = loss_config.classification_weight
localization_weight = loss_config.localization_weight
hard_example_miner = None
if loss_config.HasField('hard_example_miner'):
        raise ValueError("PyTorch doesn't support HardExampleMiner")
return (classification_loss, localization_loss,
classification_weight,
localization_weight, hard_example_miner)
|
Build losses based on the config.
Builds classification, localization losses and optionally a hard example miner
based on the config.
Args:
loss_config: A losses_pb2.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
hard_example_miner: Hard example miner object.
Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
|
build
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/losses_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py
|
MIT
|
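A sketch of a loss config that exercises this builder. The field names follow the branches in the builder code; the message/module names (second.protos.losses_pb2.Loss) are assumptions based on the repo layout, and the numeric values are illustrative:

from google.protobuf import text_format
from second.protos import losses_pb2
from second.pytorch.builder import losses_builder

loss_cfg = losses_pb2.Loss()
text_format.Merge("""
classification_loss {
  weighted_sigmoid_focal {
    gamma: 2.0
    alpha: 0.25
  }
}
localization_loss {
  weighted_smooth_l1 {
    sigma: 3.0
  }
}
classification_weight: 1.0
localization_weight: 2.0
""", loss_cfg)
cls_loss, loc_loss, cls_w, loc_w, miner = losses_builder.build(loss_cfg)  # miner is always None here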
def build_faster_rcnn_classification_loss(loss_config):
"""Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
if loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
# By default, Faster RCNN second stage classifier uses Softmax loss
# with anchor-wise outputs.
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
|
Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
|
build_faster_rcnn_classification_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/losses_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py
|
MIT
|
def _build_localization_loss(loss_config):
"""Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.LocalizationLoss):
raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')
loss_type = loss_config.WhichOneof('localization_loss')
if loss_type == 'weighted_l2':
config = loss_config.weighted_l2
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return losses.WeightedL2LocalizationLoss(code_weight)
if loss_type == 'weighted_smooth_l1':
config = loss_config.weighted_smooth_l1
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return losses.WeightedSmoothL1LocalizationLoss(config.sigma, code_weight)
if loss_type == 'weighted_ghm':
config = loss_config.weighted_ghm
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return GHMRLoss(config.mu, config.bins, config.momentum, code_weight)
raise ValueError('Empty loss config.')
|
Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
|
_build_localization_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/losses_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py
|
MIT
|
def _build_classification_loss(loss_config):
"""Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
if loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
# alpha = None
# if config.HasField('alpha'):
# alpha = config.alpha
if config.alpha > 0:
alpha = config.alpha
else:
alpha = None
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
if loss_type == 'weighted_softmax_focal':
config = loss_config.weighted_softmax_focal
# alpha = None
# if config.HasField('alpha'):
# alpha = config.alpha
if config.alpha > 0:
alpha = config.alpha
else:
alpha = None
return losses.SoftmaxFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
if loss_type == 'weighted_ghm':
config = loss_config.weighted_ghm
return GHMCLoss(
bins=config.bins,
momentum=config.momentum)
if loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
if loss_type == 'bootstrapped_sigmoid':
config = loss_config.bootstrapped_sigmoid
return losses.BootstrappedSigmoidClassificationLoss(
alpha=config.alpha,
bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))
raise ValueError('Empty loss config.')
|
Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
|
_build_classification_loss
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/losses_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py
|
MIT
|
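Note the alpha handling above: a non-positive alpha in a focal-loss config is read as "no alpha weighting" (alpha=None). A quick sketch of that path, with the same hypothetical module paths as the previous example:

from google.protobuf import text_format
from second.protos import losses_pb2
from second.pytorch.builder import losses_builder

cls_cfg = losses_pb2.ClassificationLoss()
text_format.Merge("""
weighted_sigmoid_focal {
  gamma: 2.0
  alpha: 0.0
}
""", cls_cfg)
# alpha <= 0 in the config means the builder passes alpha=None to SigmoidFocalClassificationLoss.
focal_loss = losses_builder._build_classification_loss(cls_cfg)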
def build(optimizer_config, optimizer, total_step):
"""Create lr scheduler based on config. note that
lr_scheduler must accept a optimizer that has been restored.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
lr_scheduler = _create_learning_rate_scheduler(
config.learning_rate, optimizer, total_step=total_step)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
lr_scheduler = _create_learning_rate_scheduler(
config.learning_rate, optimizer, total_step=total_step)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
lr_scheduler = _create_learning_rate_scheduler(
config.learning_rate, optimizer, total_step=total_step)
return lr_scheduler
|
Create an lr scheduler based on config. Note that the
lr scheduler must accept an optimizer that has already been restored.
Args:
    optimizer_config: An Optimizer proto message.
Returns:
    A learning rate scheduler.
Raises:
    ValueError: when using an unsupported learning rate type.
|
build
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/lr_scheduler_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/lr_scheduler_builder.py
|
MIT
|
def _create_learning_rate_scheduler(learning_rate_config, optimizer, total_step):
"""Create optimizer learning rate scheduler based on config.
Args:
learning_rate_config: A LearningRate proto message.
Returns:
A learning rate.
Raises:
ValueError: when using an unsupported input data type.
"""
lr_scheduler = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'multi_phase':
config = learning_rate_config.multi_phase
lr_phases = []
mom_phases = []
for phase_cfg in config.phases:
lr_phases.append((phase_cfg.start, phase_cfg.lambda_func))
mom_phases.append((phase_cfg.start, phase_cfg.momentum_lambda_func))
lr_scheduler = lsf.LRSchedulerStep(
            optimizer, total_step, lr_phases, mom_phases)
if learning_rate_type == 'one_cycle':
config = learning_rate_config.one_cycle
lr_scheduler = lsf.OneCycle(
optimizer, total_step, config.lr_max, list(config.moms), config.div_factor, config.pct_start)
if learning_rate_type == 'exponential_decay':
config = learning_rate_config.exponential_decay
lr_scheduler = lsf.ExponentialDecay(
optimizer, total_step, config.initial_learning_rate, config.decay_length, config.decay_factor, config.staircase)
if learning_rate_type == 'manual_stepping':
config = learning_rate_config.manual_stepping
lr_scheduler = lsf.ManualStepping(
optimizer, total_step, list(config.boundaries), list(config.rates))
if lr_scheduler is None:
raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
return lr_scheduler
|
Create a learning rate scheduler for an optimizer based on config.
Args:
    learning_rate_config: A LearningRate proto message.
Returns:
    A learning rate scheduler.
Raises:
    ValueError: when using an unsupported learning rate type.
|
_create_learning_rate_scheduler
|
python
|
traveller59/second.pytorch
|
second/pytorch/builder/lr_scheduler_builder.py
|
https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/lr_scheduler_builder.py
|
MIT
|
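For reference, the one_cycle branch above reads lr_max, moms, div_factor and pct_start from the LearningRate message. An illustrative learning_rate block as it would appear in a pipeline config (nested under train_config.optimizer in this repo; the values are examples, not recommendations):

learning_rate {
  one_cycle {
    lr_max: 0.003
    moms: [0.95, 0.85]
    div_factor: 10.0
    pct_start: 0.4
  }
}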