| text | id | metadata |
| --- | --- | --- |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the implementation of the LoraPlus optimizer.
"""
from __future__ import annotations
from operator import attrgetter
import torch.nn as nn
from torch.optim import Optimizer
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names
from ..peft_model import PeftModel
from ..tuners.lora.layer import Embedding
def create_loraplus_optimizer(
model: PeftModel, optimizer_cls: type[Optimizer], *, lr: float, loraplus_lr_ratio: float, **kwargs
) -> Optimizer:
"""
Creates a LoraPlus optimizer.
Efficient Low Rank Adaptation of Large Models: https://arxiv.org/abs/2402.12354
Reference: https://github.com/nikhil-ghosh-berkeley/loraplus/
Args:
model (`torch.nn.Module`): The model to be optimized.
optimizer_cls (`torch.optim.Optimizer`): The optimizer class to be used.
lr (`float`): The learning rate to be used for the optimizer.
loraplus_lr_ratio (`float`):
The ratio of learning rates ηB/ηA, where ηA (lr) is passed in as the optimizer learning rate. Should be ≥ 1.
Should be set in tandem with the optimizer learning rate (lr); it should be larger when the task is more
difficult and the model needs to update its features to learn well. In this case, it helps to make the
learning rate slightly smaller (e.g., by a factor of 2) than typical vanilla LoRA learning rates.
loraplus_lr_embedding (`float`, *optional*):
If LoRA modules are added to embedding layers, you can specify a different learning rate for them. Defaults
to 1e-6.
kwargs (`dict`): Additional keyword arguments to be passed to the optimizer.
Returns:
`torch.optim.Optimizer`: An instance of the specified optimizer class configured with the model's parameters
organized into groups with custom learning rates.
"""
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
param_groups = {
"groupA": {},
"groupB": {},
"groupB_no_decay": {},
"embedding": {},
}
for name, param in model.named_parameters():
if not param.requires_grad:
continue
module = attrgetter(name)(model)
if isinstance(module, Embedding):
param_groups["embedding"][name] = param
elif "lora_B" in name or param.ndim == 1:
if name in decay_parameters:
param_groups["groupB"][name] = param
else:
param_groups["groupB_no_decay"][name] = param
else:
param_groups["groupA"][name] = param
kwargs["lr"] = lr
loraplus_weight_decay = kwargs.pop("loraplus_weight_decay", 0.0)
loraplus_lr_embedding = kwargs.pop("loraplus_lr_embedding", 1e-6)
optimizer_grouped_parameters = [
{
"params": list(param_groups["groupA"].values()),
"weight_decay": loraplus_weight_decay,
"lr": lr,
},
{
"params": list(param_groups["embedding"].values()),
"weight_decay": loraplus_weight_decay,
"lr": loraplus_lr_embedding,
},
{
"params": list(param_groups["groupB"].values()),
"weight_decay": loraplus_weight_decay,
"lr": lr * loraplus_lr_ratio,
},
{
"params": list(param_groups["groupB_no_decay"].values()),
"weight_decay": 0.0,
"lr": lr * loraplus_lr_ratio,
},
]
optimizer = optimizer_cls(optimizer_grouped_parameters, **kwargs)
eight_bit_names = ["Adam8bit", "AdamW8bit", "PagedAdam8bit", "PagedAdamW8bit"]
if optimizer_cls.__name__ in eight_bit_names:
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
for module in model.modules():
if isinstance(module, nn.Embedding):
manager.register_module_override(module, "weight", {"optim_bits": 32})
return optimizer
| peft/src/peft/optimizers/loraplus.py/0 | {
"file_path": "peft/src/peft/optimizers/loraplus.py",
"repo_id": "peft",
"token_count": 1910
} |
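A minimal usage sketch for `create_loraplus_optimizer` above, assuming a LoRA-wrapped causal LM; the model name, learning rate, and ratio below are illustrative values, not prescriptions from the module:

import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
from peft.optimizers import create_loraplus_optimizer

# Wrap a small causal LM with a default LoRA config (example model id).
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
peft_model = get_peft_model(base_model, LoraConfig(task_type="CAUSAL_LM"))

# lr is eta_A; the LoRA B matrices receive lr * loraplus_lr_ratio (eta_B).
optimizer = create_loraplus_optimizer(
    model=peft_model,
    optimizer_cls=torch.optim.AdamW,
    lr=2e-5,
    loraplus_lr_ratio=16,
    loraplus_weight_decay=0.0,
)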
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class FourierFTLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("fourierft_spectrum",)
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("fourierft_n_frequency", "fourierft_scaling", "fourierft_random_loc_seed")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.fourierft_n_frequency = {}
self.fourierft_scaling = {}
self.fourierft_spectrum = nn.ParameterDict({})
self.indices = {}
self.fourierft_random_loc_seed = {}
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, Conv1D):
self.in_features, self.out_features = (
base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
)
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
def update_layer(self, adapter_name, n_frequency, scaling, init_weights, random_loc_seed):
if n_frequency <= 0:
raise ValueError(f"`n_frequency` should be a positive integer value but the value passed is {n_frequency}")
if n_frequency > self.in_features * self.out_features:
raise ValueError(
f"`n_frequency` should be less than or equal to the product of the input and output dimensions "
f"but the value passed is {n_frequency} and the product is {self.in_features * self.out_features}"
)
self.fourierft_n_frequency[adapter_name] = n_frequency
self.fourierft_random_loc_seed[adapter_name] = random_loc_seed
self.indices[adapter_name] = torch.randperm(
self.out_features * self.in_features,
generator=torch.Generator().manual_seed(self.fourierft_random_loc_seed[adapter_name]),
)[:n_frequency]
self.indices[adapter_name] = torch.stack(
[self.indices[adapter_name] // self.in_features, self.indices[adapter_name] % self.in_features], dim=0
)
self.fourierft_scaling[adapter_name] = scaling
# Actual trainable parameters
self.fourierft_spectrum[adapter_name] = nn.Parameter(torch.randn(n_frequency), requires_grad=True)
if init_weights:
self.reset_fourier_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
@torch.no_grad()
def reset_fourier_parameters(self, adapter_name):
if adapter_name in self.fourierft_spectrum.keys():
nn.init.zeros_(self.fourierft_spectrum[adapter_name])
def get_delta_weight(self, adapter) -> torch.Tensor:
spectrum = self.fourierft_spectrum[adapter]
indices = self.indices[adapter].to(spectrum.device)
dense_spectrum = torch.zeros(self.out_features, self.in_features, device=spectrum.device, dtype=spectrum.dtype)
dense_spectrum[indices[0, :], indices[1, :]] = spectrum
delta_weight = torch.fft.ifft2(dense_spectrum).real * self.fourierft_scaling[adapter]
return delta_weight
class FourierFTLinear(nn.Module, FourierFTLayer):
# FourierFT implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
n_frequency: int = 1000,
scaling: float = 150.0,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: Union[bool, str] = False,
random_loc_seed: int = 777,
**kwargs,
) -> None:
super().__init__()
FourierFTLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, n_frequency, scaling, init_weights, random_loc_seed)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.fourierft_spectrum.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.fourierft_spectrum.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
return super().get_delta_weight(adapter)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.fourierft_spectrum.keys():
continue
delta_w = self.get_delta_weight(active_adapter)
x = x.to(delta_w.dtype)
result = result + F.linear(x, delta_w)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "fourierft." + rep
| peft/src/peft/tuners/fourierft/layer.py/0 | {
"file_path": "peft/src/peft/tuners/fourierft/layer.py",
"repo_id": "peft",
"token_count": 3638
} |
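A standalone sketch of the spectral reconstruction performed by `get_delta_weight` above: the few trainable coefficients are scattered into a dense 2D frequency grid and mapped back to a weight update with an inverse 2D FFT (shapes and scaling are arbitrary illustration values):

import torch

out_features, in_features, n_frequency, scaling = 8, 6, 5, 150.0

# Randomly chosen, fixed frequency locations (row, column) in the 2D spectrum.
flat = torch.randperm(out_features * in_features)[:n_frequency]
indices = torch.stack([flat // in_features, flat % in_features], dim=0)

# The only trainable parameters: one coefficient per selected frequency.
spectrum = torch.randn(n_frequency)

dense_spectrum = torch.zeros(out_features, in_features)
dense_spectrum[indices[0], indices[1]] = spectrum
delta_weight = torch.fft.ifft2(dense_spectrum).real * scaling
print(delta_weight.shape)  # torch.Size([8, 6])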
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.tuners.lycoris_utils import LycorisConfig
from peft.utils import PeftType
@dataclass
class LoHaConfig(LycorisConfig):
"""
This is the configuration class to store the configuration of a [`LoHaModel`].
Args:
r (`int`):
LoHa rank.
alpha (`int`):
The alpha parameter for LoHa scaling.
rank_dropout (`float`):
The dropout probability for rank dimension during training.
module_dropout (`float`):
The dropout probability for disabling LoHa modules during training.
use_effective_conv2d (`bool`):
Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
excluding the output layer. If this is not specified, modules will be chosen according to the model
architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
the target modules manually.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
init_weights (`bool`):
Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
discouraged.
layers_to_transform (`Union[List[int], int]`):
The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
that are specified in this list. If a single integer is passed, it will apply the transformations on the
layer at this index.
layers_pattern (`Optional[Union[List[str], str]]`):
The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the
`nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
rank_pattern (`dict`):
The mapping from layer names or regexp expression to ranks which are different from the default rank
specified by `r`.
alpha_pattern (`dict`):
The mapping from layer names or regexp expression to alphas which are different from the default alpha
specified by `alpha`.
modules_to_save (`Optional[List[str]]`):
List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
"""
r: int = field(default=8, metadata={"help": "LoHa rank"})
alpha: int = field(default=8, metadata={"help": "LoHa alpha"})
rank_dropout: float = field(
default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
)
module_dropout: float = field(
default=0.0, metadata={"help": "The dropout probability for disabling LoHa modules during training"}
)
use_effective_conv2d: bool = field(
default=False,
metadata={
"help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of module names or regex expression of the module names to replace with LoHa."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
"This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from LoHa."},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the LoHa layers with their default initialization. Don't change "
"this setting, except if you know exactly what you're doing."
),
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. "
"This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`."
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": "List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.LOHA
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
# check for layers_to_transform and layers_pattern
if self.layers_pattern and not self.layers_to_transform:
raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
| peft/src/peft/tuners/loha/config.py/0 | {
"file_path": "peft/src/peft/tuners/loha/config.py",
"repo_id": "peft",
"token_count": 2612
} |
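A brief usage sketch for `LoHaConfig`; the base model and target module names are assumptions chosen for illustration:

from transformers import AutoModelForCausalLM
from peft import LoHaConfig, get_peft_model

# Apply LoHa to the attention query/value projections of a small example model.
config = LoHaConfig(r=8, alpha=8, target_modules=["q_proj", "v_proj"])
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()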
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_gptqmodel_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import get_auto_gptq_quant_linear, get_gptqmodel_quant_linear
class QuantLinear(torch.nn.Module, LoraLayer):
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
use_rslora: bool = False,
use_dora: bool = False,
lora_bias: bool = False,
**kwargs,
):
super().__init__()
LoraLayer.__init__(self, base_layer)
if use_dora:
raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
lora_bias=lora_bias,
)
def forward(self, x: torch.Tensor):
# note: logic differs from default Linear because merging is not supported
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
# TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
# def reset_lora_parameters(self, adapter_name):
# if adapter_name in self.lora_A.keys():
# torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
# torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
def dispatch_gptq(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
cfg = kwargs.get("gptq_quantization_config", None)
if is_gptqmodel_available():
device_map = kwargs.get("device_map", None)
quant_linear = get_gptqmodel_quant_linear(cfg, device_map=device_map)
else:
quant_linear = get_auto_gptq_quant_linear(cfg)
if quant_linear is not None and isinstance(target_base_layer, quant_linear):
new_module = QuantLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
| peft/src/peft/tuners/lora/gptq.py/0 | {
"file_path": "peft/src/peft/tuners/lora/gptq.py",
"repo_id": "peft",
"token_count": 1841
} |
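The dispatcher above is invoked internally when LoRA is applied to a GPTQ-quantized checkpoint. A hedged sketch of that path, assuming the optional GPTQ dependencies (gptqmodel or auto-gptq/optimum) are installed and using an example quantized model id:

from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Loading a GPTQ checkpoint yields quantized linear layers; dispatch_gptq then
# wraps them in QuantLinear when get_peft_model attaches the LoRA adapters.
model = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7B-GPTQ", device_map="auto")
peft_model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM"))
peft_model.print_trainable_parameters()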
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class VBLoRAConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`VBLoRAModel`].
Paper: https://arxiv.org/abs/2405.15179
Args:
r (`int`):
The rank of incremental matrices.
num_vectors (`int`):
Number of vectors in the vector bank. Use higher values when the model size increases.
vector_length (`int`):
The length of the vectors in the vector bank. The hidden dimension of the model should be divisible by the
length of the vectors.
topk (`int`):
The K value for top-K selection. A larger value of K increases the size of the saved model. In practice,
setting K=2 typically provides the best performance and parameter efficiency. For more details, refer to
the discussion in the paper.
target_modules (`Union[List[str], str]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
excluding the output layer. If this is not specified, modules will be chosen according to the model
architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
the target modules manually.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
save_only_topk_weights (`bool`):
Whether to only save the topk weights. Setting `save_only_topk_weights = True` significantly reduces
storage space. However, models saved in this mode can be used for merging or inference only, not for
resuming training.
vblora_dropout (`float`):
The dropout probability for VBLoRA layers.
fan_in_fan_out (`bool`):
Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
bias (`str`):
Bias type for VBLoRA. Can be 'none', 'all' or 'vblora_only'. If 'all' or 'vblora_only', the corresponding
biases will be updated during training. Be aware that this means that, even when disabling the adapters,
the model will not produce the same output as the base model would have without adaptation.
modules_to_save (`List[str]`):
List of modules apart from VBLoRA layers to be set as trainable and saved in the final checkpoint.
init_vector_bank_bound (`float`):
The vector bank is initialized with a uniform distribution between -init_vector_bank_bound and
init_vector_bank_bound. Avoid initializing the vector bank with all zeros to prevent zero gradients. A
small value, such as 0.02, is typically effective. Initializing with a large value may cause training
instability.
init_logits_std (`float`):
The logits are initialized with a normal distribution with a standard deviation of init_logits_std. Default
is 0.1.
layers_to_transform (`Union[List[int],int]`):
The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
that are specified in this list. If a single integer is passed, it will apply the transformations on the
layer at this index.
layers_pattern (`Optional[Union[List[str], str]]`):
The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the
`nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
"""
r: int = field(default=4, metadata={"help": "The rank of incremental matrices."})
num_vectors: int = field(
default=256,
metadata={"help": "Number of vectors in the vector bank. Use higher values when the model size increases."},
)
vector_length: int = field(
default=256,
metadata={
"help": "The length of the vectors in the vector bank. The length of the vectors should be divisible by "
"the hidden dimension of the model."
},
)
topk: int = field(
default=2,
metadata={
"help": "The K value for top-K selection. A larger value of K increases the size of the saved model. "
"In practice, setting K=2 typically provides the best performance and parameter efficiency. "
"For more details, refer to the discussion in the paper."
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with LoRA."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'."
"This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
"If not specified, modules will be chosen according to the model architecture, If the architecture is "
"not known, an error will be raised -- in this case, you should specify the target modules manually."
)
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from VBLoRA."},
)
save_only_topk_weights: bool = field(
default=False,
metadata={
"help": (
"Whether to only save the topk weights. Setting `save_only_topk_weights = True` significantly reduces "
"storage space. However, models saved in this mode can be used for merging or inference only, not for "
"resuming training."
)
},
)
vblora_dropout: float = field(default=0.0, metadata={"help": "VBLoRA dropout"})
fan_in_fan_out: bool = field(
default=False,
metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
)
bias: str = field(default="none", metadata={"help": "Bias type for VBLoRA. Can be 'none', 'all' or 'vblora_only'"})
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules apart from VBLoRA layers to be set as trainable and saved in the final checkpoint. For"
" example, in Sequence Classification or Token Classification tasks, the final layer"
" `classifier/score` are randomly initialized and as such need to be trainable and saved."
)
},
)
init_vector_bank_bound: float = field(
default=0.02,
metadata={
"help": (
"The vector bank is initialized with a uniform distribution between -init_vector_bank_bound and"
" init_vector_bank_bound. Avoid initializing the vector bank with all zeros to prevent zero gradients."
" A small value, such as 0.02, is typically effective. Initializing with a large value may cause"
" training instability."
),
},
)
init_logits_std: float = field(
default=0.1,
metadata={
"help": (
"The logits are initialized with a normal distribution with a standard deviation of init_logits_std. "
"Default value 0.1 typically works well."
),
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. "
"This only works when target_modules is a list of str. This should target the `nn.ModuleList` of the "
"model, which is often called `'layers'` or `'h'`."
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
"This only works when target_modules is a list of str."
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.VBLORA
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
# check for layers_to_transform and layers_pattern
if self.layers_pattern and not self.layers_to_transform:
raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
| peft/src/peft/tuners/vblora/config.py/0 | {
"file_path": "peft/src/peft/tuners/vblora/config.py",
"repo_id": "peft",
"token_count": 3931
} |
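A short configuration sketch for VBLoRA; the model and hyperparameter values are example assumptions, with `vector_length` chosen so that it divides the 768-dimensional projections of the example model:

from transformers import AutoModelForCausalLM
from peft import VBLoRAConfig, get_peft_model

config = VBLoRAConfig(
    r=4,
    num_vectors=256,
    vector_length=256,
    topk=2,
    target_modules=["q_proj", "v_proj"],
)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()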
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
class IncrementalPCA:
"""
An implementation of Incremental Principal Components Analysis (IPCA) that leverages PyTorch for GPU acceleration.
Adapted from https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/decomposition/_incremental_pca.py
This class provides methods to fit the model on data incrementally in batches, and to transform new data based on
the principal components learned during the fitting process.
Args:
n_components (int, optional): Number of components to keep. If `None`, it's set to the minimum of the
number of samples and features. Defaults to None.
copy (bool): If False, input data will be overwritten. Defaults to True.
batch_size (int, optional): The number of samples to use for each batch. Only needed if self.fit is called.
If `None`, it's inferred from the data and set to `5 * n_features`. Defaults to None.
svd_driver (str, optional): name of the cuSOLVER method to be used for torch.linalg.svd. This keyword
argument only works on CUDA inputs. Available options are: None, gesvd, gesvdj, and gesvda. Defaults to
None.
lowrank (bool, optional): Whether to use torch.svd_lowrank instead of torch.linalg.svd which can be faster.
Defaults to False.
lowrank_q (int, optional): For an adequate approximation of n_components, this parameter defaults to
n_components * 2.
lowrank_niter (int, optional): Number of subspace iterations to conduct for torch.svd_lowrank.
Defaults to 4.
lowrank_seed (int, optional): Seed for making results of torch.svd_lowrank reproducible.
"""
def __init__(
self,
n_components: Optional[int] = None,
copy: Optional[bool] = True,
batch_size: Optional[int] = None,
svd_driver: Optional[str] = None,
lowrank: bool = False,
lowrank_q: Optional[int] = None,
lowrank_niter: int = 4,
lowrank_seed: Optional[int] = None,
):
self.n_components = n_components
self.copy = copy
self.batch_size = batch_size
self.svd_driver = svd_driver
self.lowrank = lowrank
self.lowrank_q = lowrank_q
self.lowrank_niter = lowrank_niter
self.lowrank_seed = lowrank_seed
self.n_features_ = None
if self.lowrank:
self._validate_lowrank_params()
def _validate_lowrank_params(self):
if self.lowrank_q is None:
if self.n_components is None:
raise ValueError("n_components must be specified when using lowrank mode with lowrank_q=None.")
self.lowrank_q = self.n_components * 2
elif self.lowrank_q < self.n_components:
raise ValueError("lowrank_q must be greater than or equal to n_components.")
def _svd_fn_full(self, X):
return torch.linalg.svd(X, full_matrices=False, driver=self.svd_driver)
def _svd_fn_lowrank(self, X):
seed_enabled = self.lowrank_seed is not None
with torch.random.fork_rng(enabled=seed_enabled):
if seed_enabled:
torch.manual_seed(self.lowrank_seed)
U, S, V = torch.svd_lowrank(X, q=self.lowrank_q, niter=self.lowrank_niter)
return U, S, V.mH
def _validate_data(self, X) -> torch.Tensor:
"""
Validates and converts the input data `X` to the appropriate tensor format.
Args:
X (torch.Tensor): Input data.
Returns:
torch.Tensor: Converted to appropriate format.
"""
valid_dtypes = [torch.float32, torch.float64]
if not isinstance(X, torch.Tensor):
X = torch.tensor(X, dtype=torch.float32)
elif self.copy:
X = X.clone()
n_samples, n_features = X.shape
if self.n_components is None:
pass
elif self.n_components > n_features:
raise ValueError(
f"n_components={self.n_components} invalid for n_features={n_features}, "
"need more rows than columns for IncrementalPCA processing."
)
elif self.n_components > n_samples:
raise ValueError(
f"n_components={self.n_components} must be less or equal to the batch number of samples {n_samples}"
)
if X.dtype not in valid_dtypes:
X = X.to(torch.float32)
return X
@staticmethod
def _incremental_mean_and_var(
X, last_mean, last_variance, last_sample_count
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Computes the incremental mean and variance for the data `X`.
Args:
X (torch.Tensor): The batch input data tensor with shape (n_samples, n_features).
last_mean (torch.Tensor): The previous mean tensor with shape (n_features,).
last_variance (torch.Tensor): The previous variance tensor with shape (n_features,).
last_sample_count (torch.Tensor): The count tensor of samples processed before the current batch.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Updated mean, variance tensors, and total sample count.
"""
if X.shape[0] == 0:
return last_mean, last_variance, last_sample_count
if last_sample_count > 0:
if last_mean is None:
raise ValueError("last_mean should not be None if last_sample_count > 0.")
if last_variance is None:
raise ValueError("last_variance should not be None if last_sample_count > 0.")
new_sample_count = torch.tensor([X.shape[0]], device=X.device)
updated_sample_count = last_sample_count + new_sample_count
if last_mean is None:
last_sum = torch.zeros(X.shape[1], dtype=torch.float64, device=X.device)
else:
last_sum = last_mean * last_sample_count
new_sum = X.sum(dim=0, dtype=torch.float64)
updated_mean = (last_sum + new_sum) / updated_sample_count
T = new_sum / new_sample_count
temp = X - T
correction = temp.sum(dim=0, dtype=torch.float64).square()
temp.square_()
new_unnormalized_variance = temp.sum(dim=0, dtype=torch.float64)
new_unnormalized_variance -= correction / new_sample_count
if last_variance is None:
updated_variance = new_unnormalized_variance / updated_sample_count
else:
last_unnormalized_variance = last_variance * last_sample_count
last_over_new_count = last_sample_count.double() / new_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance
+ new_unnormalized_variance
+ last_over_new_count / updated_sample_count * (last_sum / last_over_new_count - new_sum).square()
)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
@staticmethod
def _svd_flip(u, v, u_based_decision=True) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Adjusts the signs of the singular vectors from the SVD decomposition for deterministic output.
This method ensures that the output remains consistent across different runs.
Args:
u (torch.Tensor): Left singular vectors tensor.
v (torch.Tensor): Right singular vectors tensor.
u_based_decision (bool, optional): If True, uses the left singular vectors to determine the sign flipping.
Defaults to True.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Adjusted left and right singular vectors tensors.
"""
if u_based_decision:
max_abs_cols = torch.argmax(torch.abs(u), dim=0)
signs = torch.sign(u[max_abs_cols, range(u.shape[1])])
else:
max_abs_rows = torch.argmax(torch.abs(v), dim=1)
signs = torch.sign(v[range(v.shape[0]), max_abs_rows])
u *= signs[: u.shape[1]].view(1, -1)
v *= signs.view(-1, 1)
return u, v
def fit(self, X, check_input=True):
"""
Fits the model with data `X` using minibatches of size `batch_size`.
Args:
X (torch.Tensor): The input data tensor with shape (n_samples, n_features).
check_input (bool, optional): If True, validates the input. Defaults to True.
Returns:
IncrementalPCA: The fitted IPCA model.
"""
if check_input:
X = self._validate_data(X)
n_samples, n_features = X.shape
if self.batch_size is None:
self.batch_size = 5 * n_features
for batch in self.gen_batches(n_samples, self.batch_size, min_batch_size=self.n_components or 0):
self.partial_fit(X[batch], check_input=False)
return self
def partial_fit(self, X, check_input=True):
"""
Incrementally fits the model with batch data `X`.
Args:
X (torch.Tensor): The batch input data tensor with shape (n_samples, n_features).
check_input (bool, optional): If True, validates the input. Defaults to True.
Returns:
IncrementalPCA: The updated IPCA model after processing the batch.
"""
first_pass = not hasattr(self, "components_")
if check_input:
X = self._validate_data(X)
n_samples, n_features = X.shape
# Initialize attributes to avoid errors during the first call to partial_fit
if first_pass:
self.mean_ = None # Will be initialized properly in _incremental_mean_and_var based on data dimensions
self.var_ = None # Will be initialized properly in _incremental_mean_and_var based on data dimensions
self.n_samples_seen_ = torch.tensor([0], device=X.device)
self.n_features_ = n_features
if not self.n_components:
self.n_components = min(n_samples, n_features)
if n_features != self.n_features_:
raise ValueError(
"Number of features of the new batch does not match the number of features of the first batch."
)
col_mean, col_var, n_total_samples = self._incremental_mean_and_var(
X, self.mean_, self.var_, self.n_samples_seen_
)
if first_pass:
X -= col_mean
else:
col_batch_mean = torch.mean(X, dim=0)
X -= col_batch_mean
mean_correction_factor = torch.sqrt((self.n_samples_seen_.double() / n_total_samples) * n_samples)
mean_correction = mean_correction_factor * (self.mean_ - col_batch_mean)
X = torch.vstack(
(
self.singular_values_.view((-1, 1)) * self.components_,
X,
mean_correction,
)
)
if self.lowrank:
U, S, Vt = self._svd_fn_lowrank(X)
else:
U, S, Vt = self._svd_fn_full(X)
U, Vt = self._svd_flip(U, Vt, u_based_decision=False)
explained_variance = S**2 / (n_total_samples - 1)
explained_variance_ratio = S**2 / torch.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = Vt[: self.n_components]
self.singular_values_ = S[: self.n_components]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[: self.n_components]
self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components]
if self.n_components not in (n_samples, n_features):
self.noise_variance_ = explained_variance[self.n_components :].mean()
else:
self.noise_variance_ = torch.tensor(0.0, device=X.device)
return self
def transform(self, X) -> torch.Tensor:
"""
Applies dimensionality reduction to `X`.
The input data `X` is projected on the first principal components previously extracted from a training set.
Args:
X (torch.Tensor): New data tensor with shape (n_samples, n_features) to be transformed.
Returns:
torch.Tensor: Transformed data tensor with shape (n_samples, n_components).
"""
X = X - self.mean_
return torch.mm(X.double(), self.components_.T).to(X.dtype)
@staticmethod
def gen_batches(n: int, batch_size: int, min_batch_size: int = 0):
"""Generator to create slices containing `batch_size` elements from 0 to `n`.
The last slice may contain less than `batch_size` elements, when `batch_size` does not divide `n`.
Args:
n (int): Size of the sequence.
batch_size (int): Number of elements in each batch.
min_batch_size (int, optional): Minimum number of elements in each batch. Defaults to 0.
Yields:
slice: A slice of `batch_size` elements.
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
if end + min_batch_size > n:
continue
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
| peft/src/peft/utils/incremental_pca.py/0 | {
"file_path": "peft/src/peft/utils/incremental_pca.py",
"repo_id": "peft",
"token_count": 6139
} |
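A small end-to-end sketch of the incremental PCA helper above on random data (sizes are arbitrary; with several minibatches the statistics are accumulated through `partial_fit` exactly as in `fit`):

import torch
from peft.utils.incremental_pca import IncrementalPCA

X = torch.randn(1000, 64)
ipca = IncrementalPCA(n_components=16, batch_size=256)
ipca.fit(X)                    # processes the data in minibatches via partial_fit
X_reduced = ipca.transform(X)  # project onto the learned principal components
print(X_reduced.shape)         # (1000, 16)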
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Union
import pytest
import torch
from datasets import load_dataset
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import CPTConfig, get_peft_model
TEMPLATE = {"input": "input: {}", "intra_seperator": " ", "output": "output: {}", "inter_seperator": "\n"}
MODEL_NAME = "hf-internal-testing/tiny-random-OPTForCausalLM"
MAX_INPUT_LENGTH = 1024
@pytest.fixture(scope="module")
def global_tokenizer():
"""Load the tokenizer fixture for the model."""
return AutoTokenizer.from_pretrained(MODEL_NAME, padding_side="right")
@pytest.fixture(scope="module")
def config_text():
"""Load the SST2 dataset and prepare it for testing."""
config = CPTConfig(
cpt_token_ids=[0, 1, 2, 3, 4, 5, 6, 7], # Example token IDs for testing
cpt_mask=[1, 1, 1, 1, 1, 1, 1, 1],
cpt_tokens_type_mask=[1, 2, 2, 2, 3, 3, 3, 4],
opt_weighted_loss_type="decay",
opt_loss_decay_factor=0.95,
opt_projection_epsilon=0.2,
opt_projection_format_epsilon=0.1,
tokenizer_name_or_path=MODEL_NAME,
)
return config
@pytest.fixture(scope="module")
def config_random():
"""Load the SST2 dataset and prepare it for testing."""
config = CPTConfig(
opt_weighted_loss_type="decay",
opt_loss_decay_factor=0.95,
opt_projection_epsilon=0.2,
opt_projection_format_epsilon=0.1,
tokenizer_name_or_path=MODEL_NAME,
)
return config
@pytest.fixture(scope="module")
def sst_data():
"""Load the SST2 dataset and prepare it for testing."""
data = load_dataset("glue", "sst2")
def add_string_labels(example):
if example["label"] == 0:
example["label_text"] = "negative"
elif example["label"] == 1:
example["label_text"] = "positive"
return example
train_dataset = data["train"].select(range(4)).map(add_string_labels)
test_dataset = data["validation"].select(range(10)).map(add_string_labels)
return {"train": train_dataset, "test": test_dataset}
@pytest.fixture(scope="module")
def collator(global_tokenizer):
class CPTDataCollatorForLanguageModeling(DataCollatorForLanguageModeling):
def __init__(self, tokenizer, training=True, mlm=False):
super().__init__(tokenizer, mlm=mlm)
self.training = training
self.tokenizer.add_special_tokens({"pad_token": "[PAD]"})  # ensure a pad token exists so batches can be padded
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
# Handle dict or lists with proper padding and conversion to tensor.
list_sample_mask = []
for i in range(len(examples)):
if "sample_mask" in examples[i].keys():
list_sample_mask.append(examples[i].pop("sample_mask"))
max_len = max(len(ex["input_ids"]) for ex in examples)
def pad_sequence(sequence, max_len, pad_value=0):
return sequence + [pad_value] * (max_len - len(sequence))
input_ids = torch.tensor([pad_sequence(ex["input_ids"], max_len) for ex in examples])
attention_mask = torch.tensor([pad_sequence(ex["attention_mask"], max_len) for ex in examples])
input_type_mask = torch.tensor([pad_sequence(ex["input_type_mask"], max_len) for ex in examples])
batch = {"input_ids": input_ids, "attention_mask": attention_mask, "input_type_mask": input_type_mask}
tensor_sample_mask = batch["input_ids"].clone().long()
tensor_sample_mask[:, :] = 0
for i in range(len(list_sample_mask)):
tensor_sample_mask[i, : len(list_sample_mask[i])] = list_sample_mask[i]
batch["labels"] = batch["input_ids"].clone()
if not self.training:
batch["sample_mask"] = tensor_sample_mask
return batch
collator = CPTDataCollatorForLanguageModeling(global_tokenizer, training=True, mlm=False)
return collator
def dataset(data, tokenizer):
class CPTDataset(Dataset):
def __init__(self, samples, tokenizer, template, max_length=MAX_INPUT_LENGTH):
self.template = template
self.tokenizer = tokenizer
self.max_length = max_length
self.attention_mask = []
self.input_ids = []
self.input_type_mask = []
self.inter_seperator_ids = self._get_input_ids(template["inter_seperator"])
for sample_i in tqdm(samples):
input_text, label = sample_i["sentence"], sample_i["label_text"]
input_ids, attention_mask, input_type_mask = self.preprocess_sentence(input_text, label)
self.input_ids.append(input_ids)
self.attention_mask.append(attention_mask)
self.input_type_mask.append(input_type_mask)
def _get_input_ids(self, text):
return self.tokenizer(text, add_special_tokens=False)["input_ids"]
def preprocess_sentence(self, input_text, label):
input_template_part_1_text, input_template_part_2_text = self.template["input"].split("{}")
input_template_tokenized_part1 = self._get_input_ids(input_template_part_1_text)
input_tokenized = self._get_input_ids(input_text)
input_template_tokenized_part2 = self._get_input_ids(input_template_part_2_text)
sep_tokenized = self._get_input_ids(self.template["intra_seperator"])
label_template_part_1, label_template_part_2 = self.template["output"].split("{}")
label_template_part1_tokenized = self._get_input_ids(label_template_part_1)
label_tokenized = self._get_input_ids(label)
label_template_part2_tokenized = self._get_input_ids(label_template_part_2)
eos = [self.tokenizer.eos_token_id] if self.tokenizer.eos_token_id is not None else []
input_ids = (
input_template_tokenized_part1
+ input_tokenized
+ input_template_tokenized_part2
+ sep_tokenized
+ label_template_part1_tokenized
+ label_tokenized
+ label_template_part2_tokenized
+ eos
)
# determine label tokens, to calculate loss only over them when labels_loss == True
attention_mask = [1] * len(input_ids)
input_type_mask = (
[1] * len(input_template_tokenized_part1)
+ [2] * len(input_tokenized)
+ [1] * len(input_template_tokenized_part2)
+ [0] * len(sep_tokenized)
+ [3] * len(label_template_part1_tokenized)
+ [4] * len(label_tokenized)
+ [3] * len(label_template_part2_tokenized)
+ [0] * len(eos)
)
assert len(input_type_mask) == len(input_ids) == len(attention_mask)
return input_ids, attention_mask, input_type_mask
def __len__(self):
return len(self.input_ids)
def __getitem__(self, idx):
return {
"input_ids": self.input_ids[idx],
"attention_mask": self.attention_mask[idx],
"input_type_mask": self.input_type_mask[idx],
}
dataset = CPTDataset(data, tokenizer, TEMPLATE)
return dataset
def test_model_initialization_text(global_tokenizer, config_text):
"""Test model loading and PEFT model initialization."""
base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model = get_peft_model(base_model, config_text)
assert model is not None, "PEFT model initialization failed"
def test_model_initialization_random(global_tokenizer, config_random):
"""Test model loading and PEFT model initialization."""
base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model = get_peft_model(base_model, config_random)
assert model is not None, "PEFT model initialization failed"
def test_model_training_random(sst_data, global_tokenizer, collator, config_random):
"""Perform a short training run to verify the model and data integration."""
base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model = get_peft_model(base_model, config_random)
emb = model.prompt_encoder.default.embedding.weight.data.clone().detach()
training_args = TrainingArguments(
output_dir="./results",
per_device_train_batch_size=1,
num_train_epochs=2,
remove_unused_columns=False,
save_strategy="no",
logging_steps=1,
)
train_dataset = dataset(sst_data["train"], global_tokenizer)
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=collator)
trainer.train()
# Verify that the embedding tensor remains unchanged (frozen)
assert torch.all(model.prompt_encoder.default.embedding.weight.data.clone().detach().cpu() == emb.cpu())
delta_emb = model.prompt_encoder.default.get_projection().clone().detach()
norm_delta = delta_emb.norm(dim=1).cpu()
epsilon = model.prompt_encoder.default.get_epsilon().cpu()
# Verify that the change in tokens is constrained to epsilon
assert torch.all(norm_delta <= epsilon)
def test_model_batch_training_text(sst_data, global_tokenizer, collator, config_text):
"""Perform a short training run to verify the model and data integration."""
base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model = get_peft_model(base_model, config_text)
emb = model.prompt_encoder.default.embedding.weight.data.clone().detach()
training_args = TrainingArguments(
output_dir="./results",
per_device_train_batch_size=2,
num_train_epochs=2,
remove_unused_columns=False,
save_strategy="no",
logging_steps=1,
)
train_dataset = dataset(sst_data["train"], global_tokenizer)
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=collator)
trainer.train()
# Verify that the embedding tensor remains unchanged (frozen)
assert torch.all(model.prompt_encoder.default.embedding.weight.data.clone().detach().cpu() == emb.cpu())
cpt_tokens_type_mask = torch.Tensor(config_text.cpt_tokens_type_mask).long()
non_label_idx = (cpt_tokens_type_mask == 1) | (cpt_tokens_type_mask == 2) | (cpt_tokens_type_mask == 3)
delta_emb = model.prompt_encoder.default.get_projection().clone().detach()
norm_delta = delta_emb.norm(dim=1).cpu()
epsilon = model.prompt_encoder.default.get_epsilon().cpu()
# Verify that the change in tokens is constrained to epsilon
assert torch.all(norm_delta <= epsilon)
# Ensure that label tokens remain unchanged
assert torch.all((norm_delta == 0) == (~non_label_idx))
| peft/tests/test_cpt.py/0 | {
"file_path": "peft/tests/test_cpt.py",
"repo_id": "peft",
"token_count": 4903
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import os
import platform
import re
import tempfile
import unittest
import pytest
import torch
from parameterized import parameterized
from torch import nn
from transformers import AutoModelForCausalLM
from peft import (
AdaLoraConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
PeftMixedModel,
PrefixTuningConfig,
get_peft_model,
)
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import infer_device
class SimpleNet(nn.Module):
def __init__(self, bias=True):
super().__init__()
# note: out_features must be > rank or else OFT will be an identity transform
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.lin1 = nn.Linear(20, 16, bias=bias)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
return X
def _param_name_func(testcase_func, param_num, params):
# for parameterized tests in TestMixedAdapterTypes
config0, config1 = params[0]
name0 = config0.__class__.__name__[: -len("Config")]
name1 = config1.__class__.__name__[: -len("Config")]
if name0 != name1:
return f"{testcase_func.__name__}_{param_num}_{name0}_{name1}"
return f"{testcase_func.__name__}_{param_num}_{name0}_x2"
class TestMixedAdapterTypes(unittest.TestCase):
torch_device = infer_device()
def _get_model(self, model_cls, peft_config=None, adapter_name=None, seed=0, mixed=True):
torch.manual_seed(0) # always use seed 0 for base model, seed for adapters may differ
base_model = model_cls().eval().to(self.torch_device)
if peft_config is None:
return base_model
torch.manual_seed(seed)
assert adapter_name is not None
peft_model = get_peft_model(base_model, peft_config, adapter_name=adapter_name, mixed=mixed)
return peft_model.eval().to(self.torch_device)
def _check_mixed_outputs(self, model_cls, config0, config1, input, *, is_commutative):
# This test checks different combinations of adapter0, adapter1, or combinations of the two, and whether
# outputs are the same/different, depending on context. If we pass is_commutative=True, it means that the order
# of adapters does not matter, and we expect the same output regardless of the order in which adapters are
# applied.
# We have to be very careful with resetting the random seed each time it is used, otherwise the adapters may be
# initialized with different values, and the test will fail.
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
# base model
base_model = self._get_model(model_cls)
output_base = base_model(input)
assert torch.isfinite(output_base).all()
# adapter 0
peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
output_config0 = peft_model_0(input)
assert torch.isfinite(output_config0).all()
assert not torch.allclose(output_base, output_config0, atol=atol, rtol=rtol)
# adapter 1
peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
output_config1 = peft_model_1(input)
assert torch.isfinite(output_config1).all()
assert not torch.allclose(output_base, output_config1, atol=atol, rtol=rtol)
assert not torch.allclose(output_config0, output_config1, atol=atol, rtol=rtol)
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
# check the number of tuner layer types
tuner_layers = [mod for mod in peft_model_01.modules() if isinstance(mod, BaseTunerLayer)]
tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers}
if type(config0) is type(config1):
assert len(tuner_types) == 1
else:
assert len(tuner_types) == 2
assert peft_model_01.active_adapters == ["adapter0", "adapter1"]
assert torch.isfinite(output_mixed_01).all()
assert not torch.allclose(output_config0, output_mixed_01, atol=atol, rtol=rtol)
assert not torch.allclose(output_config1, output_mixed_01, atol=atol, rtol=rtol)
if is_commutative:
delta0 = output_config0 - output_base
delta1 = output_config1 - output_base
delta_mixed_01 = output_mixed_01 - output_base
assert torch.allclose((delta0 + delta1), delta_mixed_01, atol=atol, rtol=rtol)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
# check the number of tuner layer types
tuner_layers = [mod for mod in peft_model_10.modules() if isinstance(mod, BaseTunerLayer)]
tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers}
if type(config0) is type(config1):
assert len(tuner_types) == 1
else:
assert len(tuner_types) == 2
assert peft_model_10.active_adapters == ["adapter1", "adapter0"]
assert torch.isfinite(output_mixed_10).all()
assert not torch.allclose(output_config0, output_mixed_10, atol=atol, rtol=rtol)
assert not torch.allclose(output_config1, output_mixed_10, atol=atol, rtol=rtol)
if is_commutative:
assert torch.allclose(output_mixed_01, output_mixed_10, atol=atol, rtol=rtol)
# reverse the adapter order of the 1 + 0 mixed model; it should now behave like the 0 + 1 mixed model
peft_model_10.set_adapter(["adapter0", "adapter1"])
output_mixed_reversed = peft_model_10(input)
# check the number of tuner layer types
tuner_layers = [mod for mod in peft_model_10.modules() if isinstance(mod, BaseTunerLayer)]
tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers}
if type(config0) is type(config1):
assert len(tuner_types) == 1
else:
assert len(tuner_types) == 2
assert peft_model_10.active_adapters == ["adapter0", "adapter1"]
assert torch.isfinite(output_mixed_reversed).all()
assert not torch.allclose(output_mixed_reversed, output_config0, atol=atol, rtol=rtol)
assert not torch.allclose(output_mixed_reversed, output_config1, atol=atol, rtol=rtol)
if is_commutative:
assert torch.allclose(output_mixed_reversed, output_mixed_01, atol=atol, rtol=rtol)
assert torch.allclose(output_mixed_reversed, output_mixed_10, atol=atol, rtol=rtol)
def _check_merging(self, model_cls, config0, config1, input):
# Ensure that when merging mixed adapters, the result is the same as when applying the adapters separately.
# Merging requires a somewhat higher tolerance for some adapters, which can also vary depending on CPU vs GPU.
atol = 1e-4
rtol = 1e-4
seed0 = 0
seed1 = 1
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
model_merged_01 = peft_model_01.merge_and_unload()
output_merged_01 = model_merged_01(input)
assert torch.allclose(output_mixed_01, output_merged_01, atol=atol, rtol=rtol)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
model_merged_10 = peft_model_10.merge_and_unload()
output_merged_10 = model_merged_10(input)
assert torch.allclose(output_mixed_10, output_merged_10, atol=atol, rtol=rtol)
def _check_unload(self, model_cls, config0, config1, input):
# Ensure that we can unload the adapters and recover the base model without merging
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
base_model = self._get_model(model_cls)
output_base = base_model(input)
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed = peft_model_01(input)
# unload
model_unloaded = peft_model_01.unload()
output_unloaded = model_unloaded(input)
assert not torch.allclose(output_mixed, output_unloaded, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_unloaded, atol=atol, rtol=rtol)
def _check_disable(self, model_cls, config0, config1, input):
# Ensure that we can disable adapters
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
# base model
base_model = self._get_model(model_cls)
output_base = base_model(input)
# adapter 0
peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
output_config0 = peft_model_0(input)
with peft_model_0.disable_adapter():
output_disabled0 = peft_model_0(input)
assert not torch.allclose(output_base, output_config0, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled0, atol=atol, rtol=rtol)
# adapter 1
peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
output_config1 = peft_model_1(input)
with peft_model_1.disable_adapter():
output_disabled1 = peft_model_1(input)
assert not torch.allclose(output_base, output_config1, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled1, atol=atol, rtol=rtol)
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
with peft_model_01.disable_adapter():
output_disabled01 = peft_model_01(input)
assert not torch.allclose(output_base, output_mixed_01, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled01, atol=atol, rtol=rtol)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
with peft_model_10.disable_adapter():
output_disabled10 = peft_model_10(input)
assert not torch.allclose(output_base, output_mixed_10, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled10, atol=atol, rtol=rtol)
def _check_loading(self, model_cls, config0, config1, input, *, is_commutative):
# Check that we can load two adapters into the same model
# Note that we save the adapters using a normal PeftModel because PeftMixedModel doesn't support saving yet
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
with tempfile.TemporaryDirectory() as tmp_dirname:
# SAVING
# adapter 0: note that we set mixed=False because mixed models don't support saving (yet)
peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0, mixed=False)
output_config0 = peft_model_0(input)
peft_model_0.save_pretrained(os.path.join(tmp_dirname, "adapter0"))
# adapter 1: note that we set mixed=False because mixed models don't support saving (yet)
peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1, mixed=False)
output_config1 = peft_model_1(input)
peft_model_1.save_pretrained(os.path.join(tmp_dirname, "adapter1"))
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
# LOADING
# adapter 0
base_model = self._get_model(model_cls)
# Notes:
# Path is tmp_dirname/adapter0/adapter0 because non-default adapters are saved in a subfolder.
# As a sanity check, we should set a completely different seed here. That way, we ensure that the
# weights are not just randomly initialized exactly to the same values as before.
torch.manual_seed(123456)
peft_model_loaded0 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0"
)
output_loaded0 = peft_model_loaded0(input)
assert torch.allclose(output_config0, output_loaded0, atol=atol, rtol=rtol)
# adapter 1
base_model = self._get_model(model_cls)
torch.manual_seed(654321) # setting a completely different seed here should not affect the result
peft_model_loaded1 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1"
)
output_loaded1 = peft_model_loaded1(input)
assert torch.allclose(output_config1, output_loaded1, atol=atol, rtol=rtol)
# adapter 0 + 1
base_model = self._get_model(model_cls)
torch.manual_seed(97531) # setting a completely different seed here should not affect the result
peft_model_loaded_01 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0"
)
peft_model_loaded_01.load_adapter(os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1")
# at this point, "adapter0" should still be active
assert peft_model_loaded_01.active_adapters == ["adapter0"]
output_loaded01_0 = peft_model_loaded_01(input)
assert torch.allclose(output_config0, output_loaded01_0, atol=atol, rtol=rtol)
# activate adapter1
peft_model_loaded_01.set_adapter(["adapter1"])
assert peft_model_loaded_01.active_adapters == ["adapter1"]
output_loaded01_1 = peft_model_loaded_01(input)
assert torch.allclose(output_config1, output_loaded01_1, atol=atol, rtol=rtol)
# activate both adapters
peft_model_loaded_01.set_adapter(["adapter0", "adapter1"])
output_loaded01 = peft_model_loaded_01(input)
assert torch.allclose(output_mixed_01, output_loaded01, atol=atol, rtol=rtol)
# adapter 1 + 0
base_model = self._get_model(model_cls)
torch.manual_seed(445566) # setting a completely different seed here should not affect the result
peft_model_loaded_10 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1"
)
peft_model_loaded_10.load_adapter(os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0")
# at this point, "adapter1" should still be active
assert peft_model_loaded_10.active_adapters == ["adapter1"]
output_loaded10_1 = peft_model_loaded_10(input)
assert torch.allclose(output_config1, output_loaded10_1, atol=atol, rtol=rtol)
# activate adapter0
peft_model_loaded_10.set_adapter(["adapter0"])
assert peft_model_loaded_10.active_adapters == ["adapter0"]
output_loaded10_0 = peft_model_loaded_10(input)
assert torch.allclose(output_config0, output_loaded10_0, atol=atol, rtol=rtol)
# activate both adapters
peft_model_loaded_10.set_adapter(["adapter1", "adapter0"])
output_loaded10 = peft_model_loaded_10(input)
assert torch.allclose(output_mixed_10, output_loaded10, atol=atol, rtol=rtol)
if is_commutative:
assert torch.allclose(output_loaded01, output_loaded10, atol=atol, rtol=rtol)
assert torch.allclose(output_loaded10, output_mixed_01, atol=atol, rtol=rtol)
@parameterized.expand(
itertools.combinations(
[
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
LoHaConfig(target_modules=["lin0"], init_weights=False),
LoKrConfig(target_modules=["lin0"], init_weights=False),
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False, total_step=1),
],
r=2,
),
name_func=_param_name_func,
)
def test_target_first_layer(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=False)
@parameterized.expand(
itertools.combinations(
[
LoraConfig(target_modules=["lin1"], init_lora_weights=False),
LoHaConfig(target_modules=["lin1"], init_weights=False),
LoKrConfig(target_modules=["lin1"], init_weights=False),
AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False, total_step=1),
],
r=2,
),
name_func=_param_name_func,
)
def test_target_last_layer(self, config0, config1):
# We are targeting the last layer of the SimpleNet. Therefore, since the adapters only add their activations
# to the output, the results should be commutative. This would *not* work if the adapters do something more
# complex or if we target an earlier layer, because the non-linearity would destroy the commutativity.
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=True)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=True)
@parameterized.expand(
itertools.combinations(
[
LoraConfig(init_lora_weights=False),
LoHaConfig(init_weights=False),
LoKrConfig(init_weights=False),
AdaLoraConfig(init_lora_weights=False, total_step=1),
],
r=2,
),
name_func=_param_name_func,
)
def test_target_different_layers(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
config0.target_modules = ["lin0"]
config1.target_modules = ["lin1"]
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config0, config1, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=False)
# same, but switch target_modules around
config0.target_modules = ["lin1"]
config1.target_modules = ["lin0"]
self._check_mixed_outputs(SimpleNet, config1, config0, input, is_commutative=False)
self._check_merging(SimpleNet, config1, config0, input)
self._check_unload(SimpleNet, config1, config0, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config1, config0, input, is_commutative=False)
@parameterized.expand(
[
(
LoraConfig(target_modules=["lin1"], init_lora_weights=False),
LoraConfig(target_modules=["lin1"], init_lora_weights=False),
),
(
LoHaConfig(target_modules=["lin1"], init_weights=False),
LoHaConfig(target_modules=["lin1"], init_weights=False),
),
(
LoKrConfig(target_modules=["lin1"], init_weights=False),
LoKrConfig(target_modules=["lin1"], init_weights=False),
),
(
AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False, total_step=1),
AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False, total_step=1),
),
],
name_func=_param_name_func,
)
def test_target_last_layer_same_type(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=True)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
@parameterized.expand(
[
(
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
),
(
LoHaConfig(target_modules=["lin0"], init_weights=False),
LoHaConfig(target_modules=["lin0"], init_weights=False),
),
(
LoKrConfig(target_modules=["lin0"], init_weights=False),
LoKrConfig(target_modules=["lin0"], init_weights=False),
),
(
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False, total_step=1),
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False, total_step=1),
),
],
name_func=_param_name_func,
)
def test_target_first_layer_same_type(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=False)
def test_deeply_nested(self):
# a somewhat absurdly nested model using different adapter types
if platform.system() == "Linux":
self.skipTest("This test fails but only on GitHub CI with Linux systems.")
atol = 1e-5
rtol = 1e-5
torch.manual_seed(0)
model = SimpleNet().eval().to(self.torch_device)
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
output_base = model(input)
config0 = LoraConfig(r=4, lora_alpha=4, target_modules=["lin0", "lin1"], init_lora_weights=False)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
config1 = LoHaConfig(r=4, alpha=4, target_modules=["lin0"], init_weights=False)
peft_model.add_adapter("adapter1", config1)
config2 = AdaLoraConfig(r=4, lora_alpha=4, target_modules=["lin1"], init_lora_weights=False, total_step=1)
peft_model.add_adapter("adapter2", config2)
config3 = LoKrConfig(r=4, alpha=4, target_modules=["lin0", "lin1"], init_weights=False)
peft_model.add_adapter("adapter3", config3)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"])
output_mixed = peft_model(input)
assert torch.isfinite(output_base).all()
assert not torch.allclose(output_base, output_mixed, atol=atol, rtol=rtol)
# test disabling all adapters
with peft_model.disable_adapter():
output_disabled = peft_model(input)
assert torch.isfinite(output_disabled).all()
assert torch.allclose(output_base, output_disabled, atol=atol, rtol=rtol)
assert not torch.allclose(output_mixed, output_disabled, atol=atol, rtol=rtol)
# merge and unload all adapters
model_copy = copy.deepcopy(peft_model)
model = model_copy.merge_and_unload()
output_merged = model(input)
assert torch.isfinite(output_merged).all()
assert torch.allclose(output_mixed, output_merged, atol=atol, rtol=rtol)
# merge and unload only adapter1 and adapter3
model_copy = copy.deepcopy(peft_model)
model_copy.set_adapter(["adapter1", "adapter3"])
output_13 = model_copy(input)
assert torch.isfinite(output_13).all()
assert not torch.allclose(output_mixed, output_13, atol=atol, rtol=rtol)
model_copy.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"])
model_merged_unloaded = model_copy.merge_and_unload(adapter_names=["adapter1", "adapter3"])
output_merged_13 = model_merged_unloaded(input)
assert torch.isfinite(output_merged_13).all()
assert torch.allclose(output_13, output_merged_13, atol=atol, rtol=rtol)
# test unloading
model_copy = copy.deepcopy(peft_model)
model_unloaded = model_copy.unload()
output_unloaded = model_unloaded(input)
assert torch.isfinite(output_unloaded).all()
assert torch.allclose(output_base, output_unloaded, atol=atol, rtol=rtol)
def test_delete_adapter(self):
atol = 1e-5
rtol = 1e-5
torch.manual_seed(0)
model = SimpleNet().eval().to(self.torch_device)
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
output_base = model(input)
# create adapter0
torch.manual_seed(0)
config0 = LoraConfig(r=4, lora_alpha=4, target_modules=["lin0", "lin1"], init_lora_weights=False)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
output_0 = peft_model(input)
assert not torch.allclose(output_base, output_0, atol=atol, rtol=rtol)
# add adapter1
torch.manual_seed(1)
config1 = LoHaConfig(r=4, alpha=4, target_modules=["lin0"], init_weights=False)
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
output_01 = peft_model(input)
assert not torch.allclose(output_base, output_01, atol=atol, rtol=rtol)
assert not torch.allclose(output_0, output_01, atol=atol, rtol=rtol)
# delete adapter1
peft_model.delete_adapter("adapter1")
assert peft_model.active_adapters == ["adapter0"]
output_deleted_1 = peft_model(input)
assert torch.allclose(output_0, output_deleted_1, atol=atol, rtol=rtol)
msg = re.escape("Adapter(s) ['adapter1'] not found, available adapters: ['adapter0']")
with pytest.raises(ValueError, match=msg):
peft_model.set_adapter(["adapter0", "adapter1"])
# re-add adapter1
torch.manual_seed(1)
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
output_01_readded = peft_model(input)
assert not torch.allclose(output_base, output_01_readded, atol=atol, rtol=rtol)
# same as above, but this time delete adapter0 first
torch.manual_seed(0)
model = SimpleNet().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
torch.manual_seed(1)
peft_model.add_adapter("adapter1", config1)
peft_model.delete_adapter("adapter0")
assert peft_model.active_adapters == ["adapter1"]
output_deleted_0 = peft_model(input)
assert not torch.allclose(output_deleted_0, output_base, atol=atol, rtol=rtol)
assert not torch.allclose(output_deleted_0, output_01, atol=atol, rtol=rtol)
msg = re.escape("Adapter(s) ['adapter0'] not found, available adapters: ['adapter1']")
with pytest.raises(ValueError, match=msg):
peft_model.set_adapter(["adapter0", "adapter1"])
peft_model.delete_adapter("adapter1")
assert peft_model.active_adapters == []
output_deleted_01 = peft_model(input)
assert torch.allclose(output_deleted_01, output_base, atol=atol, rtol=rtol)
def test_modules_to_save(self):
model = SimpleNet().eval().to(self.torch_device)
config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
# adding a second adapter with same modules_to_save is not allowed
# TODO: theoretically, we could allow this if it's the same target layer
config1 = LoHaConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model.add_adapter("adapter1", config1)
with pytest.raises(ValueError, match="Only one adapter can be set at a time for modules_to_save"):
peft_model.set_adapter(["adapter0", "adapter1"])
def test_get_nb_trainable_parameters(self):
model = SimpleNet().eval().to(self.torch_device)
params_base = sum(p.numel() for p in model.parameters())
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
trainable_params0, all_param0 = peft_model.get_nb_trainable_parameters()
params_lora = sum(p.numel() for n, p in model.named_parameters() if "adapter0" in n)
assert trainable_params0 == params_lora
assert all_param0 == (params_base + params_lora)
config1 = LoHaConfig(target_modules=["lin1"])
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
params_loha = sum(p.numel() for n, p in model.named_parameters() if "adapter1" in n)
trainable_params1, all_param1 = peft_model.get_nb_trainable_parameters()
assert trainable_params1 == (params_lora + params_loha)
assert all_param1 == ((params_base + params_lora) + params_loha)
config2 = AdaLoraConfig(target_modules=["lin0", "lin1"], total_step=1)
peft_model.add_adapter("adapter2", config2)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2"])
params_adalora = sum(p.numel() for n, p in model.named_parameters() if "adapter2" in n)
trainable_params2, all_param2 = peft_model.get_nb_trainable_parameters()
# remove 2 params because we need to exclude "ranknum" for AdaLora trainable params
assert trainable_params2 == (((params_lora + params_loha) + params_adalora) - 2)
assert all_param2 == (((params_base + params_lora) + params_loha) + params_adalora)
def test_incompatible_config_raises(self):
model = SimpleNet().eval().to(self.torch_device)
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
config1 = PrefixTuningConfig()
msg = "The provided `peft_type` 'PREFIX_TUNING' is not compatible with the `PeftMixedModel`."
with pytest.raises(ValueError, match=msg):
peft_model.add_adapter("adapter1", config1)
def test_decoder_model(self):
# test a somewhat realistic model instead of a toy model
torch.manual_seed(0)
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
input_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
output_base = model.generate(**input_dict)
torch.manual_seed(0)
config0 = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
output0 = peft_model.generate(**input_dict)
assert torch.isfinite(output0).all()
assert not torch.allclose(output_base, output0)
torch.manual_seed(1)
config1 = LoHaConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"], init_weights=False)
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
output1 = peft_model.generate(**input_dict)
assert torch.isfinite(output1).all()
assert not torch.allclose(output0, output1)
torch.manual_seed(2)
config2 = AdaLoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, total_step=1)
peft_model.add_adapter("adapter2", config2)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2"])
output2 = peft_model.generate(**input_dict)
assert torch.isfinite(output2).all()
assert not torch.allclose(output1, output2)
torch.manual_seed(3)
config3 = LoKrConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"], init_weights=False)
peft_model.add_adapter("adapter3", config3)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"])
output3 = peft_model.generate(**input_dict)
assert torch.isfinite(output3).all()
assert not torch.allclose(output2, output3)
torch.manual_seed(4)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"])
with peft_model.disable_adapter():
output_disabled = peft_model.generate(**input_dict)
assert torch.isfinite(output_disabled).all()
assert torch.allclose(output_base, output_disabled)
model_unloaded = peft_model.merge_and_unload()
output_unloaded = model_unloaded.generate(**input_dict)
assert torch.isfinite(output_unloaded).all()
with tempfile.TemporaryDirectory() as tmp_dir:
# save adapter0 (use normal PeftModel, because PeftMixedModel does not support saving)
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
torch.manual_seed(0)
peft_model = get_peft_model(model, config0, "adapter0")
output0_save = peft_model(**input_dict).logits
assert torch.isfinite(output0_save).all()
peft_model.save_pretrained(tmp_dir)
# save adapter1
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
torch.manual_seed(1)
peft_model = get_peft_model(model, config1, "adapter1")
output1_save = peft_model(**input_dict).logits
assert torch.isfinite(output1_save).all()
peft_model.save_pretrained(tmp_dir)
# load adapter0 and adapter1
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
peft_model = PeftMixedModel.from_pretrained(model, os.path.join(tmp_dir, "adapter0"), "adapter0")
peft_model.load_adapter(os.path.join(tmp_dir, "adapter1"), "adapter1")
peft_model.set_adapter(["adapter0", "adapter1"])
output01_loaded = peft_model(**input_dict).logits
atol, rtol = 1e-3, 1e-3
assert torch.isfinite(output01_loaded).all()
assert not torch.allclose(output0_save, output01_loaded, atol=atol, rtol=rtol)
assert not torch.allclose(output1_save, output01_loaded, atol=atol, rtol=rtol)
| peft/tests/test_mixed.py/0 | {
"file_path": "peft/tests/test_mixed.py",
"repo_id": "peft",
"token_count": 16988
} |
# Deep Layer Aggregation
Extending “shallow” skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for DLA: iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks.
IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation.
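To build intuition for what an aggregation node does, here is a conceptual sketch only; the actual DLA implementation in timm differs in its details (for example, how children are combined and how residual connections are handled), so treat the module below as an illustration of the idea of projecting, merging, and refining features from two stages rather than as the library's code.

```py
>>> # conceptual sketch only (not the timm implementation): an aggregation node that
>>> # merges features from two stages by projecting them to a common width, summing,
>>> # and refining the result with batch norm + ReLU
>>> import torch.nn as nn
>>> class AggregationNode(nn.Module):
...     def __init__(self, in_ch0, in_ch1, out_ch):
...         super().__init__()
...         self.proj0 = nn.Conv2d(in_ch0, out_ch, kernel_size=1, bias=False)
...         self.proj1 = nn.Conv2d(in_ch1, out_ch, kernel_size=1, bias=False)
...         self.bn = nn.BatchNorm2d(out_ch)
...         self.relu = nn.ReLU(inplace=True)
...     def forward(self, x0, x1):
...         return self.relu(self.bn(self.proj0(x0) + self.proj1(x1)))
```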
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('dla102', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `dla102`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
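As a quick sketch of that workflow, the snippet below uses timm's standard `features_only` argument to return one feature map per stage; see the linked feature extraction docs for the authoritative usage and configuration options.

```py
>>> import timm
>>> import torch
>>> feature_model = timm.create_model('dla102', pretrained=True, features_only=True)
>>> feature_model.eval()
>>> with torch.no_grad():
...     features = feature_model(torch.randn(1, 3, 224, 224))
>>> for f in features:
...     print(f.shape)  # one tensor per feature stage, at decreasing spatial resolution
```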
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('dla102', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
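If you do write your own loop, a minimal sketch is shown below; `train_loader`, `NUM_FINETUNE_CLASSES`, and the hyperparameters are placeholders you would replace with your own dataset and settings.

```py
>>> import timm
>>> import torch
>>> model = timm.create_model('dla102', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> model.train()
>>> for images, targets in train_loader:  # train_loader is your own DataLoader
...     optimizer.zero_grad()
...     loss = criterion(model(images), targets)
...     loss.backward()
...     optimizer.step()
```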
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{yu2019deep,
title={Deep Layer Aggregation},
author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell},
year={2019},
eprint={1707.06484},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: DLA
Paper:
Title: Deep Layer Aggregation
URL: https://paperswithcode.com/paper/deep-layer-aggregation
Models:
- Name: dla102
In Collection: DLA
Metadata:
FLOPs: 7192952808
Parameters: 33270000
File Size: 135290579
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L410
Weights: http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.03%
Top 5 Accuracy: 93.95%
- Name: dla102x
In Collection: DLA
Metadata:
FLOPs: 5886821352
Parameters: 26310000
File Size: 107552695
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L418
Weights: http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.51%
Top 5 Accuracy: 94.23%
- Name: dla102x2
In Collection: DLA
Metadata:
FLOPs: 9343847400
Parameters: 41280000
File Size: 167645295
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x2
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L426
Weights: http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.44%
Top 5 Accuracy: 94.65%
- Name: dla169
In Collection: DLA
Metadata:
FLOPs: 11598004200
Parameters: 53390000
File Size: 216547113
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla169
LR: 0.1
Epochs: 120
Layers: 169
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L434
Weights: http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.69%
Top 5 Accuracy: 94.33%
- Name: dla34
In Collection: DLA
Metadata:
FLOPs: 3070105576
Parameters: 15740000
File Size: 63228658
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla34
LR: 0.1
Epochs: 120
Layers: 32
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L362
Weights: http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.62%
Top 5 Accuracy: 92.06%
- Name: dla46_c
In Collection: DLA
Metadata:
FLOPs: 583277288
Parameters: 1300000
File Size: 5307963
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L369
Weights: http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 64.87%
Top 5 Accuracy: 86.29%
- Name: dla46x_c
In Collection: DLA
Metadata:
FLOPs: 544052200
Parameters: 1070000
File Size: 4387641
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46x_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L378
Weights: http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.98%
Top 5 Accuracy: 86.99%
- Name: dla60
In Collection: DLA
Metadata:
FLOPs: 4256251880
Parameters: 22040000
File Size: 89560235
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60
LR: 0.1
Epochs: 120
Layers: 60
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L394
Weights: http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.04%
Top 5 Accuracy: 93.32%
- Name: dla60_res2net
In Collection: DLA
Metadata:
FLOPs: 4147578504
Parameters: 20850000
File Size: 84886593
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2net
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L346
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.46%
Top 5 Accuracy: 94.21%
- Name: dla60_res2next
In Collection: DLA
Metadata:
FLOPs: 3485335272
Parameters: 17030000
File Size: 69639245
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2next
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L354
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.44%
Top 5 Accuracy: 94.16%
- Name: dla60x
In Collection: DLA
Metadata:
FLOPs: 3544204264
Parameters: 17350000
File Size: 70883139
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L402
Weights: http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.25%
Top 5 Accuracy: 94.02%
- Name: dla60x_c
In Collection: DLA
Metadata:
FLOPs: 593325032
Parameters: 1320000
File Size: 5454396
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x_c
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L386
Weights: http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.91%
Top 5 Accuracy: 88.42%
--> | pytorch-image-models/hfdocs/source/models/dla.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/dla.mdx",
"repo_id": "pytorch-image-models",
"token_count": 6760
} |
# Inception ResNet v2
**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
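The sketch below is conceptual only (not the timm implementation): it illustrates how an Inception-ResNet style block runs several parallel branches, projects the concatenated result back to the input width, and adds it to the input as a residual; the scaling factor is a typical value used to stabilize training and is included purely for illustration.

```py
>>> # conceptual sketch only: Inception-style branches combined through a residual add
>>> import torch
>>> def inception_resnet_block(x, branches, project, scale=0.17):
...     mixed = torch.cat([branch(x) for branch in branches], dim=1)  # parallel branches
...     return torch.relu(x + scale * project(mixed))                 # residual connection
```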
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('inception_resnet_v2', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{szegedy2016inceptionv4,
title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
year={2016},
eprint={1602.07261},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Inception ResNet v2
Paper:
Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning
URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
Models:
- Name: inception_resnet_v2
In Collection: Inception ResNet v2
Metadata:
FLOPs: 16959133120
Parameters: 55850000
File Size: 223774238
Architecture:
- Average Pooling
- Dropout
- Inception-ResNet-v2 Reduction-B
- Inception-ResNet-v2-A
- Inception-ResNet-v2-B
- Inception-ResNet-v2-C
- Reduction-A
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 20x NVIDIA Kepler GPUs
ID: inception_resnet_v2
LR: 0.045
Dropout: 0.2
Crop Pct: '0.897'
Momentum: 0.9
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L343
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 0.95%
Top 5 Accuracy: 17.29%
--> | pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1684
} |
# Res2NeXt
**Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
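To make the "hierarchical residual-like connections within one block" concrete, here is a conceptual sketch only (not the timm implementation): the input is split into `scale` channel groups, and each group after the first sees the output of the previous group before its own 3x3 convolution, which widens the receptive field group by group.

```py
>>> # conceptual sketch only of the Res2Net-style multi-scale hierarchy inside one block
>>> import torch
>>> def res2net_hierarchy(x, convs, scale=4):
...     splits = torch.chunk(x, scale, dim=1)
...     outputs = []
...     for i, xs in enumerate(splits):
...         if i == 0:
...             y = xs                      # first group passes through unchanged
...         elif i == 1:
...             y = convs[i - 1](xs)        # second group: plain 3x3 convolution
...         else:
...             y = convs[i - 1](xs + y)    # later groups add the previous output first
...         outputs.append(y)
...     return torch.cat(outputs, dim=1)    # concatenated, then fed to the final 1x1 conv
```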
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('res2next50', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `res2next50`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('res2next50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@article{Gao_2021,
title={Res2Net: A New Multi-Scale Backbone Architecture},
volume={43},
ISSN={1939-3539},
url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
DOI={10.1109/tpami.2019.2938758},
number={2},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
publisher={Institute of Electrical and Electronics Engineers (IEEE)},
author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
year={2021},
month={Feb},
pages={652–662}
}
```
<!--
Type: model-index
Collections:
- Name: Res2NeXt
Paper:
Title: 'Res2Net: A New Multi-scale Backbone Architecture'
URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone
Models:
- Name: res2next50
In Collection: Res2NeXt
Metadata:
FLOPs: 5396798208
Parameters: 24670000
File Size: 99019592
Architecture:
- Batch Normalization
- Convolution
- Global Average Pooling
- ReLU
- Res2NeXt Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x Titan Xp GPUs
ID: res2next50
LR: 0.1
Epochs: 100
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L207
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.24%
Top 5 Accuracy: 93.91%
--> | pytorch-image-models/hfdocs/source/models/res2next.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/res2next.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1713
} |
# (Tensorflow) EfficientNet Lite
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
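The snippet below is a small numeric sketch of compound scaling, not library code; `alpha`, `beta`, and `gamma` are the grid-search values the EfficientNet paper reports for the B0 baseline and are used here purely for illustration of the formula.

```py
>>> # numeric sketch of compound scaling: depth *= alpha**phi, width *= beta**phi,
>>> # resolution *= gamma**phi
>>> alpha, beta, gamma = 1.2, 1.1, 1.15
>>> def compound_scale(phi, base_resolution=224):
...     depth_mult = alpha ** phi
...     width_mult = beta ** phi
...     resolution = int(round(base_resolution * gamma ** phi))
...     return depth_mult, width_mult, resolution
>>> compound_scale(1)  # roughly (1.2, 1.1, 258): deeper, wider, higher-resolution than the base
```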
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2).
EfficientNet-Lite makes EfficientNet more suitable for mobile devices by introducing [ReLU6](https://paperswithcode.com/method/relu6) activation functions and removing [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation).
The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_efficientnet_lite0', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_efficientnet_lite0`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_efficientnet_lite0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{tan2020efficientnet,
title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
author={Mingxing Tan and Quoc V. Le},
year={2020},
eprint={1905.11946},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: TF EfficientNet Lite
Paper:
Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
Models:
- Name: tf_efficientnet_lite0
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 488052032
Parameters: 4650000
File Size: 18820223
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite0
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1596
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.83%
Top 5 Accuracy: 92.17%
- Name: tf_efficientnet_lite1
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 773639520
Parameters: 5420000
File Size: 21939331
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite1
Crop Pct: '0.882'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1607
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.67%
Top 5 Accuracy: 93.24%
- Name: tf_efficientnet_lite2
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 1068494432
Parameters: 6090000
File Size: 24658687
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite2
Crop Pct: '0.89'
Image Size: '260'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1618
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.48%
Top 5 Accuracy: 93.75%
- Name: tf_efficientnet_lite3
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 2011534304
Parameters: 8199999
File Size: 33161413
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite3
Crop Pct: '0.904'
Image Size: '300'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1629
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.83%
Top 5 Accuracy: 94.91%
- Name: tf_efficientnet_lite4
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 5164802912
Parameters: 13010000
File Size: 52558819
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite4
Crop Pct: '0.92'
Image Size: '380'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1640
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.54%
Top 5 Accuracy: 95.66%
-->
| pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3372
} |
#!/usr/bin/env python3
"""PyTorch Inference Script
An example inference script that outputs top-k class ids for images in a folder into a csv.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
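Example (illustrative invocation; the data path is a placeholder):
    python inference.py --data-dir /path/to/images --model resnet50 --pretrained --topk 5 --results-format csv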
"""
import argparse
import json
import logging
import os
import time
from contextlib import suppress
from functools import partial
from sys import maxsize
import numpy as np
import pandas as pd
import torch
from timm.data import create_dataset, create_loader, resolve_data_config, ImageNetInfo, infer_imagenet_subset
from timm.layers import apply_test_time_pool
from timm.models import create_model
from timm.utils import AverageMeter, setup_default_logging, set_jit_fuser, ParseKwargs
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_FMT_EXT = {
'json': '.json',
'json-record': '.json',
'json-split': '.json',
'parquet': '.parquet',
'csv': '.csv',
}
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('inference')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--model', '-m', metavar='MODEL', default='resnet50',
help='model architecture (default: resnet50)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N',
help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
help='use Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--model-dtype', default=None, type=str,
help='Model dtype override (non-AMP) (default: float32)')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
parser.add_argument('--torchcompile-mode', type=str, default=None,
help="torch.compile mode (default: None).")
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-dir', type=str, default=None,
help='folder for output results')
parser.add_argument('--results-file', type=str, default=None,
help='results filename (relative to results-dir)')
parser.add_argument('--results-format', type=str, nargs='+', default=['csv'],
help='results format (one of "csv", "json", "json-record", "json-split", "parquet")')
parser.add_argument('--results-separate-col', action='store_true', default=False,
help='separate output columns per result index.')
parser.add_argument('--topk', default=1, type=int,
metavar='N', help='Top-k to output to CSV')
parser.add_argument('--fullname', action='store_true', default=False,
help='use full sample name in output (not just basename).')
parser.add_argument('--filename-col', type=str, default='filename',
help='name for filename / sample name column')
parser.add_argument('--index-col', type=str, default='index',
help='name for output indices column(s)')
parser.add_argument('--label-col', type=str, default='label',
help='name for output label column(s)')
parser.add_argument('--output-col', type=str, default=None,
help='name for logit/probs output column(s)')
parser.add_argument('--output-type', type=str, default='prob',
help='output type column ("prob" for probabilities, "logit" for raw logits)')
parser.add_argument('--label-type', type=str, default='description',
help='type of label to output, one of "none", "name", "description", "detail"')
parser.add_argument('--include-index', action='store_true', default=False,
help='include the class index in results')
parser.add_argument('--exclude-output', action='store_true', default=False,
help='exclude logits/probs from results, just indices. topk must be set !=0.')
parser.add_argument('--no-console-results', action='store_true', default=False,
help='disable printing the inference results to the console')
def main():
setup_default_logging()
args = parser.parse_args()
# might as well try to do something useful...
args.pretrained = args.pretrained or not args.checkpoint
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
model_dtype = None
if args.model_dtype:
assert args.model_dtype in ('float32', 'float16', 'bfloat16')
model_dtype = getattr(torch, args.model_dtype)
# resolve AMP arguments based on PyTorch / Apex availability
amp_autocast = suppress
if args.amp:
assert model_dtype is None or model_dtype == torch.float32, 'float32 model dtype must be used with AMP'
assert args.amp_dtype in ('float16', 'bfloat16')
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Running inference in mixed precision with native PyTorch AMP.')
else:
_logger.info('Running inference in float32. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
num_classes=args.num_classes,
in_chans=in_chans,
pretrained=args.pretrained,
checkpoint_path=args.checkpoint,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
_logger.info(
f'Model {args.model} created, param count: {sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device=device, dtype=model_dtype)
model.eval()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
model = torch.jit.script(model)
elif args.torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required for --torchcompile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile, mode=args.torchcompile_mode)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
root_dir = args.data or args.data_dir
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
class_map=args.class_map,
)
if test_time_pool:
data_config['crop_pct'] = 1.0
workers = 1 if 'tfds' in args.dataset or 'wds' in args.dataset else args.workers
loader = create_loader(
dataset,
batch_size=args.batch_size,
use_prefetcher=True,
num_workers=workers,
device=device,
img_dtype=model_dtype or torch.float32,
**data_config,
)
to_label = None
if args.label_type in ('name', 'description', 'detail'):
imagenet_subset = infer_imagenet_subset(model)
if imagenet_subset is not None:
dataset_info = ImageNetInfo(imagenet_subset)
if args.label_type == 'name':
to_label = lambda x: dataset_info.index_to_label_name(x)
elif args.label_type == 'detail':
to_label = lambda x: dataset_info.index_to_description(x, detailed=True)
else:
to_label = lambda x: dataset_info.index_to_description(x)
to_label = np.vectorize(to_label)
else:
_logger.error("Cannot deduce ImageNet subset from model, no labelling will be performed.")
top_k = min(args.topk, args.num_classes)
batch_time = AverageMeter()
end = time.time()
all_indices = []
all_labels = []
all_outputs = []
use_probs = args.output_type == 'prob'
with torch.no_grad():
for batch_idx, (input, _) in enumerate(loader):
with amp_autocast():
output = model(input)
if use_probs:
output = output.softmax(-1)
if top_k:
output, indices = output.topk(top_k)
np_indices = indices.cpu().numpy()
if args.include_index:
all_indices.append(np_indices)
if to_label is not None:
np_labels = to_label(np_indices)
all_labels.append(np_labels)
all_outputs.append(output.float().cpu().numpy())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, len(loader), batch_time=batch_time))
all_indices = np.concatenate(all_indices, axis=0) if all_indices else None
all_labels = np.concatenate(all_labels, axis=0) if all_labels else None
all_outputs = np.concatenate(all_outputs, axis=0).astype(np.float32)
filenames = loader.dataset.filenames(basename=not args.fullname)
output_col = args.output_col or ('prob' if use_probs else 'logit')
data_dict = {args.filename_col: filenames}
if args.results_separate_col and all_outputs.shape[-1] > 1:
if all_indices is not None:
for i in range(all_indices.shape[-1]):
data_dict[f'{args.index_col}_{i}'] = all_indices[:, i]
if all_labels is not None:
for i in range(all_labels.shape[-1]):
data_dict[f'{args.label_col}_{i}'] = all_labels[:, i]
for i in range(all_outputs.shape[-1]):
data_dict[f'{output_col}_{i}'] = all_outputs[:, i]
else:
if all_indices is not None:
if all_indices.shape[-1] == 1:
all_indices = all_indices.squeeze(-1)
data_dict[args.index_col] = list(all_indices)
if all_labels is not None:
if all_labels.shape[-1] == 1:
all_labels = all_labels.squeeze(-1)
data_dict[args.label_col] = list(all_labels)
if all_outputs.shape[-1] == 1:
all_outputs = all_outputs.squeeze(-1)
data_dict[output_col] = list(all_outputs)
df = pd.DataFrame(data=data_dict)
results_filename = args.results_file
if results_filename:
filename_no_ext, ext = os.path.splitext(results_filename)
if ext and ext in _FMT_EXT.values():
# if filename provided with one of expected ext,
# remove it as it will be added back
results_filename = filename_no_ext
else:
# base default filename on model name + img-size
img_size = data_config["input_size"][1]
results_filename = f'{args.model}-{img_size}'
if args.results_dir:
results_filename = os.path.join(args.results_dir, results_filename)
for fmt in args.results_format:
save_results(df, results_filename, fmt)
if not args.no_console_results:
print('--result')
print(df.set_index(args.filename_col).to_json(orient='index', indent=4))
def save_results(df, results_filename, results_format='csv', filename_col='filename'):
np.set_printoptions(threshold=maxsize)
results_filename += _FMT_EXT[results_format]
if results_format == 'parquet':
df.set_index(filename_col).to_parquet(results_filename)
elif results_format == 'json':
df.set_index(filename_col).to_json(results_filename, indent=4, orient='index')
elif results_format == 'json-record':
df.to_json(results_filename, lines=True, orient='records')
elif results_format == 'json-split':
df.to_json(results_filename, indent=4, orient='split', index=False)
else:
df.to_csv(results_filename, index=False)
if __name__ == '__main__':
main()
| pytorch-image-models/inference.py/0 | {
"file_path": "pytorch-image-models/inference.py",
"repo_id": "pytorch-image-models",
"token_count": 7092
} |
""" Dataset Factory
Hacked together by / Copyright 2021, Ross Wightman
"""
import os
from typing import Optional
from torchvision.datasets import CIFAR100, CIFAR10, MNIST, KMNIST, FashionMNIST, ImageFolder
try:
from torchvision.datasets import Places365
has_places365 = True
except ImportError:
has_places365 = False
try:
from torchvision.datasets import INaturalist
has_inaturalist = True
except ImportError:
has_inaturalist = False
try:
from torchvision.datasets import QMNIST
has_qmnist = True
except ImportError:
has_qmnist = False
try:
from torchvision.datasets import ImageNet
has_imagenet = True
except ImportError:
has_imagenet = False
from .dataset import IterableImageDataset, ImageDataset
_TORCH_BASIC_DS = dict(
cifar10=CIFAR10,
cifar100=CIFAR100,
mnist=MNIST,
kmnist=KMNIST,
fashion_mnist=FashionMNIST,
)
_TRAIN_SYNONYM = dict(train=None, training=None)
_EVAL_SYNONYM = dict(val=None, valid=None, validation=None, eval=None, evaluation=None)
def _search_split(root, split):
# look for sub-folder with name of split in root and use that if it exists
split_name = split.split('[')[0]
try_root = os.path.join(root, split_name)
if os.path.exists(try_root):
return try_root
def _try(syn):
for s in syn:
try_root = os.path.join(root, s)
if os.path.exists(try_root):
return try_root
return root
if split_name in _TRAIN_SYNONYM:
root = _try(_TRAIN_SYNONYM)
elif split_name in _EVAL_SYNONYM:
root = _try(_EVAL_SYNONYM)
return root
def create_dataset(
name: str,
root: Optional[str] = None,
split: str = 'validation',
search_split: bool = True,
class_map: dict = None,
load_bytes: bool = False,
is_training: bool = False,
download: bool = False,
batch_size: int = 1,
num_samples: Optional[int] = None,
seed: int = 42,
repeats: int = 0,
input_img_mode: str = 'RGB',
trust_remote_code: bool = False,
**kwargs,
):
""" Dataset factory method
In parentheses after each arg are the dataset types that support it, one of:
* Folder - default, timm folder (or tar) based ImageDataset
* Torch - torchvision based datasets
* HFDS - Hugging Face Datasets
* HFIDS - Hugging Face Datasets Iterable (streaming mode, with IterableDataset)
* TFDS - Tensorflow-datasets wrapper in IterableDataset interface via IterableImageDataset
* WDS - Webdataset
* All - any of the above
Args:
name: Dataset name, empty is okay for folder based datasets
root: Root folder of dataset (All)
split: Dataset split (All)
search_split: Search for a split-specific child folder from root so one can specify
`imagenet/` instead of `/imagenet/val`, etc. on cmd line / config. (Folder, Torch)
class_map: Specify class -> index mapping via text file or dict (Folder)
load_bytes: Load data, return images as undecoded bytes (Folder)
download: Download dataset if not present and supported (HFIDS, TFDS, Torch)
is_training: Create dataset in train mode, this is different from the split.
For Iterable / TFDS it enables shuffle, ignored for other datasets. (TFDS, WDS, HFIDS)
batch_size: Batch size hint for iterable datasets (TFDS, WDS, HFIDS)
seed: Seed for iterable datasets (TFDS, WDS, HFIDS)
repeats: Dataset repeats per iteration i.e. epoch (TFDS, WDS, HFIDS)
input_img_mode: Input image color conversion mode e.g. 'RGB', 'L' (folder, TFDS, WDS, HFDS, HFIDS)
trust_remote_code: Trust remote code in Hugging Face Datasets if True (HFDS, HFIDS)
**kwargs: Other args to pass through to underlying Dataset and/or Reader classes
Returns:
Dataset object
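    Example:
        Illustrative usage (paths are placeholders):

        >>> ds = create_dataset('', root='/data/imagenet', split='validation')
        >>> ds = create_dataset('torch/cifar10', root='/data', split='train', download=True)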
"""
kwargs = {k: v for k, v in kwargs.items() if v is not None}
name = name.lower()
if name.startswith('torch/'):
name = name.split('/', 2)[-1]
torch_kwargs = dict(root=root, download=download, **kwargs)
if name in _TORCH_BASIC_DS:
ds_class = _TORCH_BASIC_DS[name]
use_train = split in _TRAIN_SYNONYM
ds = ds_class(train=use_train, **torch_kwargs)
elif name == 'inaturalist' or name == 'inat':
assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist'
target_type = 'full'
split_split = split.split('/')
if len(split_split) > 1:
target_type = split_split[0].split('_')
if len(target_type) == 1:
target_type = target_type[0]
split = split_split[-1]
if split in _TRAIN_SYNONYM:
split = '2021_train'
elif split in _EVAL_SYNONYM:
split = '2021_valid'
ds = INaturalist(version=split, target_type=target_type, **torch_kwargs)
elif name == 'places365':
assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.'
if split in _TRAIN_SYNONYM:
split = 'train-standard'
elif split in _EVAL_SYNONYM:
split = 'val'
ds = Places365(split=split, **torch_kwargs)
elif name == 'qmnist':
assert has_qmnist, 'Please update to a newer PyTorch and torchvision for QMNIST dataset.'
use_train = split in _TRAIN_SYNONYM
ds = QMNIST(train=use_train, **torch_kwargs)
elif name == 'imagenet':
assert has_imagenet, 'Please update to a newer PyTorch and torchvision for ImageNet dataset.'
if split in _EVAL_SYNONYM:
split = 'val'
ds = ImageNet(split=split, **torch_kwargs)
elif name == 'image_folder' or name == 'folder':
# in case torchvision ImageFolder is preferred over timm ImageDataset for some reason
if search_split and os.path.isdir(root):
# look for split specific sub-folder in root
root = _search_split(root, split)
ds = ImageFolder(root, **kwargs)
else:
assert False, f"Unknown torchvision dataset {name}"
elif name.startswith('hfds/'):
# NOTE right now, HF datasets default arrow format is a random-access Dataset,
# There will be an IterableDataset variant too, TBD
ds = ImageDataset(
root,
reader=name,
split=split,
class_map=class_map,
input_img_mode=input_img_mode,
trust_remote_code=trust_remote_code,
**kwargs,
)
elif name.startswith('hfids/'):
ds = IterableImageDataset(
root,
reader=name,
split=split,
class_map=class_map,
is_training=is_training,
download=download,
batch_size=batch_size,
num_samples=num_samples,
repeats=repeats,
seed=seed,
input_img_mode=input_img_mode,
trust_remote_code=trust_remote_code,
**kwargs,
)
elif name.startswith('tfds/'):
ds = IterableImageDataset(
root,
reader=name,
split=split,
class_map=class_map,
is_training=is_training,
download=download,
batch_size=batch_size,
num_samples=num_samples,
repeats=repeats,
seed=seed,
input_img_mode=input_img_mode,
**kwargs
)
elif name.startswith('wds/'):
ds = IterableImageDataset(
root,
reader=name,
split=split,
class_map=class_map,
is_training=is_training,
batch_size=batch_size,
num_samples=num_samples,
repeats=repeats,
seed=seed,
input_img_mode=input_img_mode,
**kwargs
)
else:
# FIXME support more advanced split cfg for ImageFolder/Tar datasets in the future
if search_split and os.path.isdir(root):
# look for split specific sub-folder in root
root = _search_split(root, split)
ds = ImageDataset(
root,
reader=name,
class_map=class_map,
load_bytes=load_bytes,
input_img_mode=input_img_mode,
**kwargs,
)
return ds
| pytorch-image-models/timm/data/dataset_factory.py/0 | {
"file_path": "pytorch-image-models/timm/data/dataset_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 4027
} |
""" A dataset reader that reads single tarfile based datasets
This reader can read datasets consisting of a single tarfile containing images.
I am planning to deprecate it in favour of ReaderImageInTar.
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import tarfile
from timm.utils.misc import natural_key
from .class_map import load_class_map
from .img_extensions import get_img_extensions
from .reader import Reader
def extract_tarinfo(tarfile, class_to_idx=None, sort=True):
extensions = get_img_extensions(as_set=True)
files = []
labels = []
for ti in tarfile.getmembers():
if not ti.isfile():
continue
dirname, basename = os.path.split(ti.path)
label = os.path.basename(dirname)
ext = os.path.splitext(basename)[1]
if ext.lower() in extensions:
files.append(ti)
labels.append(label)
if class_to_idx is None:
unique_labels = set(labels)
sorted_labels = list(sorted(unique_labels, key=natural_key))
class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx]
if sort:
tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path))
return tarinfo_and_targets, class_to_idx
class ReaderImageTar(Reader):
""" Single tarfile dataset where classes are mapped to folders within tar
NOTE: This class is being deprecated in favour of the more capable ReaderImageInTar that can
operate on folders of tars or tars in tars.
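    Illustrative usage (the tar path is a placeholder):
        reader = ReaderImageTar('/data/imagenet_train.tar')
        fileobj, target = reader[0]  # file object for the image plus its class index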
"""
def __init__(self, root, class_map=''):
super().__init__()
class_to_idx = None
if class_map:
class_to_idx = load_class_map(class_map, root)
assert os.path.isfile(root)
self.root = root
with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later
self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx)
self.imgs = self.samples
self.tarfile = None # lazy init in __getitem__
def __getitem__(self, index):
if self.tarfile is None:
self.tarfile = tarfile.open(self.root)
tarinfo, target = self.samples[index]
fileobj = self.tarfile.extractfile(tarinfo)
return fileobj, target
def __len__(self):
return len(self.samples)
def _filename(self, index, basename=False, absolute=False):
filename = self.samples[index][0].name
if basename:
filename = os.path.basename(filename)
return filename
| pytorch-image-models/timm/data/readers/reader_image_tar.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/reader_image_tar.py",
"repo_id": "pytorch-image-models",
"token_count": 1071
} |
""" Bottleneck Self Attention (Bottleneck Transformers)
Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
@misc{2101.11605,
Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani},
Title = {Bottleneck Transformers for Visual Recognition},
Year = {2021},
}
Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
This impl is a WIP but given that it is based on the ref gist likely not too far off.
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from .helpers import to_2tuple, make_divisible
from .weight_init import trunc_normal_
from .trace_utils import _assert
def rel_logits_1d(q, rel_k, permute_mask: List[int]):
""" Compute relative logits along one dimension
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
Args:
q: (batch, heads, height, width, dim)
rel_k: (2 * width - 1, dim)
permute_mask: permute output dim according to this
"""
B, H, W, dim = q.shape
x = (q @ rel_k.transpose(-1, -2))
x = x.reshape(-1, W, 2 * W - 1)
# pad to shift from relative to absolute indexing
x_pad = F.pad(x, [0, 1]).flatten(1)
x_pad = F.pad(x_pad, [0, W - 1])
# reshape and slice out the padded elements
x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1)
x = x_pad[:, :W, W - 1:]
# reshape and tile
x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1)
return x.permute(permute_mask)
class PosEmbedRel(nn.Module):
""" Relative Position Embedding
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
"""
def __init__(self, feat_size, dim_head, scale):
super().__init__()
self.height, self.width = to_2tuple(feat_size)
self.dim_head = dim_head
self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale)
self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale)
def forward(self, q):
B, HW, _ = q.shape
# relative logits in width dimension.
q = q.reshape(B, self.height, self.width, -1)
rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
# relative logits in height dimension.
q = q.transpose(1, 2)
rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
rel_logits = rel_logits_h + rel_logits_w
rel_logits = rel_logits.reshape(B, HW, HW)
return rel_logits
class BottleneckAttn(nn.Module):
""" Bottleneck Attention
Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
The internal dimensions of the attention module are controlled by the interaction of several arguments.
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set
* the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
* the query and key (qk) dimensions are determined by
* num_heads * dim_head if dim_head is not None
* num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
* as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used
Args:
dim (int): input dimension to the module
dim_out (int): output dimension of the module, same as dim if not set
stride (int): output stride of the module, avg pool used if stride == 2 (default: 1).
num_heads (int): parallel attention heads (default: 4)
dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
qkv_bias (bool): add bias to q, k, and v projections
scale_pos_embed (bool): scale the position embedding as well as Q @ K
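    Example (illustrative shapes):
        attn = BottleneckAttn(dim=256, feat_size=(14, 14), num_heads=4)
        out = attn(torch.randn(2, 256, 14, 14))  # -> (2, 256, 14, 14)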
"""
def __init__(
self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None,
qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False):
super().__init__()
assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required'
dim_out = dim_out or dim
assert dim_out % num_heads == 0
self.num_heads = num_heads
self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
self.dim_head_v = dim_out // self.num_heads
self.dim_out_qk = num_heads * self.dim_head_qk
self.dim_out_v = num_heads * self.dim_head_v
self.scale = self.dim_head_qk ** -0.5
self.scale_pos_embed = scale_pos_embed
self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias)
# NOTE I'm only supporting relative pos embedding for now
self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale)
self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity()
self.reset_parameters()
def reset_parameters(self):
trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in
trunc_normal_(self.pos_embed.height_rel, std=self.scale)
trunc_normal_(self.pos_embed.width_rel, std=self.scale)
def forward(self, x):
B, C, H, W = x.shape
_assert(H == self.pos_embed.height, '')
_assert(W == self.pos_embed.width, '')
x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W
# NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v
# So, this is more verbose than if heads were before qkv splits, but throughput is not impacted.
q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1)
q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2)
k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k
v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2)
if self.scale_pos_embed:
attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W
else:
attn = (q @ k) * self.scale + self.pos_embed(q)
attn = attn.softmax(dim=-1)
out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W
out = self.pool(out)
return out
| pytorch-image-models/timm/layers/bottleneck_attn.py/0 | {
"file_path": "pytorch-image-models/timm/layers/bottleneck_attn.py",
"repo_id": "pytorch-image-models",
"token_count": 2907
} |
""" Filter Response Norm in PyTorch
Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737
Hacked together by / Copyright 2021 Ross Wightman
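Illustrative usage (shapes are example values):
    frn = FilterResponseNormAct2d(64)
    y = frn(torch.randn(2, 64, 14, 14))  # normalized, scaled, and activated; same shape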
"""
import torch
import torch.nn as nn
from .create_act import create_act_layer
from .trace_utils import _assert
def inv_instance_rms(x, eps: float = 1e-5):
rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype)
return rms.expand(x.shape)
class FilterResponseNormTlu2d(nn.Module):
def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_):
super(FilterResponseNormTlu2d, self).__init__()
self.apply_act = apply_act # apply activation (non-linearity)
self.rms = rms
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
if self.tau is not None:
nn.init.zeros_(self.tau)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
x = x * inv_instance_rms(x, self.eps)
x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype)
return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x
class FilterResponseNormAct2d(nn.Module):
def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_):
super(FilterResponseNormAct2d, self).__init__()
if act_layer is not None and apply_act:
self.act = create_act_layer(act_layer, inplace=inplace)
else:
self.act = nn.Identity()
self.rms = rms
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
x = x * inv_instance_rms(x, self.eps)
x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype)
return self.act(x)
| pytorch-image-models/timm/layers/filter_response_norm.py/0 | {
"file_path": "pytorch-image-models/timm/layers/filter_response_norm.py",
"repo_id": "pytorch-image-models",
"token_count": 1182
} |
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn.modules.transformer import _get_activation_fn
def add_ml_decoder_head(model):
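    """Replace the classification head of a supported timm model (ResNet-like, EfficientNet-like,
    RegNet or TResNet) with an MLDecoder head.

    Illustrative usage (the model name is an example):
        model = timm.create_model('resnet50', pretrained=True)
        model = add_ml_decoder_head(model)
    """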
if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50
model.global_pool = nn.Identity()
del model.fc
num_classes = model.num_classes
num_features = model.num_features
model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): # EfficientNet
model.global_pool = nn.Identity()
del model.classifier
num_classes = model.num_classes
num_features = model.num_features
model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): # hasattr(model, 'head')
del model.head
num_classes = model.num_classes
num_features = model.num_features
model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
else:
print("Model code-writing is not aligned currently with ml-decoder")
exit(-1)
if hasattr(model, 'drop_rate'): # Ml-Decoder has inner dropout
model.drop_rate = 0
return model
class TransformerDecoderLayerOptimal(nn.Module):
def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu",
layer_norm_eps=1e-5) -> None:
super(TransformerDecoderLayerOptimal, self).__init__()
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = torch.nn.functional.relu
super(TransformerDecoderLayerOptimal, self).__setstate__(state)
def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
tgt = tgt + self.dropout1(tgt)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, memory)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
# class ExtrapClasses(object):
# def __init__(self, num_queries: int, group_size: int):
# self.num_queries = num_queries
# self.group_size = group_size
#
# def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap:
# torch.Tensor):
# # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size)
# h = h[..., None].repeat(1, 1, 1, self.group_size) # torch.Size([bs, 5, 768, groups])
# w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size))
# out = (h * w).sum(dim=2) + class_embed_b
# out = out.view((h.shape[0], self.group_size * self.num_queries))
# return out
class MLDecoder(nn.Module):
def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048):
super(MLDecoder, self).__init__()
embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups
if embed_len_decoder > num_classes:
embed_len_decoder = num_classes
self.embed_len_decoder = embed_len_decoder
# switching to 768 initial embeddings
decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding
self.embed_standart = nn.Linear(initial_num_features, decoder_embedding)
# decoder
decoder_dropout = 0.1
num_layers_decoder = 1
dim_feedforward = 2048
layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding,
dim_feedforward=dim_feedforward, dropout=decoder_dropout)
self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder)
# non-learnable queries
self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding)
self.query_embed.requires_grad_(False)
# group fully-connected
self.num_classes = num_classes
self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999)
self.duplicate_pooling = torch.nn.Parameter(
torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor))
self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes))
torch.nn.init.xavier_normal_(self.duplicate_pooling)
torch.nn.init.constant_(self.duplicate_pooling_bias, 0)
def forward(self, x):
if len(x.shape) == 4: # [bs,2048, 7,7]
embedding_spatial = x.flatten(2).transpose(1, 2)
else: # [bs, 197, 768]
embedding_spatial = x
embedding_spatial_786 = self.embed_standart(embedding_spatial)
embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True)
bs = embedding_spatial_786.shape[0]
query_embed = self.query_embed.weight
# tgt = query_embed.unsqueeze(1).repeat(1, bs, 1)
tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand
h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768]
h = h.transpose(0, 1)
out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype)
for i in range(self.embed_len_decoder): # group FC
h_i = h[:, i, :]
w_i = self.duplicate_pooling[i, :, :]
out_extrap[:, i, :] = torch.matmul(h_i, w_i)
h_out = out_extrap.flatten(1)[:, :self.num_classes]
h_out += self.duplicate_pooling_bias
logits = h_out
return logits
| pytorch-image-models/timm/layers/ml_decoder.py/0 | {
"file_path": "pytorch-image-models/timm/layers/ml_decoder.py",
"repo_id": "pytorch-image-models",
"token_count": 3048
} |
""" Split BatchNorm
A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
a separate BN layer. The first split is passed through the parent BN layers with weight/bias
keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
namespace.
This allows easily removing the auxiliary BN layers after training to efficiently
achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
'Disentangled Learning via An Auxiliary BN'
Hacked together by / Copyright 2020 Ross Wightman
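Illustrative usage (shapes are example values; in training the batch must be divisible by num_splits):
    bn = SplitBatchNorm2d(64, num_splits=2)
    y = bn(torch.randn(8, 64, 14, 14))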
"""
import torch
import torch.nn as nn
class SplitBatchNorm2d(torch.nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True, num_splits=2):
super().__init__(num_features, eps, momentum, affine, track_running_stats)
assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
self.num_splits = num_splits
self.aux_bn = nn.ModuleList([
nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)])
def forward(self, input: torch.Tensor):
if self.training: # aux BN only relevant while training
split_size = input.shape[0] // self.num_splits
assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
split_input = input.split(split_size)
x = [super().forward(split_input[0])]
for i, a in enumerate(self.aux_bn):
x.append(a(split_input[i + 1]))
return torch.cat(x, dim=0)
else:
return super().forward(input)
def convert_splitbn_model(module, num_splits=2):
"""
Recursively traverse module and its children to replace all instances of
``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`.
Args:
module (torch.nn.Module): input module
num_splits: number of separate batchnorm layers to split input across
Example::
>>> # model is an instance of torch.nn.Module
>>> model = timm.models.convert_splitbn_model(model, num_splits=2)
"""
mod = module
if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):
return module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
mod = SplitBatchNorm2d(
module.num_features, module.eps, module.momentum, module.affine,
module.track_running_stats, num_splits=num_splits)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
mod.num_batches_tracked = module.num_batches_tracked
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
for aux in mod.aux_bn:
aux.running_mean = module.running_mean.clone()
aux.running_var = module.running_var.clone()
aux.num_batches_tracked = module.num_batches_tracked.clone()
if module.affine:
aux.weight.data = module.weight.data.clone().detach()
aux.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits))
del module
return mod
| pytorch-image-models/timm/layers/split_batchnorm.py/0 | {
"file_path": "pytorch-image-models/timm/layers/split_batchnorm.py",
"repo_id": "pytorch-image-models",
"token_count": 1394
} |
import os
from pathlib import Path
from typing import Any, Dict, Optional, Union
from urllib.parse import urlsplit
from timm.layers import set_layer_config
from ._helpers import load_checkpoint
from ._hub import load_model_config_from_hf
from ._pretrained import PretrainedCfg
from ._registry import is_model, model_entrypoint, split_model_name_tag
__all__ = ['parse_model_name', 'safe_model_name', 'create_model']
def parse_model_name(model_name: str):
if model_name.startswith('hf_hub'):
# NOTE for backwards compat, deprecate hf_hub use
model_name = model_name.replace('hf_hub', 'hf-hub')
parsed = urlsplit(model_name)
assert parsed.scheme in ('', 'timm', 'hf-hub')
if parsed.scheme == 'hf-hub':
# FIXME may use fragment as revision, currently `@` in URI path
return parsed.scheme, parsed.path
else:
model_name = os.path.split(parsed.path)[-1]
return 'timm', model_name
def safe_model_name(model_name: str, remove_source: bool = True):
# return a filename / path safe model name
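    # e.g. safe_model_name('vit_base_patch16_224.augreg_in21k') -> 'vit_base_patch16_224_augreg_in21k'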
def make_safe(name):
return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_')
if remove_source:
model_name = parse_model_name(model_name)[-1]
return make_safe(model_name)
def create_model(
model_name: str,
pretrained: bool = False,
pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]] = None,
pretrained_cfg_overlay: Optional[Dict[str, Any]] = None,
checkpoint_path: Optional[Union[str, Path]] = None,
cache_dir: Optional[Union[str, Path]] = None,
scriptable: Optional[bool] = None,
exportable: Optional[bool] = None,
no_jit: Optional[bool] = None,
**kwargs,
):
"""Create a model.
Lookup model's entrypoint function and pass relevant args to create a new model.
Tip:
**kwargs will be passed through entrypoint fn to ``timm.models.build_model_with_cfg()``
and then the model class __init__(). kwargs values set to None are pruned before passing.
Args:
model_name: Name of model to instantiate.
pretrained: If set to `True`, load pretrained ImageNet-1k weights.
pretrained_cfg: Pass in an external pretrained_cfg for model.
pretrained_cfg_overlay: Replace key-values in base pretrained_cfg with these.
checkpoint_path: Path of checkpoint to load _after_ the model is initialized.
cache_dir: Override model cache dir for Hugging Face Hub and Torch checkpoints.
scriptable: Set layer config so that model is jit scriptable (not working for all models yet).
exportable: Set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet).
no_jit: Set layer config so that model doesn't utilize jit scripted layers (so far activations only).
Keyword Args:
drop_rate (float): Classifier dropout rate for training.
drop_path_rate (float): Stochastic depth drop rate for training.
global_pool (str): Classifier global pooling type.
Example:
```py
>>> from timm import create_model
>>> # Create a MobileNetV3-Large model with no pretrained weights.
>>> model = create_model('mobilenetv3_large_100')
>>> # Create a MobileNetV3-Large model with pretrained weights.
>>> model = create_model('mobilenetv3_large_100', pretrained=True)
>>> model.num_classes
1000
>>> # Create a MobileNetV3-Large model with pretrained weights and a new head with 10 classes.
>>> model = create_model('mobilenetv3_large_100', pretrained=True, num_classes=10)
>>> model.num_classes
10
>>> # Create a Dinov2 small model with pretrained weights and save weights in a custom directory.
>>> model = create_model('vit_small_patch14_dinov2.lvd142m', pretrained=True, cache_dir="/data/my-models")
>>> # Data will be stored at `/data/my-models/models--timm--vit_small_patch14_dinov2.lvd142m/`
```
"""
# Parameters that aren't supported by all models or are intended to only override model defaults if set
# should default to None in command line args/cfg. Remove them if they are present and not set so that
# non-supporting models don't break and default args remain in effect.
kwargs = {k: v for k, v in kwargs.items() if v is not None}
model_source, model_name = parse_model_name(model_name)
if model_source == 'hf-hub':
assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.'
# For model names specified in the form `hf-hub:path/architecture_name@revision`,
# load model weights + pretrained_cfg from Hugging Face hub.
pretrained_cfg, model_name, model_args = load_model_config_from_hf(
model_name,
cache_dir=cache_dir,
)
if model_args:
for k, v in model_args.items():
kwargs.setdefault(k, v)
else:
model_name, pretrained_tag = split_model_name_tag(model_name)
if pretrained_tag and not pretrained_cfg:
# a valid pretrained_cfg argument takes priority over tag in model name
pretrained_cfg = pretrained_tag
if not is_model(model_name):
raise RuntimeError('Unknown model (%s)' % model_name)
create_fn = model_entrypoint(model_name)
with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit):
model = create_fn(
pretrained=pretrained,
pretrained_cfg=pretrained_cfg,
pretrained_cfg_overlay=pretrained_cfg_overlay,
cache_dir=cache_dir,
**kwargs,
)
if checkpoint_path:
load_checkpoint(model, checkpoint_path)
return model
| pytorch-image-models/timm/models/_factory.py/0 | {
"file_path": "pytorch-image-models/timm/models/_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 2169
} |
""" Bring-Your-Own-Blocks Network
A flexible network w/ dataclass based config for stacking those NN blocks.
This model is currently used to implement the following networks:
GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)).
Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0
RepVGG - repvgg_*
Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT
MobileOne - mobileone_*
Paper: `MobileOne: An Improved One millisecond Mobile Backbone` - https://arxiv.org/abs/2206.04040
Code and weights: https://github.com/apple/ml-mobileone, licensed MIT
In all cases the models have been modified to fit within the design of ByobNet. I've remapped
the original weights and verified accuracies.
For GPU Efficient nets, I used the original names for the blocks since they were for the most part
the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some
changes introduced in RegNet were also present in the stem and bottleneck blocks for this model.
A significant number of different network archs can be implemented here, including variants of the
above nets that include attention.
Hacked together by / copyright Ross Wightman, 2021.
"""
import math
from dataclasses import dataclass, field, replace
from functools import partial
from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from timm.layers import (
ClassifierHead, NormMlpClassifierHead, ConvNormAct, BatchNormAct2d, EvoNorm2dS0a,
AttentionPool2d, RotAttentionPool2d, DropPath, AvgPool2dSame,
create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple,
)
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']
@dataclass
class ByoBlockCfg:
type: Union[str, nn.Module]
d: int # block depth (number of block repeats in stage)
c: int # number of output channels for each block in stage
s: int = 2 # stride of stage (first block)
gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1
br: float = 1. # bottleneck-ratio of blocks in stage
# NOTE: these config items override the model cfgs that are applied to all blocks by default
attn_layer: Optional[str] = None
attn_kwargs: Optional[Dict[str, Any]] = None
self_attn_layer: Optional[str] = None
self_attn_kwargs: Optional[Dict[str, Any]] = None
block_kwargs: Optional[Dict[str, Any]] = None
@dataclass
class ByoModelCfg:
blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]
downsample: str = 'conv1x1'
stem_type: str = '3x3'
stem_pool: Optional[str] = 'maxpool'
stem_chs: Union[int, List[int], Tuple[int, ...]] = 32
width_factor: float = 1.0
num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation
# layer config
act_layer: str = 'relu'
norm_layer: str = 'batchnorm'
aa_layer: str = ''
# Head config
head_hidden_size: Optional[int] = None # feat dim of MLP head or AttentionPool output
head_type: str = 'classifier'
# Block config
# NOTE: these config items will be overridden by the block cfg (per-block) if they are set there
attn_layer: Optional[str] = None
attn_kwargs: dict = field(default_factory=lambda: dict())
self_attn_layer: Optional[str] = None
self_attn_kwargs: dict = field(default_factory=lambda: dict())
block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())
def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):
c = (64, 128, 256, 512)
group_size = 0
if groups > 0:
group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0
bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])
return bcfg
def _mobileone_bcfg(d=(2, 8, 10, 1), wf=(1., 1., 1., 1.), se_blocks=(), num_conv_branches=1):
c = (64, 128, 256, 512)
prev_c = min(64, c[0] * wf[0])
se_blocks = se_blocks or (0,) * len(d)
bcfg = []
for d, c, w, se in zip(d, c, wf, se_blocks):
scfg = []
for i in range(d):
out_c = c * w
bk = dict(num_conv_branches=num_conv_branches)
ak = {}
if i >= d - se:
ak['attn_layer'] = 'se'
scfg += [ByoBlockCfg(type='one', d=1, c=prev_c, gs=1, block_kwargs=bk, **ak)] # depthwise block
scfg += [ByoBlockCfg(
type='one', d=1, c=out_c, gs=0, block_kwargs=dict(kernel_size=1, **bk), **ak)] # pointwise block
prev_c = out_c
bcfg += [scfg]
return bcfg
def interleave_blocks(
types: Tuple[str, str], d,
every: Union[int, List[int]] = 1,
first: bool = False,
**kwargs,
) -> Tuple[ByoBlockCfg]:
""" interleave 2 block types in stack
"""
assert len(types) == 2
if isinstance(every, int):
every = list(range(0 if first else every, d, every + 1))
if not every:
every = [d - 1]
every = set(every)
blocks = []
for i in range(d):
block_type = types[1] if i in every else types[0]
blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]
return tuple(blocks)
def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:
if not isinstance(stage_blocks_cfg, Sequence):
stage_blocks_cfg = (stage_blocks_cfg,)
block_cfgs = []
for i, cfg in enumerate(stage_blocks_cfg):
block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]
return block_cfgs
def num_groups(group_size, channels):
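    # e.g. num_groups(None, 64) -> 1 (normal conv), num_groups(1, 64) -> 64 (depthwise), num_groups(16, 64) -> 4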
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
@dataclass
class LayerFn:
conv_norm_act: Callable = ConvNormAct
norm_act: Callable = BatchNormAct2d
act: Callable = nn.ReLU
attn: Optional[Callable] = None
self_attn: Optional[Callable] = None
class DownsampleAvg(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: int = 1,
apply_act: bool = False,
layers: LayerFn = None,
):
""" AvgPool Downsampling as in 'D' ResNet variants."""
super(DownsampleAvg, self).__init__()
layers = layers or LayerFn()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)
def forward(self, x):
return self.conv(self.pool(x))
def create_shortcut(
downsample_type: str,
in_chs: int,
out_chs: int,
stride: int,
dilation: Tuple[int, int],
layers: LayerFn,
**kwargs,
):
assert downsample_type in ('avg', 'conv1x1', '')
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
if not downsample_type:
return None # no shortcut
elif downsample_type == 'avg':
return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs)
else:
return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs)
else:
return nn.Identity() # identity shortcut
class BasicBlock(nn.Module):
""" ResNet Basic Block - kxk + kxk
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
group_size: Optional[int] = None,
bottle_ratio: float = 1.0,
downsample: str = 'avg',
attn_last: bool = True,
linear_out: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(BasicBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size,
dilation=dilation[1], groups=groups, drop_layer=drop_block, apply_act=False,
)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class BottleneckBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - kxk - 1x1
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.,
group_size: Optional[int] = None,
downsample: str = 'avg',
attn_last: bool = False,
linear_out: bool = False,
extra_conv: bool = False,
bottle_in: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(BottleneckBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block,
)
if extra_conv:
self.conv2b_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups)
else:
self.conv2b_kxk = nn.Identity()
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv3_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.conv2b_kxk(x)
x = self.attn(x)
x = self.conv3_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class DarkBlock(nn.Module):
""" DarkNet-like (1x1 + 3x3 w/ stride) block
The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models.
    This block is pretty much a DarkNet block (also DenseNet), hence the name. Neither DarkNet nor DenseNet
    uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).
    If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 w/ stride + 1x1)
    for better compute efficiency.
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
downsample: str = 'avg',
attn_last: bool = True,
linear_out: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(DarkBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False,
)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_1x1(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class EdgeBlock(nn.Module):
""" EdgeResidual-like (3x3 + 1x1) block
A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.
    Very similar to the EfficientNet Edge-Residual block, but this block ends with an activation, is
    intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.
FIXME is there a more common 3x3 + 1x1 conv block to name this after?
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
downsample: str = 'avg',
attn_last: bool = False,
linear_out: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(EdgeBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_kxk = layers.conv_norm_act(
in_chs, mid_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block,
)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv2_1x1.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class RepVggBlock(nn.Module):
""" RepVGG Block.
Adapted from impl at https://github.com/DingXiaoH/RepVGG
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
downsample: str = '',
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
inference_mode: bool = False
):
super(RepVggBlock, self).__init__()
self.groups = groups = num_groups(group_size, in_chs)
layers = layers or LayerFn()
if inference_mode:
self.reparam_conv = nn.Conv2d(
in_channels=in_chs,
out_channels=out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
bias=True,
)
else:
self.reparam_conv = None
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
self.conv_kxk = layers.conv_norm_act(
in_chs, out_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False,
)
self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.act = layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
        # NOTE this init overrides the base model init with specific changes for the block type
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight, .1, .1)
nn.init.normal_(m.bias, 0, .1)
if hasattr(self.attn, 'reset_parameters'):
self.attn.reset_parameters()
def forward(self, x):
if self.reparam_conv is not None:
return self.act(self.attn(self.reparam_conv(x)))
if self.identity is None:
x = self.conv_1x1(x) + self.conv_kxk(x)
else:
identity = self.identity(x)
x = self.conv_1x1(x) + self.conv_kxk(x)
x = self.drop_path(x) # not in the paper / official impl, experimental
x += identity
x = self.attn(x) # no attn in the paper / official impl, experimental
return self.act(x)
    def reparameterize(self):
        """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
        https://arxiv.org/pdf/2101.03697.pdf, we re-parameterize the multi-branched
        architecture used at training time to obtain a plain CNN-like structure
        for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = nn.Conv2d(
in_channels=self.conv_kxk.conv.in_channels,
out_channels=self.conv_kxk.conv.out_channels,
kernel_size=self.conv_kxk.conv.kernel_size,
stride=self.conv_kxk.conv.stride,
padding=self.conv_kxk.conv.padding,
dilation=self.conv_kxk.conv.dilation,
groups=self.conv_kxk.conv.groups,
bias=True,
)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__('conv_kxk')
self.__delattr__('conv_1x1')
self.__delattr__('identity')
self.__delattr__('drop_path')
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
"""
# get weights and bias of scale branch
kernel_1x1 = 0
bias_1x1 = 0
if self.conv_1x1 is not None:
kernel_1x1, bias_1x1 = self._fuse_bn_tensor(self.conv_1x1)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.conv_kxk.conv.kernel_size[0] // 2
kernel_1x1 = torch.nn.functional.pad(kernel_1x1, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv, bias_conv = self._fuse_bn_tensor(self.conv_kxk)
kernel_final = kernel_conv + kernel_1x1 + kernel_identity
bias_final = bias_conv + bias_1x1 + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to fuse batchnorm layer with preceding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, 'id_tensor'):
in_chs = self.conv_kxk.conv.in_channels
input_dim = in_chs // self.groups
kernel_size = self.conv_kxk.conv.kernel_size
kernel_value = torch.zeros_like(self.conv_kxk.conv.weight)
for i in range(in_chs):
kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
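# Equivalence sketch for the RepVGG fusion above (a minimal check, assuming eval-mode BN so running stats are used):
#   blk = RepVggBlock(64, 64).eval()
#   x = torch.randn(1, 64, 32, 32)
#   y_train = blk(x)                  # sum of conv_kxk, conv_1x1 and identity branches
#   blk.reparameterize()              # folds all branches into a single reparam_conv
#   y_deploy = blk(x)
#   torch.testing.assert_close(y_train, y_deploy, rtol=1e-4, atol=1e-4)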
class MobileOneBlock(nn.Module):
""" MobileOne building block.
This block has a multi-branched architecture at train-time
and plain-CNN style architecture at inference time
For more details, please refer to our paper:
`An Improved One millisecond Mobile Backbone` -
https://arxiv.org/pdf/2206.04040.pdf
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0, # unused
group_size: Optional[int] = None,
downsample: str = '', # unused
inference_mode: bool = False,
num_conv_branches: int = 1,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
) -> None:
""" Construct a MobileOneBlock module.
"""
super(MobileOneBlock, self).__init__()
self.num_conv_branches = num_conv_branches
self.groups = groups = num_groups(group_size, in_chs)
layers = layers or LayerFn()
if inference_mode:
self.reparam_conv = nn.Conv2d(
in_channels=in_chs,
out_channels=out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
bias=True)
else:
self.reparam_conv = None
# Re-parameterizable skip connection
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
# Re-parameterizable conv branches
convs = []
for _ in range(self.num_conv_branches):
convs.append(layers.conv_norm_act(
in_chs, out_chs, kernel_size=kernel_size,
stride=stride, groups=groups, apply_act=False))
self.conv_kxk = nn.ModuleList(convs)
# Re-parameterizable scale branch
self.conv_scale = None
if kernel_size > 1:
self.conv_scale = layers.conv_norm_act(
in_chs, out_chs, kernel_size=1,
stride=stride, groups=groups, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.act = layers.act(inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
""" Apply forward pass. """
# Inference mode forward pass.
if self.reparam_conv is not None:
return self.act(self.attn(self.reparam_conv(x)))
# Multi-branched train-time forward pass.
# Skip branch output
identity_out = 0
if self.identity is not None:
identity_out = self.identity(x)
# Scale branch output
scale_out = 0
if self.conv_scale is not None:
scale_out = self.conv_scale(x)
# Other branches
out = scale_out
for ck in self.conv_kxk:
out += ck(x)
out = self.drop_path(out)
out += identity_out
return self.act(self.attn(out))
    def reparameterize(self):
        """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
        https://arxiv.org/pdf/2101.03697.pdf, we re-parameterize the multi-branched
        architecture used at training time to obtain a plain CNN-like structure
        for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = nn.Conv2d(
in_channels=self.conv_kxk[0].conv.in_channels,
out_channels=self.conv_kxk[0].conv.out_channels,
kernel_size=self.conv_kxk[0].conv.kernel_size,
stride=self.conv_kxk[0].conv.stride,
padding=self.conv_kxk[0].conv.padding,
dilation=self.conv_kxk[0].conv.dilation,
groups=self.conv_kxk[0].conv.groups,
bias=True)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__('conv_kxk')
self.__delattr__('conv_scale')
self.__delattr__('identity')
self.__delattr__('drop_path')
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
"""
# get weights and bias of scale branch
kernel_scale = 0
bias_scale = 0
if self.conv_scale is not None:
kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.conv_kxk[0].conv.kernel_size[0] // 2
kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv = 0
bias_conv = 0
for ix in range(self.num_conv_branches):
_kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix])
kernel_conv += _kernel
bias_conv += _bias
kernel_final = kernel_conv + kernel_scale + kernel_identity
bias_final = bias_conv + bias_scale + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to fuse batchnorm layer with preceding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, 'id_tensor'):
in_chs = self.conv_kxk[0].conv.in_channels
input_dim = in_chs // self.groups
kernel_size = self.conv_kxk[0].conv.kernel_size
kernel_value = torch.zeros_like(self.conv_kxk[0].conv.weight)
for i in range(in_chs):
kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
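# Note: the MobileOne fusion generalizes the RepVGG case above - it sums the `num_conv_branches` fused kxk
# branches, the zero-padded 1x1 scale branch (present when kernel_size > 1), and the BN-only identity branch
# (when shapes allow), so eval-mode outputs before and after reparameterize() should match.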
class SelfAttnBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.,
group_size: Optional[int] = None,
downsample: str = 'avg',
extra_conv: bool = False,
linear_out: bool = False,
bottle_in: bool = False,
post_attn_na: bool = True,
feat_size: Optional[Tuple[int, int]] = None,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(SelfAttnBlock, self).__init__()
assert layers is not None
mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
if extra_conv:
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block,
)
stride = 1 # striding done via conv if enabled
else:
self.conv2_kxk = nn.Identity()
opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
# FIXME need to dilate self attn to have dilated network support, moop moop
self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv3_1x1.bn.weight)
if hasattr(self.self_attn, 'reset_parameters'):
self.self_attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.self_attn(x)
x = self.post_attn(x)
x = self.conv3_1x1(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
_block_registry = dict(
basic=BasicBlock,
bottle=BottleneckBlock,
dark=DarkBlock,
edge=EdgeBlock,
rep=RepVggBlock,
one=MobileOneBlock,
self_attn=SelfAttnBlock,
)
def register_block(block_type: str, block_fn: nn.Module):
_block_registry[block_type] = block_fn
def create_block(block: Union[str, nn.Module], **kwargs):
if isinstance(block, (nn.Module, partial)):
return block(**kwargs)
    assert block in _block_registry, f'Unknown block type ({block})'
return _block_registry[block](**kwargs)
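# Registry usage sketch (MyBlock is hypothetical, shown only to illustrate the contract): a custom block must
# accept the kwargs passed by create_byob_stages (in_chs, out_chs, stride, dilation, group_size, bottle_ratio,
# downsample, drop_path_rate, layers, ...):
#   register_block('my_block', MyBlock)
#   blk = create_block('my_block', in_chs=64, out_chs=128, stride=2, layers=LayerFn())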
class Stem(nn.Sequential):
def __init__(
self,
in_chs: int,
out_chs: Union[int, List[int], Tuple[int, ...]],
kernel_size: int = 3,
stride: int = 4,
pool: str = 'maxpool',
num_rep: int = 3,
num_act: Optional[int] = None,
chs_decay: float = 0.5,
layers: LayerFn = None,
):
super().__init__()
assert stride in (2, 4)
layers = layers or LayerFn()
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
self.stride = stride
self.feature_info = [] # track intermediate features
prev_feat = ''
stem_strides = [2] + [1] * (num_rep - 1)
if stride == 4 and not pool:
# set last conv in stack to be strided if stride == 4 and no pooling layer
stem_strides[-1] = 2
num_act = num_rep if num_act is None else num_act
# if num_act < num_rep, first convs in stack won't have bn + act
stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
prev_chs = in_chs
curr_stride = 1
last_feat_idx = -1
for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = layers.conv_norm_act if na else create_conv2d
conv_name = f'conv{i + 1}'
if i > 0 and s > 1:
last_feat_idx = i - 1
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if pool:
pool = pool.lower()
assert pool in ('max', 'maxpool', 'avg', 'avgpool', 'max2', 'avg2')
last_feat_idx = i
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0))
if pool == 'max2':
self.add_module('pool', nn.MaxPool2d(2))
elif pool == 'avg2':
self.add_module('pool', nn.AvgPool2d(2))
elif 'max' in pool:
self.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
elif 'avg' in pool:
self.add_module('pool', nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False))
curr_stride *= 2
prev_feat = 'pool'
self.last_feat_idx = last_feat_idx if last_feat_idx >= 0 else None
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0))
assert curr_stride == stride
def forward_intermediates(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
intermediate: Optional[torch.Tensor] = None
for i, m in enumerate(self):
x = m(x)
if self.last_feat_idx is not None and i == self.last_feat_idx:
intermediate = x
return x, intermediate
def create_byob_stem(
in_chs: int,
out_chs: int,
stem_type: str = '',
pool_type: str = '',
feat_prefix: str = 'stem',
layers: LayerFn = None,
):
layers = layers or LayerFn()
assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', 'one', '7x7', '3x3')
if 'quad' in stem_type:
# based on NFNet stem, stack of 4 3x3 convs
num_act = 2 if 'quad2' in stem_type else None
stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)
elif 'tiered' in stem_type:
# 3x3 stack of 3 convs as in my ResNet-T
stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers)
elif 'deep' in stem_type:
# 3x3 stack of 3 convs as in ResNet-D
stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)
elif 'rep' in stem_type:
stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)
elif 'one' in stem_type:
stem = MobileOneBlock(in_chs, out_chs, kernel_size=3, stride=2, layers=layers)
elif '7x7' in stem_type:
# 7x7 stem conv as in ResNet
if pool_type:
stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)
else:
if isinstance(out_chs, (tuple, list)):
stem = Stem(in_chs, out_chs, 3, pool=pool_type, layers=layers)
else:
# 3x3 stem conv as in RegNet is the default
if pool_type:
stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)
if isinstance(stem, Stem):
feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]
else:
feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix, stage=0)]
return stem, feature_info
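# Example: with out_chs=64 the 'tiered' stem builds a 3-conv stack with channels (24, 32, 64), while the CLIP
# configs below pass stem_chs=(32, 32, 64) with pool_type='avg2' and fall through to the explicit-channels branch.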
def reduce_feat_size(feat_size, stride=2):
return None if feat_size is None else tuple([s // stride for s in feat_size])
def override_kwargs(block_kwargs, model_kwargs):
""" Override model level attn/self-attn/block kwargs w/ block level
NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs
for the block if set to anything that isn't None.
i.e. an empty block_kwargs dict will remove kwargs set at model level for that block
"""
out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs
return out_kwargs or {} # make sure None isn't returned
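# Examples of the replace-not-merge behaviour:
#   override_kwargs(None, dict(rd_ratio=0.25)) -> {'rd_ratio': 0.25}   # fall back to model-level kwargs
#   override_kwargs(dict(), dict(rd_ratio=0.25)) -> {}                 # explicit empty dict wipes model-level kwargs
#   override_kwargs(dict(rd_ratio=0.5), dict(rd_ratio=0.25)) -> {'rd_ratio': 0.5}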
def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ):
layer_fns = block_kwargs['layers']
# override attn layer / args with block local config
attn_set = block_cfg.attn_layer is not None
if attn_set or block_cfg.attn_kwargs is not None:
# override attn layer config
if attn_set and not block_cfg.attn_layer:
# empty string for attn_layer type will disable attn for this block
attn_layer = None
else:
attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
layer_fns = replace(layer_fns, attn=attn_layer)
# override self-attn layer / args with block local cfg
self_attn_set = block_cfg.self_attn_layer is not None
if self_attn_set or block_cfg.self_attn_kwargs is not None:
# override attn layer config
if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == ''
# empty string for self_attn_layer type will disable attn for this block
self_attn_layer = None
else:
self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
if self_attn_layer is not None else None
layer_fns = replace(layer_fns, self_attn=self_attn_layer)
block_kwargs['layers'] = layer_fns
# add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))
def create_byob_stages(
cfg: ByoModelCfg,
drop_path_rate: float,
output_stride: int,
stem_feat: Dict[str, Any],
feat_size: Optional[int] = None,
layers: Optional[LayerFn] = None,
block_kwargs_fn: Optional[Callable] = update_block_kwargs,
):
layers = layers or LayerFn()
feature_info = []
block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
stages = []
for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
stride = stage_block_cfgs[0].s
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx, block_cfg in enumerate(stage_block_cfgs):
out_chs = make_divisible(block_cfg.c * cfg.width_factor)
group_size = block_cfg.gs
if isinstance(group_size, Callable):
group_size = group_size(out_chs, block_idx)
block_kwargs = dict( # Blocks used in this model must accept these arguments
in_chs=prev_chs,
out_chs=out_chs,
stride=stride if block_idx == 0 else 1,
dilation=(first_dilation, dilation),
group_size=group_size,
bottle_ratio=block_cfg.br,
downsample=cfg.downsample,
drop_path_rate=dpr[stage_idx][block_idx],
layers=layers,
)
if block_cfg.type in ('self_attn',):
# add feat_size arg for blocks that support/need it
block_kwargs['feat_size'] = feat_size
block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
blocks += [create_block(block_cfg.type, **block_kwargs)]
first_dilation = dilation
prev_chs = out_chs
if stride > 1 and block_idx == 0:
feat_size = reduce_feat_size(feat_size, stride)
stages += [nn.Sequential(*blocks)]
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}', stage=stage_idx + 1)
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info, feat_size
def get_layer_fns(cfg: ByoModelCfg, allow_aa: bool = True):
act = get_act_layer(cfg.act_layer)
norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act)
if cfg.aa_layer and allow_aa:
conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act, aa_layer=cfg.aa_layer)
else:
conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act)
attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
return layer_fn
class ByobNet(nn.Module):
""" 'Bring-your-own-blocks' Net
A flexible network backbone that allows building model stem + blocks via
dataclass cfg definition w/ factory functions for module instantiation.
Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).
"""
def __init__(
self,
cfg: ByoModelCfg,
num_classes: int = 1000,
in_chans: int = 3,
global_pool: Optional[str] = None,
output_stride: int = 32,
img_size: Optional[Union[int, Tuple[int, int]]] = None,
drop_rate: float = 0.,
            drop_path_rate: float = 0.,
zero_init_last: bool = True,
**kwargs,
):
"""
Args:
cfg: Model architecture configuration.
num_classes: Number of classifier classes.
in_chans: Number of input channels.
global_pool: Global pooling type.
output_stride: Output stride of network, one of (8, 16, 32).
img_size: Image size for fixed image size models (i.e. self-attn).
drop_rate: Classifier dropout rate.
drop_path_rate: Stochastic depth drop-path rate.
zero_init_last: Zero-init last weight of residual path.
**kwargs: Extra kwargs overlayed onto cfg.
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg
stem_layers = get_layer_fns(cfg, allow_aa=False) # keep aa off for stem-layers
stage_layers = get_layer_fns(cfg)
if cfg.fixed_input_size:
assert img_size is not None, 'img_size argument is required for fixed input size model'
feat_size = to_2tuple(img_size) if img_size is not None else None
self.feature_info = []
if isinstance(cfg.stem_chs, (list, tuple)):
stem_chs = [int(round(c * cfg.width_factor)) for c in cfg.stem_chs]
else:
stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
self.stem, stem_feat = create_byob_stem(
in_chs=in_chans,
out_chs=stem_chs,
stem_type=cfg.stem_type,
pool_type=cfg.stem_pool,
layers=stem_layers,
)
self.feature_info.extend(stem_feat[:-1])
feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])
self.stages, stage_feat, feat_size = create_byob_stages(
cfg,
drop_path_rate,
output_stride,
stem_feat[-1],
layers=stage_layers,
feat_size=feat_size,
)
self.feature_info.extend(stage_feat[:-1])
reduction = stage_feat[-1]['reduction']
prev_chs = stage_feat[-1]['num_chs']
if cfg.num_features:
self.num_features = int(round(cfg.width_factor * cfg.num_features))
self.final_conv = stage_layers.conv_norm_act(prev_chs, self.num_features, 1)
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.feature_info += [
dict(num_chs=self.num_features, reduction=reduction, module='final_conv', stage=len(self.stages))]
self.stage_ends = [f['stage'] for f in self.feature_info]
self.head_hidden_size = self.num_features
assert cfg.head_type in ('', 'classifier', 'mlp', 'attn_abs', 'attn_rot')
if cfg.head_type == 'mlp':
if global_pool is None:
global_pool = 'avg'
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=cfg.head_hidden_size,
pool_type=global_pool,
norm_layer=cfg.norm_layer,
act_layer=cfg.act_layer,
drop_rate=self.drop_rate,
)
self.head_hidden_size = self.head.hidden_size
elif cfg.head_type == 'attn_abs':
if global_pool is None:
global_pool = 'token'
assert global_pool in ('', 'token')
self.head = AttentionPool2d(
self.num_features,
embed_dim=cfg.head_hidden_size,
out_features=num_classes,
feat_size=feat_size,
pool_type=global_pool,
drop_rate=self.drop_rate,
qkv_separate=True,
)
self.head_hidden_size = self.head.embed_dim
        elif cfg.head_type == 'attn_rot':
if global_pool is None:
global_pool = 'token'
assert global_pool in ('', 'token')
self.head = RotAttentionPool2d(
self.num_features,
embed_dim=cfg.head_hidden_size,
out_features=num_classes,
ref_feat_size=feat_size,
pool_type=global_pool,
drop_rate=self.drop_rate,
qkv_separate=True,
)
self.head_hidden_size = self.head.embed_dim
else:
if global_pool is None:
global_pool = 'avg'
assert cfg.head_hidden_size is None
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
self.global_pool = global_pool
# init weights
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=[
(r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None),
(r'^final_conv', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
exclude_final_conv: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
exclude_final_conv: Exclude final_conv from last intermediate
        Returns:
            A list of intermediate feature tensors if `intermediates_only` is True, otherwise a tuple of
            the final feature tensor and the list of intermediates.
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
# forward pass
feat_idx = 0 # stem is index 0
if hasattr(self.stem, 'forward_intermediates'):
# returns last intermediate features in stem (before final stride in stride > 2 stems)
x, x_inter = self.stem.forward_intermediates(x)
else:
x, x_inter = self.stem(x), None
if feat_idx in take_indices:
intermediates.append(x if x_inter is None else x_inter)
last_idx = self.stage_ends[-1]
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index]
for stage in stages:
feat_idx += 1
x = stage(x)
if not exclude_final_conv and feat_idx == last_idx:
# default feature_info for this model uses final_conv as the last feature output (if present)
x = self.final_conv(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
if exclude_final_conv and feat_idx == last_idx:
x = self.final_conv(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
max_index = self.stage_ends[max_index]
self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0
if max_index < self.stage_ends[-1]:
self.final_conv = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.final_conv(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
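# Usage sketch (illustrative; relies on `_create_byobnet` defined further below and the 'resnet33ts' cfg in
# `model_cfgs`):
#   model = _create_byobnet('resnet33ts', pretrained=False)
#   x = torch.randn(1, 3, 256, 256)
#   feats = model.forward_intermediates(x, indices=3, intermediates_only=True)
#   # -> list of 3 NCHW feature maps taken at the deepest entries of model.stage_ends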
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights(zero_init_last=zero_init_last)
model_cfgs = dict(
gernet_l=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_m=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_s=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),
),
stem_chs=13,
stem_pool=None,
num_features=1920,
),
repvgg_a0=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(0.75, 0.75, 0.75, 2.5)),
stem_type='rep',
stem_chs=48,
),
repvgg_a1=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1, 1, 1, 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_a2=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),
stem_type='rep',
stem_chs=64,
),
repvgg_b0=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b2=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b2g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b3=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b3g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_d2se=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(8, 14, 24, 1), wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.0625, rd_divisor=1),
),
# 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks
# DW convs in last block, 2048 pre-FC, silu act
resnet51q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad2',
stem_pool=None,
num_features=2048,
act_layer='silu',
),
# 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks
# DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act
resnet61q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad',
stem_pool=None,
num_features=2048,
act_layer='silu',
block_kwargs=dict(extra_conv=True),
),
# A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act,
# and a tiered stem w/ maxpool
resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
),
gcresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
seresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='se',
),
eca_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
),
bat_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='bat',
attn_kwargs=dict(block_size=8)
),
# ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool
resnet32ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=0,
act_layer='silu',
),
# ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool
resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
),
# A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat
# and a tiered stem w/ no maxpool
gcresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='gca',
),
seresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='se',
),
eca_resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='eca',
),
gcresnet50t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
attn_layer='gca',
),
gcresnext50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
# experimental models, closer to a RegNetZ than a ResNet. Similar to EfficientNets but w/ groups instead of DW
regnetz_b16=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_c16=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_d32=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4),
ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4),
ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4),
ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
downsample='',
num_features=1792,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_d8=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4),
ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
downsample='',
num_features=1792,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_e8=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4),
ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
downsample='',
num_features=2048,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
# experimental EvoNorm configs
regnetz_b16_evos=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
norm_layer=partial(EvoNorm2dS0a, group_size=16),
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_c16_evos=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
norm_layer=partial(EvoNorm2dS0a, group_size=16),
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_d8_evos=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4),
ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4),
),
stem_chs=64,
stem_type='deep',
stem_pool='',
downsample='',
num_features=1792,
act_layer='silu',
norm_layer=partial(EvoNorm2dS0a, group_size=16),
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
mobileone_s0=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(0.75, 1.0, 1.0, 2.), num_conv_branches=4),
stem_type='one',
stem_chs=48,
),
mobileone_s1=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(1.5, 1.5, 2.0, 2.5)),
stem_type='one',
stem_chs=64,
),
mobileone_s2=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(1.5, 2.0, 2.5, 4.0)),
stem_type='one',
stem_chs=64,
),
mobileone_s3=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(2.0, 2.5, 3.0, 4.0)),
stem_type='one',
stem_chs=64,
),
mobileone_s4=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(3.0, 3.5, 3.5, 4.0), se_blocks=(0, 0, 5, 1)),
stem_type='one',
stem_chs=64,
),
resnet50_clip=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=(32, 32, 64),
stem_type='',
stem_pool='avg2',
downsample='avg',
aa_layer='avg',
head_type='attn_abs',
),
resnet101_clip=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=23, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=(32, 32, 64),
stem_type='',
stem_pool='avg2',
downsample='avg',
aa_layer='avg',
head_type='attn_abs',
),
resnet50x4_clip=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=4, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=10, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=2048, s=2, br=0.25),
),
width_factor=1.25,
stem_chs=(32, 32, 64),
stem_type='',
stem_pool='avg2',
downsample='avg',
aa_layer='avg',
head_type='attn_abs',
),
resnet50x16_clip=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=6, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=8, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=18, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=8, c=2048, s=2, br=0.25),
),
width_factor=1.5,
stem_chs=(32, 32, 64),
stem_type='',
stem_pool='avg2',
downsample='avg',
aa_layer='avg',
head_type='attn_abs',
),
resnet50x64_clip=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=15, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=36, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=10, c=2048, s=2, br=0.25),
),
width_factor=2.0,
stem_chs=(32, 32, 64),
stem_type='',
stem_pool='avg2',
downsample='avg',
aa_layer='avg',
head_type='attn_abs',
),
resnet50_mlp=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=(32, 32, 64),
stem_type='',
stem_pool='avg2',
downsample='avg',
aa_layer='avg',
head_hidden_size=1024,
head_type='mlp',
),
test_byobnet=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=32, s=2, gs=0, br=0.5),
ByoBlockCfg(type='dark', d=1, c=64, s=2, gs=0, br=0.5),
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=1, c=256, s=2, gs=64, br=0.25),
),
stem_chs=24,
downsample='avg',
stem_pool='',
act_layer='relu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
),
)
for k in ('resnet50_clip', 'resnet101_clip', 'resnet50x4_clip', 'resnet50x16_clip', 'resnet50x64_clip'):
model_cfgs[k + '_gap'] = replace(model_cfgs[k], head_type='classifier')
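# The '_gap' variants generated above reuse the CLIP ResNet configs unchanged except for the head: the
# attention-pool head (head_type='attn_abs') is swapped for a standard global-average-pool classifier head.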
def _convert_openai_clip(
state_dict: Dict[str, torch.Tensor],
model: ByobNet,
prefix: str = 'visual.',
) -> Dict[str, torch.Tensor]:
model_has_attn_pool = isinstance(model.head, (RotAttentionPool2d, AttentionPool2d))
import re
def _stage_sub(m):
stage_idx = int(m.group(1)) - 1
layer_idx, layer_type, layer_id = int(m.group(2)), m.group(3), int(m.group(4))
prefix_str = f'stages.{stage_idx}.{layer_idx}.'
id_map = {1: 'conv1_1x1.', 2: 'conv2_kxk.', 3: 'conv3_1x1.'}
suffix_str = id_map[layer_id] + layer_type
return prefix_str + suffix_str
def _down_sub(m):
stage_idx = int(m.group(1)) - 1
layer_idx, layer_id = int(m.group(2)), int(m.group(3))
return f'stages.{stage_idx}.{layer_idx}.shortcut.' + ('conv.conv' if layer_id == 0 else 'conv.bn')
out_dict = {}
for k, v in state_dict.items():
if not k.startswith(prefix):
continue
k = re.sub(rf'{prefix}conv([0-9])', r'stem.conv\1.conv', k)
k = re.sub(rf'{prefix}bn([0-9])', r'stem.conv\1.bn', k)
k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.([a-z]+)([0-9])', _stage_sub, k)
k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.downsample\.([0-9])', _down_sub, k)
if k.startswith(f'{prefix}attnpool'):
if not model_has_attn_pool:
continue
k = k.replace(prefix + 'attnpool', 'head') #'attn_pool')
k = k.replace('positional_embedding', 'pos_embed')
k = k.replace('q_proj', 'q')
k = k.replace('k_proj', 'k')
k = k.replace('v_proj', 'v')
k = k.replace('c_proj', 'proj')
out_dict[k] = v
return out_dict
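# Example key remappings produced above (derived from the regexes; attention-pool keys are
# dropped when the model head is not an attention pool):
#   'visual.conv1.weight'                  -> 'stem.conv1.conv.weight'
#   'visual.bn1.running_mean'              -> 'stem.conv1.bn.running_mean'
#   'visual.layer1.0.conv2.weight'         -> 'stages.0.0.conv2_kxk.conv.weight'
#   'visual.layer1.0.bn3.weight'           -> 'stages.0.0.conv3_1x1.bn.weight'
#   'visual.layer2.1.downsample.0.weight'  -> 'stages.1.1.shortcut.conv.conv.weight'
#   'visual.attnpool.positional_embedding' -> 'head.pos_embed'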
def checkpoint_filter_fn(
state_dict: Dict[str, torch.Tensor],
model: ByobNet
):
if 'visual.conv1.weight' in state_dict:
state_dict = _convert_openai_clip(state_dict, model)
return state_dict
def _create_byobnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
model_cfg=model_cfgs[variant],
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
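# Minimal usage sketch (hypothetical helper, never called in this module): the @register_model
# entrypoints below all delegate to the factory above, so a variant can be built directly from
# its config name.
def _example_build_byobnet():
    import torch
    model = _create_byobnet('test_byobnet', pretrained=False, num_classes=10)
    logits = model(torch.randn(1, 3, 160, 160))
    return logits.shape  # expected: torch.Size([1, 10])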
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
def _cfgr(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# GPU-Efficient (ResNet) weights
'gernet_s.idstcv_in1k': _cfg(hf_hub_id='timm/'),
'gernet_m.idstcv_in1k': _cfg(hf_hub_id='timm/'),
'gernet_l.idstcv_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)),
# RepVGG weights
'repvgg_a0.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_a1.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_a2.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b0.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b1.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b1g4.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b2.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b2g4.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b3.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b3g4.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_d2se.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0,
),
# experimental ResNet configs
'resnet51q.ra2_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',
first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnet61q.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
# ResNeXt-26 models with different attention in Bottleneck blocks
'resnext26ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'seresnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_resnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'bat_resnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',
min_input_size=(3, 256, 256)),
# ResNet-32 / 33 models with different attention in Bottleneck blocks
'resnet32ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'seresnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_resnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnet50t.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnext50ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
# custom `timm` specific RegNetZ inspired models w/ different sizing from paper
'regnetz_b16.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth',
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.94, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'regnetz_c16.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth',
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'regnetz_d32.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)),
'regnetz_d8.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'regnetz_e8.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'regnetz_b16_evos.untrained': _cfgr(
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.95, test_input_size=(3, 288, 288)),
'regnetz_c16_evos.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth',
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
crop_pct=0.95, test_input_size=(3, 320, 320)),
'regnetz_d8_evos.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'mobileone_s0.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.875,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s1.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s2.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s3.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s4.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
# original attention pool head variants
'resnet50_clip.openai': _cfgr(
hf_hub_id='timm/',
num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7),
classifier='head.proj',
),
'resnet101_clip.openai': _cfgr(
hf_hub_id='timm/',
num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7),
classifier='head.proj',
),
'resnet50x4_clip.openai': _cfgr(
hf_hub_id='timm/',
num_classes=640, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 288, 288), pool_size=(9, 9),
classifier='head.proj',
),
'resnet50x16_clip.openai': _cfgr(
hf_hub_id='timm/',
num_classes=768, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 384, 384), pool_size=(12, 12),
classifier='head.proj',
),
'resnet50x64_clip.openai': _cfgr(
hf_hub_id='timm/',
num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 448, 448), pool_size=(14, 14),
classifier='head.proj',
),
'resnet50_clip.cc12m': _cfgr(
hf_hub_id='timm/',
num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7),
classifier='head.proj',
),
'resnet50_clip.yfcc15m': _cfgr(
hf_hub_id='timm/',
num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7),
classifier='head.proj',
),
'resnet101_clip.yfcc15m': _cfgr(
hf_hub_id='timm/',
num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7),
classifier='head.proj',
),
# avg-pool w/ optional standard classifier head variants
'resnet50_clip_gap.openai': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 224, 224), pool_size=(7, 7),
),
'resnet101_clip_gap.openai': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 224, 224), pool_size=(7, 7),
),
'resnet50x4_clip_gap.openai': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 288, 288), pool_size=(9, 9),
),
'resnet50x16_clip_gap.openai': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12),
),
'resnet50x64_clip_gap.openai': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 448, 448), pool_size=(14, 14),
),
'resnet50_clip_gap.cc12m': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 224, 224), pool_size=(7, 7),
),
'resnet50_clip_gap.yfcc15m': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 224, 224), pool_size=(7, 7),
),
'resnet101_clip_gap.yfcc15m': _cfgr(
hf_hub_id='timm/',
num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 224, 224), pool_size=(7, 7),
),
'resnet50_mlp.untrained': _cfgr(
input_size=(3, 256, 256), pool_size=(8, 8),
),
'test_byobnet.r160_in1k': _cfgr(
hf_hub_id='timm/',
first_conv='stem.conv',
input_size=(3, 160, 160), crop_pct=0.95, pool_size=(5, 5),
),
})
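# Query sketch (assumes a recent timm version; not executed here): the keys above follow the
# 'model_name.pretrained_tag' convention and can be enumerated at runtime.
def _example_list_byobnet_weights():
    import timm
    # e.g. 'repvgg_a0.rvgg_in1k', 'resnet50_clip.openai' on timm versions that return tagged names
    return timm.list_models('repvgg*', pretrained=True) + timm.list_models('*_clip*', pretrained=True)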
@register_model
def gernet_l(pretrained=False, **kwargs) -> ByobNet:
""" GEResNet-Large (GENet-Large from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)
@register_model
def gernet_m(pretrained=False, **kwargs) -> ByobNet:
""" GEResNet-Medium (GENet-Normal from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)
@register_model
def gernet_s(pretrained=False, **kwargs) -> ByobNet:
""" EResNet-Small (GENet-Small from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a0(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-A0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a1(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-A1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a2(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-A2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b0(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1g4(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B1g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2g4(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B2g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B3
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3g4(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B3g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_d2se(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-D2se
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_d2se', pretrained=pretrained, **kwargs)
@register_model
def resnet51q(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)
@register_model
def resnet61q(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)
@register_model
def resnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def seresnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def bat_resnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def resnet32ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)
@register_model
def resnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def seresnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet50t(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)
@register_model
def gcresnext50ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)
@register_model
def regnetz_b16(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs)
@register_model
def regnetz_c16(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs)
@register_model
def regnetz_d32(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs)
@register_model
def regnetz_d8(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs)
@register_model
def regnetz_e8(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs)
@register_model
def regnetz_b16_evos(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs)
@register_model
def regnetz_c16_evos(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs)
@register_model
def regnetz_d8_evos(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s0(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s0', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s1(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s1', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s2(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s2', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s3(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s3', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s4(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs)
@register_model
def resnet50_clip(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50 CLIP image tower
"""
return _create_byobnet('resnet50_clip', pretrained=pretrained, **kwargs)
@register_model
def resnet101_clip(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-101 CLIP image tower
"""
return _create_byobnet('resnet101_clip', pretrained=pretrained, **kwargs)
@register_model
def resnet50x4_clip(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50x4 CLIP image tower
"""
return _create_byobnet('resnet50x4_clip', pretrained=pretrained, **kwargs)
@register_model
def resnet50x16_clip(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50x16 CLIP image tower
"""
return _create_byobnet('resnet50x16_clip', pretrained=pretrained, **kwargs)
@register_model
def resnet50x64_clip(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50x64 CLIP image tower
"""
return _create_byobnet('resnet50x64_clip', pretrained=pretrained, **kwargs)
@register_model
def resnet50_clip_gap(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50 CLIP image tower w/ avg pool (no attention pool)
"""
return _create_byobnet('resnet50_clip_gap', pretrained=pretrained, **kwargs)
@register_model
def resnet101_clip_gap(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-101 CLIP image tower w/ avg pool (no attention pool)
"""
return _create_byobnet('resnet101_clip_gap', pretrained=pretrained, **kwargs)
@register_model
def resnet50x4_clip_gap(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50x4 CLIP image tower w/ avg pool (no attention pool)
"""
return _create_byobnet('resnet50x4_clip_gap', pretrained=pretrained, **kwargs)
@register_model
def resnet50x16_clip_gap(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50x16 CLIP image tower w/ avg pool (no attention pool)
"""
return _create_byobnet('resnet50x16_clip_gap', pretrained=pretrained, **kwargs)
@register_model
def resnet50x64_clip_gap(pretrained=False, **kwargs) -> ByobNet:
""" OpenAI Modified ResNet-50x64 CLIP image tower w/ avg pool (no attention pool)
"""
return _create_byobnet('resnet50x64_clip_gap', pretrained=pretrained, **kwargs)
@register_model
def resnet50_mlp(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet50_mlp', pretrained=pretrained, **kwargs)
@register_model
def test_byobnet(pretrained=False, **kwargs) -> ByobNet:
""" Minimal test ResNet (BYOB based) model.
"""
return _create_byobnet('test_byobnet', pretrained=pretrained, **kwargs)
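# Embedding extraction sketch (hypothetical, downloads weights if actually run): the *_clip
# variants are the OpenAI CLIP image towers with their attention-pool + projection head, so a
# plain forward pass returns the CLIP image embedding rather than ImageNet logits.
def _example_clip_image_embedding():
    import torch
    import timm
    tower = timm.create_model('resnet50_clip.openai', pretrained=True)
    x = torch.randn(1, 3, 224, 224)  # in practice, normalize with OPENAI_CLIP_MEAN / OPENAI_CLIP_STD
    emb = tower(x)
    return emb.shape  # expected: torch.Size([1, 1024]) per the num_classes=1024 cfg above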
| pytorch-image-models/timm/models/byobnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/byobnet.py",
"repo_id": "pytorch-image-models",
"token_count": 52981
} |
""" The EfficientNet Family in PyTorch
An implementation of EfficientNet that covers a variety of related models with efficient architectures:
* EfficientNet-V2
- `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports)
  - EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks - https://arxiv.org/abs/1905.11946
- CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971
- Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665
- Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252
* MixNet (Small, Medium, and Large)
- MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595
* MNasNet B1, A1 (SE), Small
- MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626
* FBNet-C
- FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443
* Single-Path NAS Pixel1
- Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877
* TinyNet
- Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819
- Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch
* And likely more...
The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available
by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing
the models and weights open source!
Hacked together by / Copyright 2019, Ross Wightman
"""
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, LayerType, \
GroupNormAct, LayerNormAct2d, EvoNorm2dS0
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \
round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT
from ._features import FeatureInfo, FeatureHooks, feature_take_indices
from ._manipulate import checkpoint_seq, checkpoint
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['EfficientNet', 'EfficientNetFeatures']
class EfficientNet(nn.Module):
""" EfficientNet
A flexible and performant PyTorch implementation of efficient network architectures, including:
* EfficientNet-V2 Small, Medium, Large, XL & B0-B3
* EfficientNet B0-B8, L2
* EfficientNet-EdgeTPU
* EfficientNet-CondConv
* MixNet S, M, L, XL
* MnasNet A1, B1, and small
* MobileNet-V2
* FBNet C
* Single-Path NAS Pixel1
* TinyNet
"""
def __init__(
self,
block_args: BlockArgs,
num_classes: int = 1000,
num_features: int = 1280,
in_chans: int = 3,
stem_size: int = 32,
stem_kernel_size: int = 3,
fix_stem: bool = False,
output_stride: int = 32,
pad_type: str = '',
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
global_pool: str = 'avg'
):
super(EfficientNet, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
self.stage_ends = [f['stage'] for f in self.feature_info]
head_chs = builder.in_chs
# Head + Pooling
if num_features > 0:
self.conv_head = create_conv2d(head_chs, num_features, 1, padding=pad_type)
self.bn2 = norm_act_layer(num_features, inplace=True)
self.num_features = self.head_hidden_size = num_features
else:
self.conv_head = nn.Identity()
self.bn2 = nn.Identity()
self.num_features = self.head_hidden_size = head_chs
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
efficientnet_init_weights(self)
def as_sequential(self):
layers = [self.conv_stem, self.bn1]
layers.extend(self.blocks)
layers.extend([self.conv_head, self.bn2, self.global_pool])
layers.extend([nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^conv_stem|bn1',
blocks=[
(r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None),
(r'conv_head|bn2', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
extra_blocks: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info
        Returns:
            List of intermediate feature tensors if `intermediates_only` is True, otherwise a
            tuple of (final features, list of intermediate features).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
# forward pass
feat_idx = 0 # stem is index 0
x = self.conv_stem(x)
x = self.bn1(x)
if feat_idx in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index]
for blk in blocks:
feat_idx += 1
x = blk(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == self.stage_ends[-1]:
x = self.conv_head(x)
x = self.bn2(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
extra_blocks: bool = False,
):
""" Prune layers not required for specified intermediates.
"""
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
max_index = self.stage_ends[max_index]
self.blocks = self.blocks[:max_index] # truncate blocks w/ stem as idx 0
if prune_norm or max_index < len(self.blocks):
self.conv_head = nn.Identity()
self.bn2 = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
x = self.conv_head(x)
x = self.bn2(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.classifier(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
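# Usage sketch for forward_intermediates() (assumes the 'efficientnet_b0' variant registered
# further below in this file; nothing here runs at import time):
def _example_forward_intermediates():
    import torch
    import timm
    model = timm.create_model('efficientnet_b0', pretrained=False, num_classes=0)
    x = torch.randn(1, 3, 224, 224)
    final, intermediates = model.forward_intermediates(x, indices=3)
    # `intermediates` holds the last 3 stage outputs (NCHW); `final` is the post-head feature map
    return [t.shape for t in intermediates]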
class EfficientNetFeatures(nn.Module):
""" EfficientNet Feature Extractor
A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation
and object detection models.
"""
def __init__(
self,
block_args: BlockArgs,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
feature_location: str = 'bottleneck',
in_chans: int = 3,
stem_size: int = 32,
stem_kernel_size: int = 3,
fix_stem: bool = False,
output_stride: int = 32,
pad_type: str = '',
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
):
super(EfficientNetFeatures, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
feature_location=feature_location,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = FeatureInfo(builder.features, out_indices)
self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}
efficientnet_init_weights(self)
# Register feature extraction hooks with FeatureHooks helper
self.feature_hooks = None
if feature_location != 'bottleneck':
hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x) -> List[torch.Tensor]:
x = self.conv_stem(x)
x = self.bn1(x)
if self.feature_hooks is None:
features = []
if 0 in self._stage_out_idx:
features.append(x) # add stem out
for i, b in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(b, x)
else:
x = b(x)
if i + 1 in self._stage_out_idx:
features.append(x)
return features
else:
self.blocks(x)
out = self.feature_hooks.get_output(x.device)
return list(out.values())
def _create_effnet(variant, pretrained=False, **kwargs):
features_mode = ''
model_cls = EfficientNet
kwargs_filter = None
if kwargs.pop('features_only', False):
if 'feature_cfg' in kwargs or 'feature_cls' in kwargs:
features_mode = 'cfg'
else:
kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool')
model_cls = EfficientNetFeatures
features_mode = 'cls'
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
features_only=features_mode == 'cfg',
pretrained_strict=features_mode != 'cls',
kwargs_filter=kwargs_filter,
**kwargs,
)
if features_mode == 'cls':
model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg)
return model
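# Feature-backbone sketch (hedged; mirrors the features_only branch above, not called here):
def _example_features_only():
    import torch
    import timm
    # features_only=True routes through EfficientNetFeatures ('cls' mode above), so the model
    # returns a list of feature maps at increasing stride instead of classification logits.
    backbone = timm.create_model('efficientnet_b0', pretrained=False, features_only=True)
    feats = backbone(torch.randn(1, 3, 224, 224))
    return [f.shape for f in feats]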
def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a mnasnet-a1 model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
Paper: https://arxiv.org/pdf/1807.11626.pdf.
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_noskip'],
# stage 1, 112x112 in
['ir_r2_k3_s2_e6_c24'],
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25'],
# stage 3, 28x28 in
['ir_r4_k3_s2_e6_c80'],
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'],
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'],
# stage 6, 7x7 in
['ir_r1_k3_s1_e6_c320'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
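# Shorthand used by the arch_def lists in this file (decoded by `decode_arch_def`), roughly:
#   'ir_r2_k3_s2_e6_c24_se0.25' = inverted-residual block, repeated 2x, 3x3 kernel, stride 2,
#   expansion ratio 6, 24 output channels, squeeze-excite ratio 0.25. Other prefixes: 'ds'/'dsa'
#   depthwise-separable (a = pointwise act), 'er' edge-residual (fused expand conv), 'cn' plain
#   conv block. '_noskip'/'_skip' force the residual off/on; '_nre'/'_nsw' select ReLU / swish
#   for that block, overriding the model-level act_layer.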
def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a mnasnet-b1 model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
Paper: https://arxiv.org/pdf/1807.11626.pdf.
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_c16_noskip'],
# stage 1, 112x112 in
['ir_r3_k3_s2_e3_c24'],
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40'],
# stage 3, 28x28 in
['ir_r3_k5_s2_e6_c80'],
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c96'],
# stage 5, 14x14in
['ir_r4_k5_s2_e6_c192'],
# stage 6, 7x7 in
['ir_r1_k3_s1_e6_c320_noskip']
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a mnasnet-b1 model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
Paper: https://arxiv.org/pdf/1807.11626.pdf.
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
['ds_r1_k3_s1_c8'],
['ir_r1_k3_s2_e3_c16'],
['ir_r2_k3_s2_e6_c16'],
['ir_r4_k5_s2_e6_c32_se0.25'],
['ir_r3_k3_s1_e6_c32_se0.25'],
['ir_r3_k5_s2_e6_c88_se0.25'],
['ir_r1_k3_s1_e6_c144']
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=8,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_v1(
variant, channel_multiplier=1.0, depth_multiplier=1.0,
group_size=None, fix_stem_head=False, head_conv=False, pretrained=False, **kwargs
):
"""
Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py
Paper: https://arxiv.org/abs/1801.04381
"""
arch_def = [
['dsa_r1_k3_s1_c64'],
['dsa_r2_k3_s2_c128'],
['dsa_r2_k3_s2_c256'],
['dsa_r6_k3_s2_c512'],
['dsa_r2_k3_s2_c1024'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
head_features = (1024 if fix_stem_head else max(1024, round_chs_fn(1024))) if head_conv else 0
model_kwargs = dict(
block_args=decode_arch_def(
arch_def,
depth_multiplier=depth_multiplier,
fix_first_last=fix_stem_head,
group_size=group_size,
),
num_features=head_features,
stem_size=32,
fix_stem=fix_stem_head,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'relu6'),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_v2(
variant, channel_multiplier=1.0, depth_multiplier=1.0,
group_size=None, fix_stem_head=False, pretrained=False, **kwargs
):
""" Generate MobileNet-V2 network
Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py
Paper: https://arxiv.org/abs/1801.04381
"""
arch_def = [
['ds_r1_k3_s1_c16'],
['ir_r2_k3_s2_e6_c24'],
['ir_r3_k3_s2_e6_c32'],
['ir_r4_k3_s2_e6_c64'],
['ir_r3_k3_s1_e6_c96'],
['ir_r3_k3_s2_e6_c160'],
['ir_r1_k3_s1_e6_c320'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(
arch_def,
depth_multiplier=depth_multiplier,
fix_first_last=fix_stem_head,
group_size=group_size,
),
num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)),
stem_size=32,
fix_stem=fix_stem_head,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'relu6'),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
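# The registered MobileNet-V2 variants further below differ only in the multipliers handed to
# this generator; assuming the standard timm registrations, 'mobilenetv2_100' uses
# channel_multiplier=1.0, 'mobilenetv2_140' uses 1.4, and the '_110d'/'_120d' variants scale
# depth_multiplier as well.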
def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
""" FBNet-C
Paper: https://arxiv.org/abs/1812.03443
Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py
    NOTE: the impl above does not correspond to the 'C' variant here; that variant was derived from
    the paper, and the reference impl was only used to confirm some building block details
"""
arch_def = [
['ir_r1_k3_s1_e1_c16'],
['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],
['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],
['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],
['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],
['ir_r4_k5_s2_e6_c184'],
['ir_r1_k3_s1_e6_c352'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=16,
num_features=1984, # paper suggests this, but is not 100% clear
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates the Single-Path NAS model from search targeted for Pixel1 phone.
Paper: https://arxiv.org/abs/1904.02877
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_c16_noskip'],
# stage 1, 112x112 in
['ir_r3_k3_s2_e3_c24'],
# stage 2, 56x56 in
['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],
# stage 3, 28x28 in
['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],
# stage 4, 14x14in
['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],
# stage 5, 14x14in
['ir_r4_k5_s2_e6_c192'],
# stage 6, 7x7 in
['ir_r1_k3_s1_e6_c320_noskip']
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnet(
variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8,
group_size=None, pretrained=False, **kwargs
):
"""Creates an EfficientNet model.
Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
Args:
channel_multiplier: multiplier to number of channels per layer
depth_multiplier: multiplier to number of repeats per stage
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'],
['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'],
['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'],
['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
act_layer=resolve_act_layer(kwargs, 'swish'),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
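# Scaling sketch: the B0..B8/L2 registrations below feed the table from the docstring straight
# into this generator, e.g. an EfficientNet-B2 corresponds (per that table) to
# channel_multiplier=1.1 and depth_multiplier=1.2, which also bumps num_features to
# round_chs_fn(1280) = 1408; the 260px train resolution and 0.3 dropout are handled by the
# pretrained cfg / caller kwargs rather than here.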
def _gen_efficientnet_edge(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
""" Creates an EfficientNet-EdgeTPU model
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu
"""
arch_def = [
        # NOTE `fc` is present to override a mismatch between stem channels and in_chs that is not
        # present in other models
['er_r1_k3_s1_e4_c24_fc24_noskip'],
['er_r2_k3_s2_e8_c32'],
['er_r4_k3_s2_e8_c48'],
['ir_r5_k5_s2_e8_c96'],
['ir_r4_k5_s1_e8_c144'],
['ir_r2_k5_s2_e8_c192'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'relu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnet_condconv(
variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs
):
"""Creates an EfficientNet-CondConv model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'],
['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'],
['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25_cc4'],
['ir_r4_k5_s2_e6_c192_se0.25_cc4'],
['ir_r1_k3_s1_e6_c320_se0.25_cc4'],
]
    # NOTE unlike the official impl, this one uses the `cc<x>` option where x is the base number of
    # experts for each stage, and `experts_multiplier` increases that on a per-model basis as with
    # the depth/channel multipliers
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'swish'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
"""Creates an EfficientNet-Lite model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-lite0': (1.0, 1.0, 224, 0.2),
'efficientnet-lite1': (1.0, 1.1, 240, 0.2),
'efficientnet-lite2': (1.1, 1.2, 260, 0.3),
'efficientnet-lite3': (1.2, 1.4, 280, 0.3),
'efficientnet-lite4': (1.4, 1.8, 300, 0.3),
Args:
channel_multiplier: multiplier to number of channels per layer
depth_multiplier: multiplier to number of repeats per stage
"""
arch_def = [
['ds_r1_k3_s1_e1_c16'],
['ir_r2_k3_s2_e6_c24'],
['ir_r2_k5_s2_e6_c40'],
['ir_r3_k3_s2_e6_c80'],
['ir_r3_k5_s1_e6_c112'],
['ir_r4_k5_s2_e6_c192'],
['ir_r1_k3_s1_e6_c320'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),
num_features=1280,
stem_size=32,
fix_stem=True,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
act_layer=resolve_act_layer(kwargs, 'relu6'),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_base(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
""" Creates an EfficientNet-V2 base model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r1_k3_s1_e1_c16_skip'],
['er_r2_k3_s2_e4_c32'],
['er_r2_k3_s2_e4_c48'],
['ir_r3_k3_s2_e4_c96_se0.25'],
['ir_r5_k3_s1_e6_c112_se0.25'],
['ir_r8_k3_s2_e6_c192_se0.25'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_s(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs
):
""" Creates an EfficientNet-V2 Small model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
    NOTE: the `rw` flag sets up the 'small' variant to behave like my initial v2 small model,
    created before the reference impl was released.
"""
arch_def = [
['cn_r2_k3_s1_e1_c24_skip'],
['er_r4_k3_s2_e4_c48'],
['er_r4_k3_s2_e4_c64'],
['ir_r6_k3_s2_e4_c128_se0.25'],
['ir_r9_k3_s1_e6_c160_se0.25'],
['ir_r15_k3_s2_e6_c256_se0.25'],
]
num_features = 1280
if rw:
        # my original variant, based on the paper figure, differs from the official release
arch_def[0] = ['er_r2_k3_s1_e1_c24']
arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25']
num_features = 1792
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(num_features),
stem_size=24,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
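# Naming note (assuming the standard registrations further below): the default path here backs
# the 'efficientnetv2_s' / 'tf_efficientnetv2_s' variants, while rw=True reproduces the earlier
# timm-specific 'efficientnetv2_rw_s' model with the 272-channel final stage and 1792 features.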
def _gen_efficientnetv2_m(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
""" Creates an EfficientNet-V2 Medium model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r3_k3_s1_e1_c24_skip'],
['er_r5_k3_s2_e4_c48'],
['er_r5_k3_s2_e4_c80'],
['ir_r7_k3_s2_e4_c160_se0.25'],
['ir_r14_k3_s1_e6_c176_se0.25'],
['ir_r18_k3_s2_e6_c304_se0.25'],
['ir_r5_k3_s1_e6_c512_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=1280,
stem_size=24,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_l(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
""" Creates an EfficientNet-V2 Large model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r4_k3_s1_e1_c32_skip'],
['er_r7_k3_s2_e4_c64'],
['er_r7_k3_s2_e4_c96'],
['ir_r10_k3_s2_e4_c192_se0.25'],
['ir_r19_k3_s1_e6_c224_se0.25'],
['ir_r25_k3_s2_e6_c384_se0.25'],
['ir_r7_k3_s1_e6_c640_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=1280,
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_xl(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
""" Creates an EfficientNet-V2 Xtra-Large model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r4_k3_s1_e1_c32_skip'],
['er_r8_k3_s2_e4_c64'],
['er_r8_k3_s2_e4_c96'],
['ir_r16_k3_s2_e4_c192_se0.25'],
['ir_r24_k3_s1_e6_c256_se0.25'],
['ir_r32_k3_s2_e6_c512_se0.25'],
['ir_r8_k3_s1_e6_c640_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=1280,
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnet_x(
variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8,
group_size=None, version=1, pretrained=False, **kwargs
):
"""Creates an EfficientNet model.
Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-x-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-x-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-x-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-x-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-x-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-x-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-x-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-x-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-x-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
Args:
channel_multiplier: multiplier to number of channels per layer
depth_multiplier: multiplier to number of repeats per stage
"""
"""
if version == 1:
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25_d1_a0',
'r2_k3_s22_e6_i16_o24_se0.25_f1_d2_a1',
'r2_k5_s22_e6_i24_o40_se0.25_f1_a1',
'r3_k3_s22_e6_i40_o80_se0.25_a0',
'r3_k5_s11_e6_i80_o112_se0.25_a0',
'r4_k5_s22_e6_i112_o192_se0.25_a0',
'r1_k3_s11_e6_i192_o320_se0.25_a0',
]
elif version == 2:
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25_d1_a0',
'r2_k3_s22_e4_i16_o24_se0.25_f1_d2_a1',
'r2_k5_s22_e4_i24_o40_se0.25_f1_a1',
'r3_k3_s22_e4_i40_o80_se0.25_a0',
'r3_k5_s11_e6_i80_o112_se0.25_a0',
'r4_k5_s22_e6_i112_o192_se0.25_a0',
'r1_k3_s11_e6_i192_o320_se0.25_a0',
]
"""
if version == 1:
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25_d1'],
['er_r2_k3_s2_e6_c24_se0.25_nre'],
['er_r2_k5_s2_e6_c40_se0.25_nre'],
['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'],
['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
else:
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25_d1'],
['er_r2_k3_s2_e4_c24_se0.25_nre'],
['er_r2_k5_s2_e4_c40_se0.25_nre'],
['ir_r3_k3_s2_e4_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'],
['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
act_layer=resolve_act_layer(kwargs, 'silu'),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a MixNet Small model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
Paper: https://arxiv.org/abs/1907.09595
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16'], # relu
# stage 1, 112x112 in
['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu
# stage 2, 56x56 in
['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish
# stage 3, 28x28 in
['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish
# stage 4, 14x14in
['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish
# stage 5, 14x14in
['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish
# 7x7
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=1536,
stem_size=16,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a MixNet Medium-Large model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
Paper: https://arxiv.org/abs/1907.09595
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c24'], # relu
# stage 1, 112x112 in
['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu
# stage 2, 56x56 in
['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish
# stage 3, 28x28 in
['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish
# stage 4, 14x14in
['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish
# stage 5, 14x14in
['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish
# 7x7
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
num_features=1536,
stem_size=24,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_tinynet(variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a TinyNet model.
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
num_features=max(1280, round_channels(1280, model_width, 8, None)),
stem_size=32,
fix_stem=True,
round_chs_fn=partial(round_channels, multiplier=model_width),
act_layer=resolve_act_layer(kwargs, 'swish'),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_edgetpu(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
"""
Based on definitions in: https://github.com/tensorflow/models/tree/d2427a562f401c9af118e47af2f030a0a5599f55/official/projects/edgetpu/vision
"""
if 'edgetpu_v2' in variant:
stem_size = 64
stem_kernel_size = 5
group_size = 64
num_features = 1280
act_layer = resolve_act_layer(kwargs, 'relu')
def _arch_def(chs: List[int], group_size: int):
return [
# stage 0, 112x112 in
[f'cn_r1_k1_s1_c{chs[0]}'], # NOTE with expansion==1, the official impl block ends with just a 1x1 pwl
# stage 1, 112x112 in
[f'er_r1_k3_s2_e8_c{chs[1]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[1]}'],
# stage 2, 56x56 in
[
f'er_r1_k3_s2_e8_c{chs[2]}',
f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}',
f'er_r1_k3_s1_e4_c{chs[2]}',
f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}',
],
# stage 3, 28x28 in
[f'er_r1_k3_s2_e8_c{chs[3]}', f'ir_r3_k3_s1_e4_c{chs[3]}'],
# stage 4, 14x14in
[f'ir_r1_k3_s1_e8_c{chs[4]}', f'ir_r3_k3_s1_e4_c{chs[4]}'],
# stage 5, 14x14in
[f'ir_r1_k3_s2_e8_c{chs[5]}', f'ir_r3_k3_s1_e4_c{chs[5]}'],
# stage 6, 7x7 in
[f'ir_r1_k3_s1_e8_c{chs[6]}'],
]
if 'edgetpu_v2_xs' in variant:
stem_size = 32
stem_kernel_size = 3
channels = [16, 32, 48, 96, 144, 160, 192]
elif 'edgetpu_v2_s' in variant:
channels = [24, 48, 64, 128, 160, 192, 256]
elif 'edgetpu_v2_m' in variant:
channels = [32, 64, 80, 160, 192, 240, 320]
num_features = 1344
elif 'edgetpu_v2_l' in variant:
stem_kernel_size = 7
group_size = 128
channels = [32, 64, 96, 192, 240, 256, 384]
num_features = 1408
else:
assert False, 'Unknown mobilenet_edgetpu_v2 variant'
arch_def = _arch_def(channels, group_size)
else:
# v1
stem_size = 32
stem_kernel_size = 3
num_features = 1280
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
['cn_r1_k1_s1_c16'],
# stage 1, 112x112 in
['er_r1_k3_s2_e8_c32', 'er_r3_k3_s1_e4_c32'],
# stage 2, 56x56 in
['er_r1_k3_s2_e8_c48', 'er_r3_k3_s1_e4_c48'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e8_c96', 'ir_r3_k3_s1_e4_c96'],
# stage 4, 14x14in
['ir_r1_k3_s1_e8_c96_noskip', 'ir_r3_k3_s1_e4_c96'],
# stage 5, 14x14in
['ir_r1_k5_s2_e8_c160', 'ir_r3_k5_s1_e4_c160'],
# stage 6, 7x7 in
['ir_r1_k3_s1_e8_c192'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier),
num_features=num_features,
stem_size=stem_size,
stem_kernel_size=stem_kernel_size,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_test_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
""" Minimal test EfficientNet generator.
"""
arch_def = [
['cn_r1_k3_s1_e1_c16_skip'],
['er_r1_k3_s2_e4_c24'],
['er_r1_k3_s2_e4_c32'],
['ir_r1_k3_s2_e4_c48_se0.25'],
['ir_r1_k3_s2_e4_c64_se0.25'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier),
num_features=round_chs_fn(256),
stem_size=24,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
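# _cfg only fills defaults; each entry below overrides individual keys, e.g. (illustrative):
#   _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882)
# keeps the default mean/std/first_conv/classifier while swapping the resolution-dependent fields.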
default_cfgs = generate_default_cfgs({
'mnasnet_050.untrained': _cfg(),
'mnasnet_075.untrained': _cfg(),
'mnasnet_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth',
hf_hub_id='timm/'),
'mnasnet_140.untrained': _cfg(),
'semnasnet_050.untrained': _cfg(),
'semnasnet_075.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth',
hf_hub_id='timm/'),
'semnasnet_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth',
hf_hub_id='timm/'),
'semnasnet_140.untrained': _cfg(),
'mnasnet_small.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth',
hf_hub_id='timm/'),
'mobilenetv1_100.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
test_input_size=(3, 256, 256), test_crop_pct=0.95,
),
'mobilenetv1_100h.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
test_input_size=(3, 256, 256), test_crop_pct=0.95,
),
'mobilenetv1_125.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0,
),
'mobilenetv2_035.untrained': _cfg(),
'mobilenetv2_050.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
'mobilenetv2_075.untrained': _cfg(),
'mobilenetv2_100.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth',
hf_hub_id='timm/'),
'mobilenetv2_110d.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth',
hf_hub_id='timm/'),
'mobilenetv2_120d.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth',
hf_hub_id='timm/'),
'mobilenetv2_140.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth',
hf_hub_id='timm/'),
'fbnetc_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth',
hf_hub_id='timm/',
interpolation='bilinear'),
'spnasnet_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth',
hf_hub_id='timm/',
interpolation='bilinear'),
# NOTE experimenting with alternate attention
'efficientnet_b0.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth',
hf_hub_id='timm/'),
'efficientnet_b0.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0),
'efficientnet_b1.ra4_e3600_r240_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 240, 240), crop_pct=0.9, pool_size=(8, 8),
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'efficientnet_b1.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), test_crop_pct=1.0),
'efficientnet_b2.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0),
'efficientnet_b3.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth',
hf_hub_id='timm/',
input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0),
'efficientnet_b4.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth',
hf_hub_id='timm/',
input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0),
'efficientnet_b5.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'),
'efficientnet_b5.sw_in12k': _cfg(
hf_hub_id='timm/',
input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821),
'efficientnet_b6.untrained': _cfg(
url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'efficientnet_b7.untrained': _cfg(
url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'efficientnet_b8.untrained': _cfg(
url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),
'efficientnet_l2.untrained': _cfg(
url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961),
# FIXME experimental
'efficientnet_b0_gn.untrained': _cfg(),
'efficientnet_b0_g8_gn.untrained': _cfg(),
'efficientnet_b0_g16_evos.untrained': _cfg(),
'efficientnet_b3_gn.untrained': _cfg(
input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0),
'efficientnet_b3_g8_gn.untrained': _cfg(
input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0),
'efficientnet_blur_b0.untrained': _cfg(),
'efficientnet_es.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth',
hf_hub_id='timm/'),
'efficientnet_em.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'efficientnet_el.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'efficientnet_es_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth',
hf_hub_id='timm/'),
'efficientnet_el_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'efficientnet_cc_b0_4e.untrained': _cfg(),
'efficientnet_cc_b0_8e.untrained': _cfg(),
'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'efficientnet_lite0.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth',
hf_hub_id='timm/'),
'efficientnet_lite1.untrained': _cfg(
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'efficientnet_lite2.untrained': _cfg(
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'efficientnet_lite3.untrained': _cfg(
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'efficientnet_lite4.untrained': _cfg(
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'efficientnet_b1_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8),
crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'efficientnet_b2_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9),
crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'efficientnet_b3_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10),
crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'efficientnetv2_rw_t.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0),
'gc_efficientnetv2_rw_t.agc_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0),
'efficientnetv2_rw_s.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth',
hf_hub_id='timm/',
input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0),
'efficientnetv2_rw_m.agc_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth',
hf_hub_id='timm/',
input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0),
'efficientnetv2_s.untrained': _cfg(
input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0),
'efficientnetv2_m.untrained': _cfg(
input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0),
'efficientnetv2_l.untrained': _cfg(
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
'efficientnetv2_xl.untrained': _cfg(
input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0),
'tf_efficientnet_b0.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224)),
'tf_efficientnet_b1.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth',
hf_hub_id='timm/',
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b6.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth',
hf_hub_id='timm/',
input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'tf_efficientnet_b7.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth',
hf_hub_id='timm/',
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_l2.ns_jft_in1k_475': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth',
hf_hub_id='timm/',
input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936),
'tf_efficientnet_l2.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth',
hf_hub_id='timm/',
input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96),
'tf_efficientnet_b0.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)),
'tf_efficientnet_b1.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b6.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'tf_efficientnet_b7.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_b8.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),
'tf_efficientnet_b5.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b7.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth',
hf_hub_id='timm/',
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_b8.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth',
hf_hub_id='timm/',
input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),
'tf_efficientnet_b0.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224)),
'tf_efficientnet_b1.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth',
hf_hub_id='timm/',
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.aa_in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b6.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth',
hf_hub_id='timm/',
input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'tf_efficientnet_b7.aa_in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth',
hf_hub_id='timm/',
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_b0.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224)),
'tf_efficientnet_b1.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth',
hf_hub_id='timm/',
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_es.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 224, 224), ),
'tf_efficientnet_em.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_el.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_cc_b0_4e.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_efficientnet_cc_b0_8e.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_efficientnet_cc_b1_8e.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_lite0.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
interpolation='bicubic', # should be bilinear per the reference impl, but bicubic is a better match for TF bilinear at low res
),
'tf_efficientnet_lite1.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882,
interpolation='bicubic', # should be bilinear per the reference impl, but bicubic is a better match for TF bilinear at low res
),
'tf_efficientnet_lite2.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890,
interpolation='bicubic', # should be bilinear per the reference impl, but bicubic is a better match for TF bilinear at low res
),
'tf_efficientnet_lite3.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'),
'tf_efficientnet_lite4.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'),
'tf_efficientnetv2_s.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
'tf_efficientnetv2_m.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_l.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_s.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
'tf_efficientnetv2_m.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_l.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_s.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
'tf_efficientnetv2_m.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_l.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_xl.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_b0.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth',
hf_hub_id='timm/',
input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)),
'tf_efficientnetv2_b1.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth',
hf_hub_id='timm/',
input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882),
'tf_efficientnetv2_b2.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth',
hf_hub_id='timm/',
input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890),
'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'),
'tf_efficientnetv2_b3.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904),
'tf_efficientnetv2_b3.in21k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=21843,
input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904),
'mixnet_s.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth',
hf_hub_id='timm/'),
'mixnet_m.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth',
hf_hub_id='timm/'),
'mixnet_l.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth',
hf_hub_id='timm/'),
'mixnet_xl.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth',
hf_hub_id='timm/'),
'mixnet_xxl.untrained': _cfg(),
'tf_mixnet_s.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth',
hf_hub_id='timm/'),
'tf_mixnet_m.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth',
hf_hub_id='timm/'),
'tf_mixnet_l.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth',
hf_hub_id='timm/'),
"tinynet_a.in1k": _cfg(
input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth',
hf_hub_id='timm/'),
"tinynet_b.in1k": _cfg(
input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth',
hf_hub_id='timm/'),
"tinynet_c.in1k": _cfg(
input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth',
hf_hub_id='timm/'),
"tinynet_d.in1k": _cfg(
input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth',
hf_hub_id='timm/'),
"tinynet_e.in1k": _cfg(
input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth',
hf_hub_id='timm/'),
'mobilenet_edgetpu_100.untrained': _cfg(
# hf_hub_id='timm/',
input_size=(3, 224, 224), crop_pct=0.9),
'mobilenet_edgetpu_v2_xs.untrained': _cfg(
# hf_hub_id='timm/',
input_size=(3, 224, 224), crop_pct=0.9),
'mobilenet_edgetpu_v2_s.untrained': _cfg(
#hf_hub_id='timm/',
input_size=(3, 224, 224), crop_pct=0.9),
'mobilenet_edgetpu_v2_m.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=0.95,
),
'mobilenet_edgetpu_v2_l.untrained': _cfg(
#hf_hub_id='timm/',
input_size=(3, 224, 224), crop_pct=0.9),
"test_efficientnet.r160_in1k": _cfg(
hf_hub_id='timm/',
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
"test_efficientnet_ln.r160_in1k": _cfg(
hf_hub_id='timm/',
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
"test_efficientnet_gn.r160_in1k": _cfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
"test_efficientnet_evos.r160_in1k": _cfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
})
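# Usage sketch (editor's note): the registered variants below are built by name, with the
# pretrained tag selecting an entry from default_cfgs above, e.g. (assuming a standard timm install):
#   import timm, torch
#   model = timm.create_model('efficientnet_b0.ra_in1k', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000) per the default cfg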
@register_model
def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 0.5. """
model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 0.75. """
model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 1.0. """
model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 1.4 """
model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 0.5 """
model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 0.75. """
model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 1.0. """
model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 1.4. """
model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet Small, depth multiplier of 1.0. """
model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv1_100(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V1 """
model = _gen_mobilenet_v1('mobilenetv1_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv1_100h(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V1 """
model = _gen_mobilenet_v1('mobilenetv1_100h', 1.0, head_conv=True, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv1_125(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V1 """
model = _gen_mobilenet_v1('mobilenetv1_125', 1.25, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 0.35 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 0.5 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 0.75 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.0 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.4 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers"""
model = _gen_mobilenet_v2(
'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """
model = _gen_mobilenet_v2(
'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet:
""" FBNet-C """
if pretrained:
# pretrained model trained with non-default BN epsilon
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
""" Single-Path NAS Pixel1"""
model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B1 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B2 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B4 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B5 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B6 """
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B7 """
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B8 """
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-L2."""
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
return model
# FIXME experimental group conv / GroupNorm / EvoNorm experiments
@register_model
def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 + GroupNorm"""
model = _gen_efficientnet(
'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 w/ group conv + GroupNorm"""
model = _gen_efficientnet(
'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8),
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 w/ group 16 conv + EvoNorm"""
model = _gen_efficientnet(
'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16,
pretrained=pretrained, **kwargs) #norm_layer=partial(EvoNorm2dS0, group_size=16),
return model
@register_model
def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 w/ GroupNorm """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16,
norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 w/ grouped conv + BN"""
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16,
norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_blur_b0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 w/ BlurPool """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_blur_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained,
aa_layer='blurpc', **kwargs
)
return model
@register_model
def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge Small. """
model = _gen_efficientnet_edge(
'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
model = _gen_efficientnet_edge(
'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Medium. """
model = _gen_efficientnet_edge(
'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Large. """
model = _gen_efficientnet_edge(
'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
model = _gen_efficientnet_edge(
'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 8 Experts """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_condconv(
'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 8 Experts """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_condconv(
'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B1 w/ 8 Experts """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_condconv(
'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite0 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite1 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite2 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite3 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite4 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
variant = 'efficientnet_b1_pruned'
model = _gen_efficientnet(
variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """
model = _gen_efficientnetv2_s(
'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs)
return model
@register_model
def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """
model = _gen_efficientnetv2_s(
'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9,
rw=False, se_layer='gc', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Small (RW variant).
NOTE: This is my initial (pre official code release) w/ some differences.
See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding
"""
model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Medium (RW variant).
"""
model = _gen_efficientnetv2_s(
'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Small. """
model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Medium. """
model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Large. """
model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Xtra-Large. """
model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
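# The tf_* variants mirror the TF reference training setup so the ported weights line up:
# 'same' padding emulation plus the TF BatchNorm epsilon (BN_EPS_TF_DEFAULT, 1e-3 in the TF
# impl -- editor's note; the constant is defined alongside the builder helpers) instead of
# the PyTorch defaults.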
@register_model
def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B1. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B2. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B4. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B5. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B6. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B7. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B8. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge Small. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_edge(
'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Medium. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_edge(
'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Large. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_edge(
'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_condconv(
'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_condconv(
'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B1 w/ 8 Experts. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_condconv(
'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite0 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite1 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite2 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite3 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite4 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Small. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Medium. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Large. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Xtra-Large. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B0. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B1. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base(
'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B2. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base(
'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B3. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base(
'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_x_b3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet_x(
'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_x_b5(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B5 """
model = _gen_efficientnet_x(
'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_h_b5(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B5 """
model = _gen_efficientnet_x(
'efficientnet_b5', channel_multiplier=1.92, depth_multiplier=2.2, version=2, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_s(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Small model.
"""
model = _gen_mixnet_s(
'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_m(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Medium model.
"""
model = _gen_mixnet_m(
'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_l(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Large model.
"""
model = _gen_mixnet_m(
'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Extra-Large model.
Not a paper spec, experimental def by RW w/ depth scaling.
"""
model = _gen_mixnet_m(
'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Double Extra Large model.
Not a paper spec, experimental def by RW w/ depth scaling.
"""
model = _gen_mixnet_m(
'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mixnet_s(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Small model. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mixnet_s(
'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Medium model. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mixnet_m(
'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Large model. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mixnet_m(
'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
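# TinyNet variants a-e (below); the two positional values passed to _gen_tinynet are the
# width and depth scaling factors (width first, assuming the usual _gen_tinynet signature).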
@register_model
def tinynet_a(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_b(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_c(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_d(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_e(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenet_edgetpu_100(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet-EdgeTPU-v1 100. """
model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_100', pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenet_edgetpu_v2_xs(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet-EdgeTPU-v2 Extra Small. """
model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_xs', pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenet_edgetpu_v2_s(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet-EdgeTPU-v2 Small. """
model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_s', pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenet_edgetpu_v2_m(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet-EdgeTPU-v2 Medium. """
model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_m', pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenet_edgetpu_v2_l(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet-EdgeTPU-v2 Large. """
model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_l', pretrained=pretrained, **kwargs)
return model
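# The test_* entrypoints below build small configurations via _gen_test_efficientnet, differing only
# in the normalization layer passed (default, GroupNormAct, LayerNormAct2d, EvoNorm2dS0); they appear
# intended for lightweight testing rather than ImageNet-scale use.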
@register_model
def test_efficientnet(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_test_efficientnet('test_efficientnet', pretrained=pretrained, **kwargs)
return model
@register_model
def test_efficientnet_gn(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_test_efficientnet(
'test_efficientnet_gn', pretrained=pretrained, norm_layer=partial(GroupNormAct, group_size=8), **kwargs)
return model
@register_model
def test_efficientnet_ln(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_test_efficientnet(
'test_efficientnet_ln', pretrained=pretrained, norm_layer=LayerNormAct2d, **kwargs)
return model
@register_model
def test_efficientnet_evos(pretrained=False, **kwargs) -> EfficientNet:
model = _gen_test_efficientnet(
'test_efficientnet_evos', pretrained=pretrained, norm_layer=partial(EvoNorm2dS0, group_size=8), **kwargs)
return model
register_model_deprecations(__name__, {
'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k',
'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k',
'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k',
'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k',
'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k',
'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k',
'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k',
'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k',
'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k',
'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k',
'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k',
'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k',
'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k',
'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k',
'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k',
'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k',
'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k',
'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475',
'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k',
'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k',
'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k',
'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k',
'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k',
'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k',
'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k',
'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k',
'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k',
'efficientnet_b2a': 'efficientnet_b2',
'efficientnet_b3a': 'efficientnet_b3',
'mnasnet_a1': 'semnasnet_100',
'mnasnet_b1': 'mnasnet_100',
})
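# The deprecation map above points legacy entrypoint names (e.g. 'tf_efficientnet_b0_ap') at the
# current '<architecture>.<pretrained tag>' registry names, so the old names keep resolving.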
| pytorch-image-models/timm/models/efficientnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/efficientnet.py",
"repo_id": "pytorch-image-models",
"token_count": 57808
} |
""" HRNet
Copied from https://github.com/HRNet/HRNet-Image-Classification
Original header:
Copyright (c) Microsoft
Licensed under the MIT License.
Written by Bin Xiao ([email protected])
Modified by Ke Sun ([email protected])
"""
import logging
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._features import FeatureInfo
from ._registry import register_model, generate_default_cfgs
from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE
__all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this
_BN_MOMENTUM = 0.1
_logger = logging.getLogger(__name__)
cfg_cls = dict(
hrnet_w18_small=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(1,),
num_channels=(32,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(2, 2),
num_channels=(16, 32),
fuse_method='SUM'
),
stage3=dict(
num_modules=1,
num_branches=3,
block_type='BASIC',
num_blocks=(2, 2, 2),
num_channels=(16, 32, 64),
fuse_method='SUM'
),
stage4=dict(
num_modules=1,
num_branches=4,
block_type='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(16, 32, 64, 128),
fuse_method='SUM',
),
),
hrnet_w18_small_v2=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(2,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(2, 2),
num_channels=(18, 36),
fuse_method='SUM'
),
stage3=dict(
num_modules=3,
num_branches=3,
block_type='BASIC',
num_blocks=(2, 2, 2),
num_channels=(18, 36, 72),
fuse_method='SUM'
),
stage4=dict(
num_modules=2,
num_branches=4,
block_type='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(18, 36, 72, 144),
fuse_method='SUM',
),
),
hrnet_w18=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(18, 36),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(18, 36, 72),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(18, 36, 72, 144),
fuse_method='SUM',
),
),
hrnet_w30=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(30, 60),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(30, 60, 120),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(30, 60, 120, 240),
fuse_method='SUM',
),
),
hrnet_w32=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256),
fuse_method='SUM',
),
),
hrnet_w40=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(40, 80),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(40, 80, 160),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(40, 80, 160, 320),
fuse_method='SUM',
),
),
hrnet_w44=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(44, 88),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(44, 88, 176),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(44, 88, 176, 352),
fuse_method='SUM',
),
),
hrnet_w48=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384),
fuse_method='SUM',
),
),
hrnet_w64=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(64, 128),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(64, 128, 256),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(64, 128, 256, 512),
fuse_method='SUM',
),
)
)
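# Each config above follows the same layout: a stem width plus four stage dicts. For the full-size
# models (hrnet_w18 through hrnet_w64) the width suffix is the channel count of the highest-resolution
# branch in stages 2-4, doubling for each additional lower-resolution branch.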
class HighResolutionModule(nn.Module):
def __init__(
self,
num_branches,
block_types,
num_blocks,
num_in_chs,
num_channels,
fuse_method,
multi_scale_output=True,
):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches,
block_types,
num_blocks,
num_in_chs,
num_channels,
)
self.num_in_chs = num_in_chs
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches,
block_types,
num_blocks,
num_channels,
)
self.fuse_layers = self._make_fuse_layers()
self.fuse_act = nn.ReLU(False)
def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels):
error_msg = ''
if num_branches != len(num_blocks):
error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks))
elif num_branches != len(num_channels):
error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels))
elif num_branches != len(num_in_chs):
error_msg = 'num_branches({}) <> num_in_chs({})'.format(num_branches, len(num_in_chs))
if error_msg:
_logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1):
downsample = None
if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM),
)
layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)]
self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block_type, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return nn.Identity()
num_branches = self.num_branches
num_in_chs = self.num_in_chs
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
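            # For output branch i: lower-resolution inputs (j > i) are 1x1-projected and upsampled,
            # the same-resolution input (j == i) passes through unchanged, and higher-resolution
            # inputs (j < i) are downsampled by a chain of stride-2 3x3 convs.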
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False),
nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM),
nn.Upsample(scale_factor=2 ** (j - i), mode='nearest')))
elif j == i:
fuse_layer.append(nn.Identity())
else:
conv3x3s = []
for k in range(i - j):
if k == i - j - 1:
num_out_chs_conv3x3 = num_in_chs[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM)
))
else:
num_out_chs_conv3x3 = num_in_chs[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM),
nn.ReLU(False)
))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_in_chs(self):
return self.num_in_chs
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i, branch in enumerate(self.branches):
x[i] = branch(x[i])
x_fuse = []
for i, fuse_outer in enumerate(self.fuse_layers):
y = None
for j, f in enumerate(fuse_outer):
if y is None:
y = f(x[j])
else:
y = y + f(x[j])
x_fuse.append(self.fuse_act(y))
return x_fuse
class SequentialList(nn.Sequential):
def __init__(self, *args):
super(SequentialList, self).__init__(*args)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (List[torch.Tensor])
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (List[torch.Tensor])
pass
def forward(self, x) -> List[torch.Tensor]:
for module in self:
x = module(x)
return x
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:  # `input` has the same name as in Sequential's forward
pass
block_types_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.0,
head='classification',
**kwargs,
):
super(HighResolutionNet, self).__init__()
self.num_classes = num_classes
assert output_stride == 32 # FIXME support dilation
cfg.update(**kwargs)
stem_width = cfg['stem_width']
self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM)
self.act2 = nn.ReLU(inplace=True)
self.stage1_cfg = cfg['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = block_types_dict[self.stage1_cfg['block_type']]
num_blocks = self.stage1_cfg['num_blocks'][0]
self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks)
stage1_out_channel = block_type.expansion * num_channels
self.stage2_cfg = cfg['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = block_types_dict[self.stage2_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)
self.stage3_cfg = cfg['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = block_types_dict[self.stage3_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)
self.stage4_cfg = cfg['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = block_types_dict[self.stage4_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
self.head = head
self.head_channels = None # set if _make_head called
head_conv_bias = cfg.pop('head_conv_bias', True)
if head == 'classification':
# Classification Head
self.num_features = self.head_hidden_size = 2048
self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(
pre_stage_channels,
conv_bias=head_conv_bias,
)
self.global_pool, self.head_drop, self.classifier = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
else:
if head == 'incre':
self.num_features = self.head_hidden_size = 2048
self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True)
else:
self.num_features = self.head_hidden_size = 256
self.incre_modules = None
self.global_pool = nn.Identity()
self.head_drop = nn.Identity()
self.classifier = nn.Identity()
curr_stride = 2
        # module names aren't actually valid here, so hook- or FeatureNet-based extraction would not work
self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')]
for i, c in enumerate(self.head_channels if self.head_channels else num_channels):
curr_stride *= 2
c = c * 4 if self.head_channels else c # head block_type expansion factor of 4
self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')]
self.init_weights()
def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True):
head_block_type = Bottleneck
self.head_channels = [32, 64, 128, 256]
# Increasing the #channels on each resolution
# from C, 2C, 4C, 8C to 128, 256, 512, 1024
incre_modules = []
for i, channels in enumerate(pre_stage_channels):
incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1))
incre_modules = nn.ModuleList(incre_modules)
if incre_only:
return incre_modules, None, None
# downsampling modules
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = self.head_channels[i] * head_block_type.expansion
out_channels = self.head_channels[i + 1] * head_block_type.expansion
downsamp_module = nn.Sequential(
nn.Conv2d(
in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=2, padding=1, bias=conv_bias),
nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)
)
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(
nn.Conv2d(
in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features,
kernel_size=1, stride=1, padding=0, bias=conv_bias),
nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)
)
return incre_modules, downsamp_modules, final_layer
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False),
nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)))
else:
transition_layers.append(nn.Identity())
else:
conv3x3s = []
for j in range(i + 1 - num_branches_pre):
_in_chs = num_channels_pre_layer[-1]
_out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs
conv3x3s.append(nn.Sequential(
nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False),
nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block_type, inplanes, planes, block_types, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block_type.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM),
)
layers = [block_type(inplanes, planes, stride, downsample)]
inplanes = planes * block_type.expansion
for i in range(1, block_types):
layers.append(block_type(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block_type = block_types_dict[layer_config['block_type']]
fuse_method = layer_config['fuse_method']
modules = []
for i in range(num_modules):
            # multi_scale_output is only used by the last module
reset_multi_scale_output = multi_scale_output or i < num_modules - 1
modules.append(HighResolutionModule(
num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output)
)
num_in_chs = modules[-1].get_num_in_chs()
return SequentialList(*modules), num_in_chs
@torch.jit.ignore
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv[12]|bn[12]',
block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [
(r'^layer(\d+)\.(\d+)', None),
(r'^stage(\d+)\.(\d+)', None),
(r'^transition(\d+)', (99999,)),
],
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, "gradient checkpointing not supported"
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def stages(self, x) -> List[torch.Tensor]:
x = self.layer1(x)
xl = [t(x) for i, t in enumerate(self.transition1)]
yl = self.stage2(xl)
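        # Non-Identity transitions create the new, lower-resolution branch from the previous lowest-resolution
        # output (yl[-1]); existing branches pass through unchanged via nn.Identity.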
xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)]
yl = self.stage3(xl)
xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)]
yl = self.stage4(xl)
return yl
def forward_features(self, x):
# Stem
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
# Stages
yl = self.stages(x)
if self.incre_modules is None or self.downsamp_modules is None:
return yl
y = None
for i, incre in enumerate(self.incre_modules):
if y is None:
y = incre(yl[i])
else:
down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing
y = incre(yl[i]) + down.forward(y)
y = self.final_layer(y)
return y
def forward_head(self, x, pre_logits: bool = False):
# Classification Head
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.classifier(x)
def forward(self, x):
y = self.forward_features(x)
x = self.forward_head(y)
return x
class HighResolutionNetFeatures(HighResolutionNet):
"""HighResolutionNet feature extraction
    The design of HRNet makes it easy to grab feature maps; this class provides a simple wrapper to do so,
    as using the FeatureNet helpers would be more complicated.
    Setting `feature_location='incre'` grabs increased-channel-count features using part of the
    classification head. If `feature_location=''`, the default HRNet features are returned. The first stem
    conv is used for the stride-2 features.
"""
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.0,
feature_location='incre',
out_indices=(0, 1, 2, 3, 4),
**kwargs,
):
assert feature_location in ('incre', '')
super(HighResolutionNetFeatures, self).__init__(
cfg,
in_chans=in_chans,
num_classes=num_classes,
output_stride=output_stride,
global_pool=global_pool,
drop_rate=drop_rate,
head=feature_location,
**kwargs,
)
self.feature_info = FeatureInfo(self.feature_info, out_indices)
self._out_idx = {f['index'] for f in self.feature_info.get_dicts()}
def forward_features(self, x):
assert False, 'Not supported'
def forward(self, x) -> List[torch.Tensor]:
out = []
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
if 0 in self._out_idx:
out.append(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
x = self.stages(x)
if self.incre_modules is not None:
x = [incre(f) for f, incre in zip(x, self.incre_modules)]
for i, f in enumerate(x):
if i + 1 in self._out_idx:
out.append(f)
return out
def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs):
model_cls = HighResolutionNet
features_only = False
kwargs_filter = None
if model_kwargs.pop('features_only', False):
model_cls = HighResolutionNetFeatures
kwargs_filter = ('num_classes', 'global_pool')
features_only = True
cfg_variant = cfg_variant or variant
pretrained_strict = model_kwargs.pop(
'pretrained_strict',
not features_only and model_kwargs.get('head', 'classification') == 'classification'
)
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
model_cfg=cfg_cls[cfg_variant],
pretrained_strict=pretrained_strict,
kwargs_filter=kwargs_filter,
**model_kwargs,
)
if features_only:
model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg)
model.default_cfg = model.pretrained_cfg # backwards compat
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'hrnet_w18_small.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18.ms_aug_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95,
),
'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18_ssld.paddle_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)
),
'hrnet_w48_ssld.paddle_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)
),
})
@register_model
def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18_small', pretrained, **kwargs)
@register_model
def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs)
@register_model
def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18', pretrained, **kwargs)
@register_model
def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w30', pretrained, **kwargs)
@register_model
def hrnet_w32(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w32', pretrained, **kwargs)
@register_model
def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w40', pretrained, **kwargs)
@register_model
def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w44', pretrained, **kwargs)
@register_model
def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w48', pretrained, **kwargs)
@register_model
def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w64', pretrained, **kwargs)
@register_model
def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
kwargs.setdefault('head_conv_bias', False)
return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs)
@register_model
def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
kwargs.setdefault('head_conv_bias', False)
return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)
| pytorch-image-models/timm/models/hrnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/hrnet.py",
"repo_id": "pytorch-image-models",
"token_count": 17688
} |
""" Nested Transformer (NesT) in PyTorch
A PyTorch implement of Aggregating Nested Transformers as described in:
'Aggregating Nested Transformers'
- https://arxiv.org/abs/2105.12723
The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights
have been converted with convert/convert_nest_flax.py
Acknowledgments:
* The paper authors for sharing their research, code, and model weights
* Ross Wightman's existing code off which I based this
Copyright 2021 Alexander Soare
"""
import collections.abc
import logging
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert
from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq, named_apply
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['Nest'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
class Attention(nn.Module):
"""
This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with
an extra "image block" dim
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
"""
x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim)
"""
B, T, N, C = x.shape
# result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head)
qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1) # (B, H, T, N, N)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
# (B, H, T, N, C'), permute -> (B, T, N, C', H)
x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x # (B, T, N, C)
class TransformerLayer(nn.Module):
"""
This is much like `.vision_transformer.Block` but:
- Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks")
- Uses modified Attention layer that handles the "block" dimension
"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, x):
y = self.norm1(x)
x = x + self.drop_path(self.attn(y))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConvPool(nn.Module):
def __init__(self, in_channels, out_channels, norm_layer, pad_type=''):
super().__init__()
self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True)
self.norm = norm_layer(out_channels)
self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type)
def forward(self, x):
"""
x is expected to have shape (B, C, H, W)
"""
_assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims')
_assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims')
x = self.conv(x)
# Layer norm done over channel dim only
x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
x = self.pool(x)
return x # (B, C, H//2, W//2)
def blockify(x, block_size: int):
"""image to blocks
Args:
x (Tensor): with shape (B, H, W, C)
block_size (int): edge length of a single square block in units of H, W
"""
B, H, W, C = x.shape
_assert(H % block_size == 0, '`block_size` must divide input height evenly')
_assert(W % block_size == 0, '`block_size` must divide input width evenly')
grid_height = H // block_size
grid_width = W // block_size
x = x.reshape(B, grid_height, block_size, grid_width, block_size, C)
x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C)
return x # (B, T, N, C)
@register_notrace_function # reason: int receives Proxy
def deblockify(x, block_size: int):
"""blocks to image
Args:
x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block
block_size (int): edge length of a single square block in units of desired H, W
"""
B, T, _, C = x.shape
grid_size = int(math.sqrt(T))
height = width = grid_size * block_size
x = x.reshape(B, grid_size, grid_size, block_size, block_size, C)
x = x.transpose(2, 3).reshape(B, height, width, C)
return x # (B, H, W, C)
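# Shape sketch: blockify/deblockify are inverses for square inputs, e.g. with block_size=4,
#   x = torch.randn(2, 16, 16, 64)            # (B, H, W, C)
#   t = blockify(x, 4)                        # -> (2, 16, 16, 64): a 4x4 grid of blocks, 16 tokens each
#   assert deblockify(t, 4).shape == x.shape  # round-trips back to (B, H, W, C)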
class NestLevel(nn.Module):
""" Single hierarchical level of a Nested Transformer
"""
def __init__(
self,
num_blocks,
block_size,
seq_length,
num_heads,
depth,
embed_dim,
prev_embed_dim=None,
mlp_ratio=4.,
qkv_bias=True,
proj_drop=0.,
attn_drop=0.,
drop_path=[],
norm_layer=None,
act_layer=None,
pad_type='',
):
super().__init__()
self.block_size = block_size
self.grad_checkpointing = False
self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim))
if prev_embed_dim is not None:
self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type)
else:
self.pool = nn.Identity()
# Transformer encoder
if len(drop_path):
assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers'
self.transformer_encoder = nn.Sequential(*[
TransformerLayer(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i],
norm_layer=norm_layer,
act_layer=act_layer,
)
for i in range(depth)])
def forward(self, x):
"""
expects x as (B, C, H, W)
"""
x = self.pool(x)
x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer
x = blockify(x, self.block_size) # (B, T, N, C')
x = x + self.pos_embed
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.transformer_encoder, x)
else:
x = self.transformer_encoder(x) # (B, T, N, C')
x = deblockify(x, self.block_size) # (B, H', W', C')
# Channel-first for block aggregation, and generally to replicate convnet feature map at each stage
return x.permute(0, 3, 1, 2) # (B, C, H', W')
class Nest(nn.Module):
""" Nested Transformer (NesT)
    A PyTorch impl of: `Aggregating Nested Transformers`
- https://arxiv.org/abs/2105.12723
"""
def __init__(
self,
img_size=224,
in_chans=3,
patch_size=4,
num_levels=3,
embed_dims=(128, 256, 512),
num_heads=(4, 8, 16),
depths=(2, 2, 20),
num_classes=1000,
mlp_ratio=4.,
qkv_bias=True,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.5,
norm_layer=None,
act_layer=None,
pad_type='',
weight_init='',
global_pool='avg',
):
"""
Args:
img_size (int, tuple): input image size
in_chans (int): number of input channels
patch_size (int): patch size
num_levels (int): number of block hierarchies (T_d in the paper)
embed_dims (int, tuple): embedding dimensions of each level
num_heads (int, tuple): number of attention heads for each level
depths (int, tuple): number of transformer layers for each level
num_classes (int): number of classes for classification head
mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers
qkv_bias (bool): enable bias for qkv if True
            drop_rate (float): dropout rate applied before the classifier head
            proj_drop_rate (float): dropout rate for MLP of transformer layers and the MSA final projection layer
            attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer for transformer layers
act_layer: (nn.Module): activation layer in MLP of transformer layers
pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME
weight_init: (str): weight init scheme
global_pool: (str): type of pooling operation to apply to final feature map
Notes:
- Default values follow NesT-B from the original Jax code.
- `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`.
- For those following the paper, Table A1 may have errors!
- https://github.com/google-research/nested-transformer/issues/2
"""
super().__init__()
for param_name in ['embed_dims', 'num_heads', 'depths']:
param_value = locals()[param_name]
if isinstance(param_value, collections.abc.Sequence):
assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`'
embed_dims = to_ntuple(num_levels)(embed_dims)
num_heads = to_ntuple(num_levels)(num_heads)
depths = to_ntuple(num_levels)(depths)
self.num_classes = num_classes
self.num_features = self.head_hidden_size = embed_dims[-1]
self.feature_info = []
norm_layer = norm_layer or LayerNorm
act_layer = act_layer or nn.GELU
self.drop_rate = drop_rate
self.num_levels = num_levels
if isinstance(img_size, collections.abc.Sequence):
assert img_size[0] == img_size[1], 'Model only handles square inputs'
img_size = img_size[0]
assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly'
self.patch_size = patch_size
# Number of blocks at each level
self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist()
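        # e.g. num_levels=3 -> [16, 4, 1] blocks per level, ordered finest to coarsest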
assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \
'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`'
# Block edge size in units of patches
# Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the
# number of blocks along edge of image
self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0]))
# Patch embedding
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dims[0],
flatten=False,
)
self.num_patches = self.patch_embed.num_patches
self.seq_length = self.num_patches // self.num_blocks[0]
# Build up each hierarchical level
levels = []
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
prev_dim = None
curr_stride = 4
for i in range(len(self.num_blocks)):
dim = embed_dims[i]
levels.append(NestLevel(
self.num_blocks[i],
self.block_size,
self.seq_length,
num_heads[i],
depths[i],
dim,
prev_dim,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dp_rates[i],
norm_layer=norm_layer,
act_layer=act_layer,
pad_type=pad_type,
))
self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')]
prev_dim = dim
curr_stride *= 2
self.levels = nn.Sequential(*levels)
# Final normalization layer
self.norm = norm_layer(embed_dims[-1])
# Classifier
global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.head = head
self.init_weights(weight_init)
@torch.jit.ignore
def init_weights(self, mode=''):
assert mode in ('nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
for level in self.levels:
trunc_normal_(level.pos_embed, std=.02, a=-2, b=2)
named_apply(partial(_init_nest_weights, head_bias=head_bias), self)
@torch.jit.ignore
def no_weight_decay(self):
        return {f'levels.{i}.pos_embed' for i in range(len(self.levels))}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=[
(r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None),
(r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)),
(r'^norm', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for l in self.levels:
l.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.head = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
x = self.levels(x)
# Layer norm done over channel dim only (to NHWC and back)
x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.):
""" NesT weight initialization
Can replicate Jax implementation. Otherwise follows vision_transformer.py
"""
if isinstance(module, nn.Linear):
if name.startswith('head'):
trunc_normal_(module.weight, std=.02, a=-2, b=2)
nn.init.constant_(module.bias, head_bias)
else:
trunc_normal_(module.weight, std=.02, a=-2, b=2)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02, a=-2, b=2)
if module.bias is not None:
nn.init.zeros_(module.bias)
def resize_pos_embed(posemb, posemb_new):
"""
Rescale the grid of position embeddings when loading from state_dict
    Expected shape of position embeddings is (1, T, N, C); only square images are considered
"""
_logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
seq_length_old = posemb.shape[2]
num_blocks_new, seq_length_new = posemb_new.shape[1:3]
size_new = int(math.sqrt(num_blocks_new*seq_length_new))
# First change to (1, C, H, W)
posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2)
posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False)
# Now change to new (1, T, N, C)
posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new)))
return posemb
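# e.g. resizing a (1, 16, 196, C) pos embed (4x4 blocks of 14x14 tokens) for a (1, 16, 256, C) target:
# deblockify -> (1, 56, 56, C), bicubic interpolate (channels-first) to 64x64, then re-blockify with
# block_size 16 to give (1, 16, 256, C).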
def checkpoint_filter_fn(state_dict, model):
""" resize positional embeddings of pretrained weights """
pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')]
for k in pos_embed_keys:
if state_dict[k].shape != getattr(model, k).shape:
state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k))
return state_dict
def _create_nest(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
Nest,
variant,
pretrained,
feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True),
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14],
'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'nest_base.untrained': _cfg(),
'nest_small.untrained': _cfg(),
'nest_tiny.untrained': _cfg(),
# (weights from official Google JAX impl, require 'SAME' padding)
'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'),
'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'),
'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def nest_base(pretrained=False, **kwargs) -> Nest:
""" Nest-B @ 224x224
"""
model_kwargs = dict(
embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_small(pretrained=False, **kwargs) -> Nest:
""" Nest-S @ 224x224
"""
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_tiny(pretrained=False, **kwargs) -> Nest:
""" Nest-T @ 224x224
"""
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs)
model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_base_jx(pretrained=False, **kwargs) -> Nest:
""" Nest-B @ 224x224
"""
kwargs.setdefault('pad_type', 'same')
model_kwargs = dict(
embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_small_jx(pretrained=False, **kwargs) -> Nest:
""" Nest-S @ 224x224
"""
kwargs.setdefault('pad_type', 'same')
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_tiny_jx(pretrained=False, **kwargs) -> Nest:
""" Nest-T @ 224x224
"""
kwargs.setdefault('pad_type', 'same')
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs)
model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs)
return model
register_model_deprecations(__name__, {
'jx_nest_base': 'nest_base_jx',
'jx_nest_small': 'nest_small_jx',
'jx_nest_tiny': 'nest_tiny_jx',
}) | pytorch-image-models/timm/models/nest.py/0 | {
"file_path": "pytorch-image-models/timm/models/nest.py",
"repo_id": "pytorch-image-models",
"token_count": 10094
} |
"""PyTorch SelecSLS Net example for ImageNet Classification
License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode)
Author: Dushyant Mehta (@mehtadushy)
SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D
Human Pose Estimation with a Single RGB Camera, Mehta et al."
https://arxiv.org/abs/1907.00837
Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models
and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch
"""
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['SelecSls'] # model_registry will add each entrypoint fn to this
class SequentialList(nn.Sequential):
def __init__(self, *args):
super(SequentialList, self).__init__(*args)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (List[torch.Tensor])
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (List[torch.Tensor])
pass
def forward(self, x) -> List[torch.Tensor]:
for module in self:
x = module(x)
return x
class SelectSeq(nn.Module):
def __init__(self, mode='index', index=0):
super(SelectSeq, self).__init__()
self.mode = mode
self.index = index
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor]) -> (torch.Tensor)
pass
def forward(self, x) -> torch.Tensor:
if self.mode == 'index':
return x[self.index]
else:
return torch.cat(x, dim=1)
def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1):
if padding is None:
padding = ((stride - 1) + dilation * (k - 1)) // 2
return nn.Sequential(
nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_chs),
nn.ReLU(inplace=True)
)
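# Padding sketch for conv_bn's default above: with k=3, stride=2, dilation=1 the formula
# gives padding = ((2 - 1) + 1 * (3 - 1)) // 2 = 1 (a 'same'-style halving conv), and a
# 1x1 conv with stride 1 gives padding = (0 + 0) // 2 = 0.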
class SelecSlsBlock(nn.Module):
def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1):
super(SelecSlsBlock, self).__init__()
self.stride = stride
self.is_first = is_first
assert stride in [1, 2]
        # Feature branch convs; d1/d2/d3 are concatenated (with the skip for non-first blocks) and fused by the 1x1 conv6
self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation)
self.conv2 = conv_bn(mid_chs, mid_chs, 1)
self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1)
self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1)
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
if not isinstance(x, list):
x = [x]
assert len(x) in [1, 2]
d1 = self.conv1(x[0])
d2 = self.conv3(self.conv2(d1))
d3 = self.conv5(self.conv4(d2))
if self.is_first:
out = self.conv6(torch.cat([d1, d2, d3], 1))
return [out, out]
else:
return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]]
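# Channel bookkeeping sketch for the block above: d1, d2, d3 carry mid_chs, mid_chs // 2
# and mid_chs // 2 channels, so their concat has 2 * mid_chs channels; non-first blocks
# also concat the skip, matching conv6's input of 2 * mid_chs + skip_chs. For the
# (64, 64, 64, 128, False, 1) feature config below, conv6 therefore sees 2 * 64 + 64 = 192 channels.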
class SelecSls(nn.Module):
"""SelecSls42 / SelecSls60 / SelecSls84
Parameters
----------
cfg : network config dictionary specifying block type, feature, and head args
num_classes : int, default 1000
Number of classification classes.
in_chans : int, default 3
Number of input (color) channels.
drop_rate : float, default 0.
Dropout probability before classifier, for training
global_pool : str, default 'avg'
Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
"""
def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
self.num_classes = num_classes
super(SelecSls, self).__init__()
self.stem = conv_bn(in_chans, 32, stride=2)
self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']])
self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way
self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']])
self.num_features = self.head_hidden_size = cfg['num_features']
self.feature_info = cfg['feature_info']
self.global_pool, self.head_drop, self.fc = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^features\.(\d+)',
blocks_head=r'^head'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.fc
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.features(x)
x = self.head(self.from_seq(x))
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.fc(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_selecsls(variant, pretrained, **kwargs):
cfg = {}
feature_info = [dict(num_chs=32, reduction=2, module='stem.2')]
if variant.startswith('selecsls42'):
cfg['block'] = SelecSlsBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 144, 144, True, 2),
(144, 144, 144, 288, False, 1),
(288, 0, 304, 304, True, 2),
(304, 304, 304, 480, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.3'),
dict(num_chs=480, reduction=16, module='features.5'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls42b':
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant.startswith('selecsls60'):
cfg['block'] = SelecSlsBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 128, 128, True, 2),
(128, 128, 128, 128, False, 1),
(128, 128, 128, 288, False, 1),
(288, 0, 288, 288, True, 2),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 416, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.4'),
dict(num_chs=416, reduction=16, module='features.8'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls60b':
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant == 'selecsls84':
cfg['block'] = SelecSlsBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 144, False, 1),
(144, 0, 144, 144, True, 2),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 304, False, 1),
(304, 0, 304, 304, True, 2),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 512, False, 1),
]
feature_info.extend([
dict(num_chs=144, reduction=4, module='features.1'),
dict(num_chs=304, reduction=8, module='features.6'),
dict(num_chs=512, reduction=16, module='features.12'),
])
# Head can be replaced with alternative configurations depending on the problem
cfg['head'] = [
(512, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 3, 1),
]
cfg['num_features'] = 1280
feature_info.extend([
dict(num_chs=1024, reduction=32, module='head.1'),
dict(num_chs=1280, reduction=64, module='head.3')
])
else:
raise ValueError('Invalid net configuration ' + variant + ' !!!')
cfg['feature_info'] = feature_info
    # this model can output 6 feature levels by default, unlike most others; out_indices is left as 0-4 to avoid surprises
return build_model_with_cfg(
SelecSls,
variant,
pretrained,
model_cfg=cfg,
feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'selecsls42.untrained': _cfg(
interpolation='bicubic'),
'selecsls42b.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'selecsls60.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'selecsls60b.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'selecsls84.untrained': _cfg(
interpolation='bicubic'),
})
@register_model
def selecsls42(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls42 model.
"""
return _create_selecsls('selecsls42', pretrained, **kwargs)
@register_model
def selecsls42b(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls42_B model.
"""
return _create_selecsls('selecsls42b', pretrained, **kwargs)
@register_model
def selecsls60(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls60 model.
"""
return _create_selecsls('selecsls60', pretrained, **kwargs)
@register_model
def selecsls60b(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls60_B model.
"""
return _create_selecsls('selecsls60b', pretrained, **kwargs)
@register_model
def selecsls84(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls84 model.
"""
return _create_selecsls('selecsls84', pretrained, **kwargs)
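# Minimal usage sketch (assumes the timm package is installed; the '.untrained' tags
# above have no pretrained weights):
#
#   import timm, torch
#   model = timm.create_model('selecsls60b', pretrained=False).eval()
#   logits = model(torch.randn(1, 3, 224, 224))   # -> shape (1, 1000)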
| pytorch-image-models/timm/models/selecsls.py/0 | {
"file_path": "pytorch-image-models/timm/models/selecsls.py",
"repo_id": "pytorch-image-models",
"token_count": 6461
} |
""" Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in:
'Exploring Plain Vision Transformer Backbones for Object Detection'
- https://arxiv.org/abs/2203.16527
'Segment Anything Model (SAM)'
- https://github.com/facebookresearch/segment-anything/
"""
import logging
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead, \
Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn
from torch.jit import Final
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
# model_registry will add each entrypoint fn to this
__all__ = ['VisionTransformerSAM']
_logger = logging.getLogger(__name__)
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
register_notrace_function(get_rel_pos)
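# Shape sketch for get_rel_pos: with q_size == k_size == 14 the table needs
# max_rel_dist = 2 * 14 - 1 = 27 rows; relative_coords then spans 0..26 and the gathered
# output has shape (14, 14, C), one embedding per (query row, key row) offset.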
def get_decomposed_rel_pos_bias(
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: Tuple[int, int],
k_size: Tuple[int, int],
) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
Returns:
bias (Tensor): attention bias to add to attention map
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
return attn_bias.reshape(-1, q_h * q_w, k_h * k_w)
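# A minimal, hypothetical shape sketch (not part of the SAM encoder): decomposed relative
# position bias for a 14x14 window with head_dim=64, the shapes used by the windowed
# attention blocks below.
def _rel_pos_bias_example():
    head_dim, q_h, q_w = 64, 14, 14
    q = torch.randn(2, q_h * q_w, head_dim)
    rel_pos_h = torch.zeros(2 * q_h - 1, head_dim)
    rel_pos_w = torch.zeros(2 * q_w - 1, head_dim)
    bias = get_decomposed_rel_pos_bias(q, rel_pos_h, rel_pos_w, (q_h, q_w), (q_h, q_w))
    assert bias.shape == (2, q_h * q_w, q_h * q_w)  # added to attention logits pre-softmax
    return bias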
class Attention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
qk_norm=False,
attn_drop=0.,
proj_drop=0.,
norm_layer=nn.LayerNorm,
use_rel_pos: bool = False,
input_size: Optional[Tuple[int, int]] = None,
rope: Optional[nn.Module] = None,
):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert rope is None
assert (
input_size is not None
), "Input size must be provided if using relative positional encoding."
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(
2 * input_size[0] - 1, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(
2 * input_size[1] - 1, self.head_dim))
self.rope = rope
def forward(self, x):
B, H, W, _ = x.shape
N = H * W
x = x.reshape(B, N, -1)
qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# qkv with shape (3, B, nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0)
# q, k, v with shape (B * nHead, H * W, C)
q, k = self.q_norm(q), self.k_norm(k)
if self.use_rel_pos:
attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
else:
attn_bias = None
if self.rope is not None:
rope = self.rope.get_embed()
q = apply_rot_embed_cat(q, rope).type_as(v)
k = apply_rot_embed_cat(k, rope).type_as(v)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if attn_bias is not None:
attn = attn + attn_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
x = x.view(B, H, W, -1)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=True,
qk_norm=False,
proj_drop=0.,
attn_drop=0.,
init_values=None,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
mlp_layer=Mlp,
use_rel_pos=False,
window_size=0,
input_size=None,
rope=None,
):
super().__init__()
self.window_size = window_size
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
use_rel_pos=use_rel_pos,
input_size=input_size if window_size == 0 else (window_size, window_size),
rope=rope,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = mlp_layer(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
B, H, W, _ = x.shape
shortcut = x
x = self.norm1(x)
# Window partition
pad_hw: Optional[Tuple[int, int]] = None
if self.window_size > 0:
x, pad_hw = window_partition(x, self.window_size)
x = self.drop_path1(self.ls1(self.attn(x)))
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, (H, W), pad_hw)
x = shortcut + x
x = x.reshape(B, H * W, -1) # MLP is faster for N, L, C tensor
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
x = x.reshape(B, H, W, -1)
return x
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
Returns:
windows: windows after partition with [B * num_windows, window_size, window_size, C].
(Hp, Wp): padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(
windows: torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]] = None,
) -> torch.Tensor:
"""
Window unpartition into original sequences and removing padding.
Args:
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
window_size (int): window size.
pad_hw (Tuple): padded height and width (Hp, Wp).
hw (Tuple): original height and width (H, W) before padding.
Returns:
x: unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw if pad_hw is not None else hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
x = x[:, :H, :W, :].contiguous()
return x
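# A minimal, hypothetical round-trip sketch (not part of the SAM encoder): partition
# SAM's 64x64 token grid (1024 px / patch 16) into 14x14 windows, padding 64 -> 70,
# then undo the partition with window_unpartition.
def _window_roundtrip_example():
    x = torch.randn(1, 64, 64, 8)
    windows, pad_hw = window_partition(x, 14)
    assert windows.shape == (25, 14, 14, 8) and pad_hw == (70, 70)
    y = window_unpartition(windows, 14, (64, 64), pad_hw)
    assert y.shape == x.shape
    return y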
class VisionTransformerSAM(nn.Module):
""" Vision Transformer for Segment-Anything Model(SAM)
    A PyTorch impl of: `Exploring Plain Vision Transformer Backbones for Object Detection`
        - https://arxiv.org/abs/2203.16527
    and the image encoder of `Segment Anything Model (SAM)`
        - https://github.com/facebookresearch/segment-anything/
"""
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
num_classes: int = 768,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_norm: bool = False,
init_values: Optional[float] = None,
pre_norm: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
patch_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init: str = '',
embed_layer: Callable = partial(PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False),
norm_layer: Optional[Callable] = nn.LayerNorm,
act_layer: Optional[Callable] = nn.GELU,
block_fn: Callable = Block,
mlp_layer: Callable = Mlp,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
use_rope: bool = False,
window_size: int = 14,
global_attn_indexes: Tuple[int, ...] = (),
neck_chans: int = 256,
global_pool: str = 'avg',
head_hidden_size: Optional[int] = None,
ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None
):
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of image input channels.
num_classes: Number of classes for classification head.
            global_pool: Type of global pooling for final sequence (default: 'avg').
embed_dim: Transformer embedding dimension.
depth: Depth of transformer.
num_heads: Number of attention heads.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: Enable bias for qkv projections if True.
init_values: Layer-scale init values (layer-scale enabled if not None).
drop_rate: Head dropout rate.
pos_drop_rate: Position embedding dropout rate.
attn_drop_rate: Attention dropout rate.
drop_path_rate: Stochastic depth rate.
weight_init: Weight initialization scheme.
embed_layer: Patch embedding layer.
norm_layer: Normalization layer.
act_layer: MLP activation layer.
block_fn: Transformer block layer.
use_abs_pos: If True, use absolute positional embeddings.
use_rel_pos: If True, add relative positional embeddings to the attention map.
use_rope: If True, add rotary position embeddings to q/k in attention block.
            window_size: Window size for window attention blocks. If 0, window attention is not used.
global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0.
global_pool: Global pooling type.
            head_hidden_size: If set, use NormMlpClassifierHead with this hidden size
ref_feat_shape: Tuple of reference feature shapes for ROPE, (global, local)
"""
super().__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used
)
grid_size = self.patch_embed.grid_size
r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if patch_drop_rate > 0:
self.patch_drop = PatchDropout(
patch_drop_rate,
num_prefix_tokens=0,
)
else:
self.patch_drop = nn.Identity()
self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
if use_rope:
assert not use_rel_pos, "ROPE and relative pos embeddings should not be enabled at same time"
if ref_feat_shape is not None:
assert len(ref_feat_shape) == 2
ref_feat_shape_global = to_2tuple(ref_feat_shape[0])
ref_feat_shape_window = to_2tuple(ref_feat_shape[1])
else:
ref_feat_shape_global = ref_feat_shape_window = None
self.rope_global = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=grid_size,
ref_feat_shape=ref_feat_shape_global,
)
self.rope_window = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=to_2tuple(window_size),
ref_feat_shape=ref_feat_shape_window,
)
else:
self.rope_global = None
self.rope_window = None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.Sequential(*[
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
mlp_layer=mlp_layer,
use_rel_pos=use_rel_pos,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=grid_size,
rope=self.rope_window if i not in global_attn_indexes else self.rope_global,
)
for i in range(depth)])
self.feature_info = [
dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]
if neck_chans:
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
neck_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(neck_chans),
nn.Conv2d(
neck_chans,
neck_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(neck_chans),
)
self.num_features = neck_chans
else:
if head_hidden_size:
self.neck = nn.Identity()
else:
# should have a final norm with standard ClassifierHead
self.neck = LayerNorm2d(embed_dim)
neck_chans = embed_dim
# Classifier Head
if head_hidden_size:
self.head = NormMlpClassifierHead(
neck_chans,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=drop_rate,
)
else:
self.head = ClassifierHead(
neck_chans,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate features, or a tuple of (final neck output, intermediates).
        """
assert output_fmt == 'NCHW', 'Output shape for ViT-SAM must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# forward pass, collect intermediates
x = self.patch_embed(x)
if self.pos_embed is not None:
# dynamically resize abs pos embedding if needed
x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3])
x = self.pos_drop(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
for i, blk in enumerate(blocks):
x = blk(x)
if i in take_indices:
# make output BCHW
if norm:
# norm is intertwined with neck convs so apply both, changes the dim
# FIXME only apply to final? Need experiments
intermediates.append(self.neck(x.permute(0, 3, 1, 2)))
else:
intermediates.append(x.permute(0, 3, 1, 2))
if intermediates_only:
return intermediates
x = self.neck(x.permute(0, 3, 1, 2))
return x, intermediates
def prune_intermediate_layers(
self,
indices: Optional[Union[int, List[int]]] = None,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
self.blocks = self.blocks[:max_index + 1] # truncate blocks
if prune_norm:
# neck is being treated as equivalent to final norm here
self.neck = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
if self.pos_embed is not None:
# dynamically resize abs pos embedding if needed
x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3])
x = self.pos_drop(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.neck(x.permute(0, 3, 1, 2))
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(
state_dict,
model,
):
""" Remap SAM checkpoints -> timm """
sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict
out_dict = {}
for k, v in state_dict.items():
if k.startswith('image_encoder.'):
k = k[14:]
k = k.replace('mlp.lin', 'mlp.fc')
else:
if sam_checkpoint:
continue
out_dict[k] = v
return out_dict
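# Remap sketch: an original SAM key 'image_encoder.blocks.0.mlp.lin1.weight' becomes
# 'blocks.0.mlp.fc1.weight'; for SAM checkpoints, keys outside 'image_encoder.' are dropped.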
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Segment-Anything Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only)
'samvit_base_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_large_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_huge_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_base_patch16_224': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000,
input_size=(3, 224, 224), crop_pct=0.9),
})
def _create_vision_transformer(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
return build_model_with_cfg(
VisionTransformerSAM,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
@register_model
def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-B/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-L/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-H/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-B/16 based on samvit arch
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],
window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None,
)
model = _create_vision_transformer(
'samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
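# Minimal usage sketch (assumes the timm package is installed; the '.sa1b' weights above
# are feature-only, so num_classes=0 is passed explicitly here):
#
#   import timm, torch
#   model = timm.create_model('samvit_base_patch16.sa1b', pretrained=False, num_classes=0).eval()
#   feats = model(torch.randn(1, 3, 1024, 1024))   # pooled neck features, shape (1, 256)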
| pytorch-image-models/timm/models/vision_transformer_sam.py/0 | {
"file_path": "pytorch-image-models/timm/models/vision_transformer_sam.py",
"repo_id": "pytorch-image-models",
"token_count": 13920
} |
""" AdamW Optimizer
Impl copied from PyTorch master
NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference
"""
import math
from typing import Tuple
import torch
from torch.optim.optimizer import Optimizer
from ._types import ParamsT
class AdamWLegacy(Optimizer):
r"""Implements AdamW algorithm.
    NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference
References:
- Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980
- Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
- On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ
Args:
params: iterable of parameters to optimize or dicts defining parameter groups
lr: learning rate
betas: coefficients used for computing running averages of gradient and its square
eps: term added to the denominator to improve numerical stability
weight_decay: weight decay coefficient
amsgrad: whether to use the AMSGrad variant of this algorithm
from the paper `On the Convergence of Adam and Beyond`
caution: apply caution when using AdamW
"""
def __init__(
self,
params: ParamsT,
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 1e-2,
amsgrad: bool = False,
caution: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
caution=caution,
)
super(AdamWLegacy, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamWLegacy, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
group.setdefault('caution', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# Perform stepweight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
if group['caution']:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg = exp_avg * mask
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
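# A minimal, hypothetical sketch (not part of the optimizer implementation): a single
# AdamWLegacy step on a tiny model. In practice torch.optim.AdamW is the recommended,
# non-deprecated choice.
def _adamw_legacy_example():
    model = torch.nn.Linear(8, 2)
    opt = AdamWLegacy(model.parameters(), lr=1e-3, weight_decay=1e-2)
    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    opt.step()
    opt.zero_grad()
    return loss.detach()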
| pytorch-image-models/timm/optim/adamw.py/0 | {
"file_path": "pytorch-image-models/timm/optim/adamw.py",
"repo_id": "pytorch-image-models",
"token_count": 2672
} |
""" RMSProp modified to behave like Tensorflow impl
Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
Modifications Copyright 2021 Ross Wightman
"""
import torch
from torch.optim import Optimizer
from ._types import ParamsT
class RMSpropTF(Optimizer):
"""Implements RMSprop algorithm (TensorFlow style epsilon)
NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
and a few other modifications to closer match Tensorflow for matching hyper-params.
Noteworthy changes include:
1. Epsilon applied inside square-root
2. square_avg initialized to ones
3. LR scaling of update accumulated in momentum buffer
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
Args:
params: iterable of parameters to optimize or dicts defining parameter groups
lr: learning rate
momentum: momentum factor
alpha: smoothing (decay) constant
eps: term added to the denominator to improve numerical stability
        centered: if ``True``, compute the centered RMSProp where the gradient is normalized by an estimate of its variance
weight_decay: weight decay (L2 penalty) (default: 0)
decoupled_decay: decoupled weight decay as per https://arxiv.org/abs/1711.05101
lr_in_momentum: learning rate scaling is included in the momentum buffer update as per defaults in Tensorflow
caution: apply caution
"""
def __init__(
self,
params: ParamsT,
lr: float = 1e-2,
alpha: float = 0.9,
eps: float = 1e-10,
weight_decay: float = 0,
momentum: float = 0.,
centered: bool = False,
decoupled_decay: bool = False,
lr_in_momentum: bool = True,
caution: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(
lr=lr,
momentum=momentum,
alpha=alpha,
eps=eps,
centered=centered,
weight_decay=weight_decay,
decoupled_decay=decoupled_decay,
lr_in_momentum=lr_in_momentum,
caution=caution,
)
super(RMSpropTF, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSpropTF, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
group.setdefault('caution', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p)
square_avg = state['square_avg']
one_minus_alpha = 1. - group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
if group['decoupled_decay']:
p.mul_(1. - group['lr'] * group['weight_decay'])
else:
grad = grad.add(p, alpha=group['weight_decay'])
# Tensorflow order of ops for updating squared avg
square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha)
# square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha)
avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt
# grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original
else:
avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt
if group['momentum'] > 0:
buf = state['momentum_buffer']
buf.mul_(group['momentum'])
def _apply_caution(_m, _g):
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (_m * _g > 0).to(_g.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
return _m * mask
if group['lr_in_momentum']:
# Tensorflow accumulates the LR scaling in the momentum buffer
buf.addcdiv_(grad, avg, value=group['lr'])
if group['caution']:
buf = _apply_caution(buf, grad)
p.add_(-buf)
else:
# PyTorch scales the param update by LR
buf.addcdiv_(grad, avg)
if group['caution']:
buf = _apply_caution(buf, grad)
p.add_(buf, alpha=-group['lr'])
else:
p.addcdiv_(grad, avg, value=-group['lr'])
return loss
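# Per-step update sketch for the default path above (momentum > 0, lr_in_momentum=True,
# no weight decay, not centered), matching the code:
#   square_avg <- square_avg + (1 - alpha) * (grad**2 - square_avg)
#   avg        <- sqrt(square_avg + eps)          # eps inside the sqrt (TF behaviour)
#   buf        <- momentum * buf + lr * grad / avg
#   p          <- p - buf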
| pytorch-image-models/timm/optim/rmsprop_tf.py/0 | {
"file_path": "pytorch-image-models/timm/optim/rmsprop_tf.py",
"repo_id": "pytorch-image-models",
"token_count": 3431
} |
""" Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 Ross Wightman
"""
import glob
import logging
import operator
import os
import shutil
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model
):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
        self.checkpoint_files = []  # (filename, metric) tuples ordered from best to worst
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.prev_recovery_file = ''
self.can_hardlink = True
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def _replace(self, src, dst):
if self.can_hardlink:
try:
if os.path.exists(dst):
os.unlink(dst) # required for Windows support.
except (OSError, NotImplementedError) as e:
self.can_hardlink = False
os.replace(src, dst)
def _duplicate(self, src, dst):
if self.can_hardlink:
try:
if os.path.exists(dst):
# for Windows
os.unlink(dst)
os.link(src, dst)
return
except (OSError, NotImplementedError) as e:
self.can_hardlink = False
shutil.copy2(src, dst)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
self._replace(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (
len(self.checkpoint_files) < self.max_history
or metric is None
or self.cmp(metric, worst_file[1])
):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
self._duplicate(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files,
key=lambda x: x[1],
reverse=not self.decreasing # sort in descending order if a lower metric is not better
)
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
self._duplicate(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
tmp_save_path = os.path.join(self.recovery_dir, 'recovery_tmp' + self.extension)
self._save(tmp_save_path, epoch)
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._replace(tmp_save_path, save_path)
if os.path.exists(self.prev_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.prev_recovery_file))
os.remove(self.prev_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.prev_recovery_file))
self.prev_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
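# Minimal usage sketch (hypothetical names; roughly mirrors how timm's train.py drives
# this class):
#
#   saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output', max_history=3)
#   for epoch in range(num_epochs):
#       ...  # train + validate, producing eval_top1
#       best_metric, best_epoch = saver.save_checkpoint(epoch, metric=eval_top1)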
| pytorch-image-models/timm/utils/checkpoint_saver.py/0 | {
"file_path": "pytorch-image-models/timm/utils/checkpoint_saver.py",
"repo_id": "pytorch-image-models",
"token_count": 3258
} |
#!/usr/bin/env python3
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import csv
import glob
import json
import logging
import os
import time
from collections import OrderedDict
from contextlib import suppress
from functools import partial
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.layers import apply_test_time_pool, set_fast_norm
from timm.models import create_model, load_checkpoint, is_model, list_models
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_fuser, \
decay_batch_step, check_batch_size_retry, ParseKwargs, reparameterize_model
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in dataset split, for IterableDatasets.')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--input-key', default=None, type=str,
help='Dataset key for input images.')
parser.add_argument('--input-img-mode', default=None, type=str,
help='Dataset image conversion mode for input images.')
parser.add_argument('--target-key', default=None, type=str,
help='Dataset key for target labels.')
parser.add_argument('--dataset-trust-remote-code', action='store_true', default=False,
help='Allow huggingface dataset import to execute code downloaded from the dataset\'s repo.')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N',
help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--crop-border-pixels', type=int, default=None,
help='Crop pixels from image border.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
                    help='use NVIDIA Apex AMP or Native AMP for mixed precision validation')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--amp-impl', default='native', type=str,
help='AMP impl to use, "native" or "apex" (default: native)')
parser.add_argument('--model-dtype', default=None, type=str,
help='Model dtype override (non-AMP) (default: float32)')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
                    help='Use TensorFlow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
parser.add_argument('--reparam', default=False, action='store_true',
help='Reparameterize model')
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
parser.add_argument('--torchcompile-mode', type=str, default=None,
help="torch.compile mode (default: None).")
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str,
help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
help='Valid label indices txt file for validation of partial label space')
parser.add_argument('--retry', default=False, action='store_true',
help='Enable batch size decay & retry for single model validation')
def validate(args):
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
model_dtype = None
if args.model_dtype:
assert args.model_dtype in ('float32', 'float16', 'bfloat16')
model_dtype = getattr(torch, args.model_dtype)
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
amp_autocast = suppress
if args.amp:
assert model_dtype is None or model_dtype == torch.float32, 'float32 model dtype must be used with AMP'
if args.amp_impl == 'apex':
assert has_apex, 'AMP impl specified as APEX but APEX is not installed.'
assert args.amp_dtype == 'float16'
use_amp = 'apex'
_logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
else:
assert args.amp_dtype in ('float16', 'bfloat16')
use_amp = 'native'
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Validating in mixed precision with native PyTorch AMP.')
else:
_logger.info(f'Validating in {model_dtype or torch.float32}. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
if args.fast_norm:
set_fast_norm()
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=in_chans,
global_pool=args.gp,
scriptable=args.torchscript,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
if args.reparam:
model = reparameterize_model(model)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info('Model %s created, param count: %d' % (args.model, param_count))
data_config = resolve_data_config(
vars(args),
model=model,
use_test_size=not args.use_train_size,
verbose=True,
)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device=device, dtype=model_dtype) # FIXME move model device & dtype into create_model
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
model = torch.jit.script(model)
elif args.torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile, mode=args.torchcompile_mode)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if use_amp == 'apex':
model = amp.initialize(model, opt_level='O1')
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
criterion = nn.CrossEntropyLoss().to(device)
root_dir = args.data or args.data_dir
if args.input_img_mode is None:
input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L'
else:
input_img_mode = args.input_img_mode
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
download=args.dataset_download,
load_bytes=args.tf_preprocessing,
class_map=args.class_map,
num_samples=args.num_samples,
input_key=args.input_key,
input_img_mode=input_img_mode,
target_key=args.target_key,
trust_remote_code=args.dataset_trust_remote_code,
)
if args.valid_labels:
with open(args.valid_labels, 'r') as f:
valid_labels = [int(line.rstrip()) for line in f]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
crop_mode=data_config['crop_mode'],
crop_border_pixels=args.crop_border_pixels,
pin_memory=args.pin_mem,
device=device,
img_dtype=model_dtype or torch.float32,
tf_preprocessing=args.tf_preprocessing,
)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device=device, dtype=model_dtype)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
model(input)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.to(device=device)
input = input.to(device=device, dtype=model_dtype)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
output = model(input)
if valid_labels is not None:
output = output[:, valid_labels]
loss = criterion(output, target)
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
batch_idx,
len(loader),
batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses,
top1=top1,
top5=top5
)
)
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
model=args.model,
top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
crop_pct=crop_pct,
interpolation=data_config['interpolation'],
)
_logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err']))
return results
def _try_run(args, initial_batch_size):
batch_size = initial_batch_size
results = OrderedDict()
error_str = 'Unknown'
while batch_size:
args.batch_size = batch_size * args.num_gpu # multiply by num-gpu for DataParallel case
try:
if 'cuda' in args.device and torch.cuda.is_available():
torch.cuda.empty_cache()
elif "npu" in args.device and torch.npu.is_available():
torch.npu.empty_cache()
results = validate(args)
return results
except RuntimeError as e:
error_str = str(e)
_logger.error(f'"{error_str}" while running validation.')
if not check_batch_size_retry(error_str):
break
batch_size = decay_batch_step(batch_size)
_logger.warning(f'Reducing batch size to {batch_size} for retry.')
results['model'] = args.model
results['error'] = error_str
_logger.error(f'{args.model} failed to validate ({error_str}).')
return results
_NON_IN1K_FILTERS = ['*_in21k', '*_in22k', '*in12k', '*_dino', '*fcmae', '*seer']
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(
pretrained=True,
exclude_filters=_NON_IN1K_FILTERS,
)
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(
args.model,
pretrained=True,
)
model_cfgs = [(n, '') for n in model_names]
if not model_cfgs and os.path.isfile(args.model):
with open(args.model) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names if n]
if len(model_cfgs):
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
initial_batch_size = args.batch_size
for m, c in model_cfgs:
args.model = m
args.checkpoint = c
r = _try_run(args, initial_batch_size)
if 'error' in r:
continue
if args.checkpoint:
r['checkpoint'] = args.checkpoint
results.append(r)
except KeyboardInterrupt as e:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
else:
if args.retry:
results = _try_run(args, args.batch_size)
else:
results = validate(args)
if args.results_file:
write_results(args.results_file, results, format=args.results_format)
# output results in JSON to stdout w/ delimiter for runner script
print(f'--result\n{json.dumps(results, indent=4)}')
def write_results(results_file, results, format='csv'):
with open(results_file, mode='w') as cf:
if format == 'json':
json.dump(results, cf, indent=4)
else:
if not isinstance(results, (list, tuple)):
results = [results]
if not results:
return
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| pytorch-image-models/validate.py/0 | {
"file_path": "pytorch-image-models/validate.py",
"repo_id": "pytorch-image-models",
"token_count": 9687
} |
- title: Get started
  sections:
  - local: index
    title: 🤗 Agents
  - local: guided_tour
    title: Guided tour
- title: Tutorials
  sections:
  - local: tutorials/building_good_agents
    title: ✨ Building good Agents
  - local: tutorials/inspect_runs
    title: 📊 Inspect runs with OpenTelemetry
  - local: tutorials/tools
    title: 🛠️ Tools - in-depth guide
  - local: tutorials/secure_code_execution
    title: 🛡️ Secure your code execution with E2B
- title: Conceptual guides
  sections:
  - local: conceptual_guides/intro_agents
    title: 🤖 Introduction to agentic systems
  - local: conceptual_guides/react
    title: 🤔 How do multi-step agents work?
- title: Examples
  sections:
  - local: examples/text_to_sql
    title: Self-correcting Text-to-SQL
  - local: examples/rag
    title: Master your knowledge base with agentic RAG
  - local: examples/multiagents
    title: Orchestrate a multi-agent system
- title: Reference
  sections:
  - local: reference/agents
    title: Agent-related objects
  - local: reference/tools
    title: Tool-related objects
| smolagents/docs/source/hi/_toctree.yml/0 | {
"file_path": "smolagents/docs/source/hi/_toctree.yml",
"repo_id": "smolagents",
"token_count": 783
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Introduction to Agents
> [!TIP]
> Translator's note: the standard Chinese industry term for "agent" is "智能体". The original translation kept the English word "agent" untranslated for a smoother reading experience. (In a mostly Chinese article, it's easier to notice the English. Attention Is All You Need!)
## 🤔 What are agents?
Any efficient system using AI needs to give the LLM some kind of access to the real world: for example, the ability to call a search tool to fetch external information, or to act on certain programs in order to complete a task. In other words, the LLM should have ***agency***. Agentic programs are the LLM's gateway to the outside world.
> [!TIP]
> AI agents are **programs where LLM outputs control the workflow**.
Any system leveraging LLMs integrates the LLM outputs into code. The degree to which the LLM's output controls the code workflow is the level of agency the LLM has in the system.
Note that under this definition, "agent" is not a discrete, 0-or-1 property: instead, "agency" evolves on a continuous spectrum, as you give the LLM more or less power over your workflow.
See in the table below how agency varies across systems:
| Agency level | Description                                              | Name             | Example pattern                                     |
| ------------ | -------------------------------------------------------- | ---------------- | --------------------------------------------------- |
| ☆☆☆          | LLM output has no impact on program flow                  | Simple processor | `process_llm_output(llm_response)`                  |
| ★☆☆          | LLM output determines an if/else branch                   | Router           | `if llm_decision(): path_a() else: path_b()`        |
| ★★☆          | LLM output determines function execution                  | Tool caller      | `run_function(llm_chosen_tool, llm_chosen_args)`    |
| ★★★          | LLM output controls iteration and program continuation    | Multi-step agent | `while llm_should_continue(): execute_next_step()`  |
| ★★★          | One agentic workflow can start another agentic workflow   | Multi-agent      | `if llm_trigger(): execute_agent()`                 |
A multi-step agent has the following code structure:
```python
memory = [user_defined_task]
while llm_should_continue(memory):  # this loop is the multi-step part
    action = llm_get_next_action(memory)  # this is the tool-calling part
    observations = execute_action(action)
    memory += [action, observations]
```
This agentic system runs in a loop, executing a new action at each step (the action may involve calling some pre-defined *tools*, which are just functions), until its observations show that a satisfactory state has been reached for solving the given task. Here is an example of how a multi-step agent can solve a simple math problem:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"/>
</div>
## ✅ When to use agents / ⛔ when to avoid them
Agents are useful when you need an LLM to determine the workflow of an app. But they are often overkill. The question is: do I really need flexibility in the workflow to solve the task at hand effectively?
If the pre-defined workflow falls short too often, it means you need more flexibility.
Let's take an example: say you're building an app that handles customer requests on a surfing-trip website.
You can know in advance that requests will fall into one of 2 categories (based on user choice), and you have a pre-defined workflow for each of these 2 cases.
1. Want some information about the trips? ⇒ give them access to a search bar to query your knowledge base
2. Want to talk to sales? ⇒ let them fill in a contact form.
If that deterministic workflow fits all queries, by all means just code everything! This gives you a 100% reliable system, with no risk of errors introduced by letting an unpredictable LLM meddle in your workflow. For the sake of simplicity and robustness, it is advisable to default to not using any agentic behaviour.
But what if the workflow can't be determined that well in advance?
For example, a user may want to ask: `"I can come on Monday, but I forgot my passport so risk being delayed to Wednesday, is it possible to take me and my stuff to surf on Tuesday morning, with a cancellation insurance?"` This question hinges on many factors, and probably none of the pre-defined criteria above will be enough for this request.
If the pre-defined workflow falls short too often, it means you need more flexibility.
This is where an agentic setup helps.
In the example above, you could build a multi-step agent with access to a weather API for forecasts, a Google Maps API to compute travel distances, an employee availability dashboard, and a RAG system over your knowledge base.
Until recently, computer programs were restricted to pre-defined workflows, trying to handle complexity by piling up if/else branches. They focused on extremely narrow tasks, like "compute the sum of these numbers" or "find the shortest path in this graph". But in reality, most real-life tasks, like our trip example above, do not fit into pre-defined workflows. Agentic systems open the door to real-world tasks for programs!
## Why `smolagents`?
For some low-level agentic use cases, like chains or routers, you can write all the code yourself. You'll be much better off that way, since it gives you more control over, and a better understanding of, your system.
But once you start aiming for more complex behaviours, like letting an LLM call a function (that's "tool calling") or letting an LLM drive a while loop ("multi-step agent"), some abstractions become necessary:
- For tool calling, you need to parse the agent's output, so that output needs a pre-defined format such as "Thought: I should call tool 'get_weather'. Action: get_weather(Paris).", which you parse with a pre-defined function, and the system prompt given to the LLM should tell it about this format.
- For a multi-step agent where the LLM output drives the loop, you need to give the LLM a different prompt depending on what happened in the last loop iteration: so you need some form of memory.
See? With these two examples, we already discovered the need for a few components to help us:
- Of course, an LLM that acts as the engine of the system
- A list of tools the agent can access
- A parser that extracts tool calls from the LLM's output
- A system prompt kept in sync with the parser
- A memory
But wait, since we give the LLM room in decision-making, it will certainly make mistakes: so we also need error logging and retry mechanisms.
All of these elements need to be tightly coupled to form a well-functioning system. That's why we decided we needed to build basic building blocks to make them all work together; a minimal sketch of such a loop is shown below.
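To make these building blocks concrete, here is a minimal, illustrative sketch of such a loop, with a regex-based parser and a list-based memory. This is not the `smolagents` implementation: the `llm` callable, the `Thought:`/`Action:` output format, the `Final answer:` stop marker and the `get_weather` tool are assumptions made only for this example.
```python
import re

# Hypothetical tool registry: plain Python functions the agent may call.
TOOLS = {"get_weather": lambda city: f"Sunny in {city}"}

# Parses outputs shaped like "Action: get_weather(Paris)".
ACTION_RE = re.compile(r"Action:\s*(\w+)\((.*?)\)")

def parse_action(llm_output: str) -> tuple[str, str]:
    """Extract (tool_name, argument) from the LLM's formatted output."""
    match = ACTION_RE.search(llm_output)
    if match is None:
        raise ValueError(f"Could not parse an action from: {llm_output!r}")
    return match.group(1), match.group(2)

def run_agent(llm, task: str, max_steps: int = 5) -> str:
    memory = [f"Task: {task}"]  # the memory is just a growing transcript
    for _ in range(max_steps):
        output = llm("\n".join(memory))  # the format instructions + memory go to the LLM
        if output.startswith("Final answer:"):  # stop condition chosen by the LLM
            return output
        tool_name, argument = parse_action(output)  # parser kept in sync with the prompt format
        observation = TOOLS[tool_name](argument)  # execute the chosen tool
        memory += [output, f"Observation: {observation}"]  # feed the result back next iteration
    return "Max steps reached without a final answer"
```
A real implementation would also wrap the parsing and the tool call in error handling and retries on malformed outputs, which is exactly the kind of plumbing `smolagents` packages for you.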
## Code agents
In a multi-step agent, at each step the LLM can write an action, in the form of calls to external tools. A common format for writing these actions (used by Anthropic, OpenAI and others) is usually some variation of "write the action as a JSON of the tool name and the arguments to use, then parse it to know which tool to execute and with which arguments".
[Multiple](https://huggingface.co/papers/2402.01030) [research](https://huggingface.co/papers/2411.01747) [papers](https://huggingface.co/papers/2401.00812) have shown that LLMs that write their tool calls in code perform much better.
The reason is simple: _we crafted our programming languages specifically to be the best possible way to express actions performed by a computer_. If JSON snippets were a better way to express actions, JSON would be the top programming language and programming would be very painful.
The figure below, taken from [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030), illustrates some advantages of writing actions in code:
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/code_vs_json_actions.png">
Compared to JSON snippets, writing actions in code gives you better (a short comparison is sketched after this list):
- **Composability:** could you nest JSON actions within each other, or define a set of JSON actions to reuse later, the same way you would define a Python function?
- **Object management:** how do you store the output of an action like `generate_image` in JSON?
- **Generality:** code is built to express, simply, anything you can make a computer do.
- **Representation in LLM training data:** plenty of high-quality code actions are already present in the LLMs' training data, which means they are already trained for this!
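To illustrate the composability and object-management points above, here is a small, illustrative comparison of the two action styles; the `get_weather` and `send_email` tools are stubs invented for this example.
```python
# Stub tools, invented purely for this illustration.
def get_weather(city: str) -> str:
    return f"Sunny in {city}"

def send_email(to: str, body: str) -> None:
    print(f"email to {to}: {body}")

# JSON-style action: one rigid blob per call; its output has to be stored and
# threaded through by the framework before it can feed another call.
json_action = {"tool_name": "get_weather", "arguments": {"city": "Paris"}}

# Code action: outputs are ordinary variables, and calls compose like any other Python code.
for city in ["Paris", "Biarritz"]:
    send_email(to="team@example.com", body=f"Forecast for {city}: {get_weather(city)}")
```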
| smolagents/docs/source/zh/conceptual_guides/intro_agents.md/0 | {
"file_path": "smolagents/docs/source/zh/conceptual_guides/intro_agents.md",
"repo_id": "smolagents",
"token_count": 5282
} |
from dotenv import load_dotenv
from smolagents import CodeAgent, HfApiModel, Tool
from smolagents.default_tools import VisitWebpageTool
load_dotenv()
class GetCatImageTool(Tool):
name = "get_cat_image"
description = "Get a cat image"
inputs = {}
output_type = "image"
def __init__(self):
super().__init__()
self.url = "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png"
def forward(self):
from io import BytesIO
import requests
from PIL import Image
response = requests.get(self.url)
return Image.open(BytesIO(response.content))
get_cat_image = GetCatImageTool()
agent = CodeAgent(
tools=[get_cat_image, VisitWebpageTool()],
model=HfApiModel(),
additional_authorized_imports=[
"Pillow",
"requests",
"markdownify",
], # "duckduckgo-search",
use_e2b_executor=True,
)
agent.run(
"Calculate how much is 2+2, then return me an image of a cat. Directly use the image provided in your state.",
additional_args={"cat_image": get_cat_image()},
) # Asking to directly return the image from state tests that additional_args are properly sent to server.
# Try the agent in a Gradio UI
from smolagents import GradioUI
GradioUI(agent).launch()
| smolagents/examples/e2b_example.py/0 | {
"file_path": "smolagents/examples/e2b_example.py",
"repo_id": "smolagents",
"token_count": 496
} |
<jupyter_start><jupyter_code>!pip install "smolagents[litellm]" -q
import datasets
eval_ds = datasets.load_dataset("gaia-benchmark/GAIA", "2023_all")["validation"]
to_keep = [
"What's the last line of the rhyme under the flavor",
'Of the authors (First M. Last) that worked on the paper "Pie Menus or Linear Menus',
"In Series 9, Episode 11 of Doctor Who, the Doctor is trapped inside an ever-shifting maze. What is this location called in the official script for the episode? Give the setting exactly as it appears in the first scene heading.",
"Which contributor to the version of OpenCV where support was added for the Mask-RCNN model has the same name as a former Chinese head of government when the names are transliterated to the Latin alphabet?",
"The photograph in the Whitney Museum of American Art's collection with accession number 2022.128 shows a person holding a book. Which military unit did the author of this book join in 1813? Answer without using articles.",
"I went to Virtue restaurant & bar in Chicago for my birthday on March 22, 2021 and the main course I had was delicious! Unfortunately, when I went back about a month later on April 21, it was no longer on the dinner menu.",
"In Emily Midkiff's June 2014 article in a journal named for the one of Hreidmar's ",
"Under DDC 633 on Bielefeld University Library's BASE, as of 2020",
"In the 2018 VSCode blog post on replit.com, what was the command they clicked on in the last video to remove extra lines?",
"The Metropolitan Museum of Art has a portrait in its collection with an accession number of 29.100.5. Of the consecrators and co-consecrators",
"In Nature journal's Scientific Reports conference proceedings from 2012, in the article that did not mention plasmons or plasmonics, what nano-compound is studied?",
'In the year 2022, and before December, what does "R" stand for in the three core policies of the type of content',
"Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?",
]
eval_ds = eval_ds.filter(lambda row: any([el in row["Question"] for el in to_keep]))
eval_ds = eval_ds.rename_columns({"Question": "question", "Final answer": "true_answer", "Level": "task"})
import os
from dotenv import load_dotenv
from huggingface_hub import login
load_dotenv(override=True)
login(os.getenv("HF_TOKEN"))<jupyter_output><empty_output><jupyter_text>Text browser<jupyter_code>from scripts.run_agents import answer_questions
from scripts.text_inspector_tool import TextInspectorTool
from scripts.text_web_browser import (
ArchiveSearchTool,
FinderTool,
FindNextTool,
NavigationalSearchTool,
PageDownTool,
PageUpTool,
SearchInformationTool,
VisitTool,
)
from scripts.visual_qa import VisualQAGPT4Tool
from smolagents import CodeAgent, LiteLLMModel
proprietary_model = LiteLLMModel("gpt-4o")
### BUILD AGENTS & TOOLS
WEB_TOOLS = [
SearchInformationTool(),
NavigationalSearchTool(),
VisitTool(),
PageUpTool(),
PageDownTool(),
FinderTool(),
FindNextTool(),
ArchiveSearchTool(),
]
surfer_agent = CodeAgent(
model=proprietary_model,
tools=WEB_TOOLS,
max_steps=20,
verbosity_level=2,
)
results_text = answer_questions(
eval_ds,
surfer_agent,
"code_gpt4o_27-01_text",
reformulation_model=proprietary_model,
output_folder="output_browsers",
visual_inspection_tool=VisualQAGPT4Tool(),
text_inspector_tool=TextInspectorTool(proprietary_model, 40000),
)<jupyter_output><empty_output><jupyter_text>Vision browser<jupyter_code>!pip install helium -q
from scripts.visual_qa import VisualQAGPT4Tool
from smolagents import CodeAgent, DuckDuckGoSearchTool, LiteLLMModel
from smolagents.vision_web_browser import (
close_popups,
go_back,
helium_instructions,
initialize_agent,
save_screenshot,
search_item_ctrl_f,
)
proprietary_model = LiteLLMModel("gpt-4o")
vision_browser_agent = initialize_agent(proprietary_model)
### BUILD AGENTS & TOOLS
CodeAgent(
tools=[DuckDuckGoSearchTool(), go_back, close_popups, search_item_ctrl_f],
model=proprietary_model,
additional_authorized_imports=["helium"],
step_callbacks=[save_screenshot],
max_steps=20,
verbosity_level=2,
)
results_vision = answer_questions(
eval_ds,
vision_browser_agent,
"code_gpt4o_27-01_vision",
reformulation_model=proprietary_model,
output_folder="output_browsers",
visual_inspection_tool=VisualQAGPT4Tool(),
text_inspector_tool=TextInspectorTool(proprietary_model, 40000),
postprompt=helium_instructions
+ "Any web browser controls won't work on .pdf urls, rather use the tool 'inspect_file_as_text' to read them",
)<jupyter_output><empty_output><jupyter_text>Browser-use browser<jupyter_code>!pip install browser-use lxml_html_clean -q
!playwright install
import asyncio
import nest_asyncio
nest_asyncio.apply()
from browser_use import Agent
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
load_dotenv()
class BrowserUseAgent:
logs = []
def write_inner_memory_from_logs(self, summary_mode):
return self.results
def run(self, task, **kwargs):
agent = Agent(
task=task,
llm=ChatOpenAI(model="gpt-4o"),
)
self.results = asyncio.get_event_loop().run_until_complete(agent.run())
return self.results.history[-1].result[0].extracted_content
browser_use_agent = BrowserUseAgent()
results_browseruse = answer_questions(
eval_ds,
browser_use_agent,
"gpt-4o_27-01_browseruse",
reformulation_model=proprietary_model,
output_folder="output_browsers",
visual_inspection_tool=VisualQAGPT4Tool(),
text_inspector_tool=TextInspectorTool(proprietary_model, 40000),
postprompt="",
run_simple=True,
)<jupyter_output><empty_output><jupyter_text>Get results<jupyter_code>import pandas as pd
from scripts.gaia_scorer import question_scorer
results_vision, results_text, results_browseruse = (
pd.DataFrame(results_vision),
pd.DataFrame(results_text),
pd.DataFrame(results_browseruse),
)
results_vision["is_correct"] = results_vision.apply(
lambda x: question_scorer(x["prediction"], x["true_answer"]), axis=1
)
results_text["is_correct"] = results_text.apply(lambda x: question_scorer(x["prediction"], x["true_answer"]), axis=1)
results_browseruse["is_correct"] = results_browseruse.apply(
lambda x: question_scorer(x["prediction"], x["true_answer"]), axis=1
)
results = pd.concat([results_vision, results_text, results_browseruse])
results.groupby("agent_name")["is_correct"].mean()
correct_vision_results = results_vision.loc[results_vision["is_correct"]]
correct_vision_results
false_text_results = results_text.loc[~results_text["is_correct"]]
false_text_results<jupyter_output><empty_output> | smolagents/examples/open_deep_research/visual_vs_text_browser.ipynb/0 | {
"file_path": "smolagents/examples/open_deep_research/visual_vs_text_browser.ipynb",
"repo_id": "smolagents",
"token_count": 2367
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from enum import IntEnum
from typing import List, Optional
from rich import box
from rich.console import Console, Group
from rich.panel import Panel
from rich.rule import Rule
from rich.syntax import Syntax
from rich.table import Table
from rich.text import Text
from rich.tree import Tree
__all__ = ["AgentLogger", "LogLevel", "Monitor"]
class Monitor:
def __init__(self, tracked_model, logger):
self.step_durations = []
self.tracked_model = tracked_model
self.logger = logger
if getattr(self.tracked_model, "last_input_token_count", "Not found") != "Not found":
self.total_input_token_count = 0
self.total_output_token_count = 0
def get_total_token_counts(self):
return {
"input": self.total_input_token_count,
"output": self.total_output_token_count,
}
def reset(self):
self.step_durations = []
self.total_input_token_count = 0
self.total_output_token_count = 0
def update_metrics(self, step_log):
"""Update the metrics of the monitor.
Args:
step_log ([`MemoryStep`]): Step log to update the monitor with.
"""
step_duration = step_log.duration
self.step_durations.append(step_duration)
console_outputs = f"[Step {len(self.step_durations) - 1}: Duration {step_duration:.2f} seconds"
if getattr(self.tracked_model, "last_input_token_count", None) is not None:
self.total_input_token_count += self.tracked_model.last_input_token_count
self.total_output_token_count += self.tracked_model.last_output_token_count
console_outputs += (
f"| Input tokens: {self.total_input_token_count:,} | Output tokens: {self.total_output_token_count:,}"
)
console_outputs += "]"
self.logger.log(Text(console_outputs, style="dim"), level=1)
class LogLevel(IntEnum):
OFF = -1 # No output
ERROR = 0 # Only errors
INFO = 1 # Normal output (default)
DEBUG = 2 # Detailed output
YELLOW_HEX = "#d4b702"
class AgentLogger:
def __init__(self, level: LogLevel = LogLevel.INFO):
self.level = level
self.console = Console()
def log(self, *args, level: str | LogLevel = LogLevel.INFO, **kwargs) -> None:
"""Logs a message to the console.
Args:
level (LogLevel, optional): Defaults to LogLevel.INFO.
"""
if isinstance(level, str):
level = LogLevel[level.upper()]
if level <= self.level:
self.console.print(*args, **kwargs)
def log_markdown(self, content: str, title: Optional[str] = None, level=LogLevel.INFO, style=YELLOW_HEX) -> None:
markdown_content = Syntax(
content,
lexer="markdown",
theme="github-dark",
word_wrap=True,
)
if title:
self.log(
Group(
Rule(
"[bold italic]" + title,
align="left",
style=style,
),
markdown_content,
),
level=level,
)
else:
self.log(markdown_content, level=level)
def log_code(self, title: str, content: str, level: int = LogLevel.INFO) -> None:
self.log(
Panel(
Syntax(
content,
lexer="python",
theme="monokai",
word_wrap=True,
),
title="[bold]" + title,
title_align="left",
box=box.HORIZONTALS,
),
level=level,
)
def log_rule(self, title: str, level: int = LogLevel.INFO) -> None:
self.log(
Rule(
"[bold]" + title,
characters="━",
style=YELLOW_HEX,
),
            level=level,
)
def log_task(self, content: str, subtitle: str, title: Optional[str] = None, level: int = LogLevel.INFO) -> None:
self.log(
Panel(
f"\n[bold]{content}\n",
title="[bold]New run" + (f" - {title}" if title else ""),
subtitle=subtitle,
border_style=YELLOW_HEX,
subtitle_align="left",
),
level=level,
)
def log_messages(self, messages: List) -> None:
messages_as_string = "\n".join([json.dumps(dict(message), indent=4) for message in messages])
self.log(
Syntax(
messages_as_string,
lexer="markdown",
theme="github-dark",
word_wrap=True,
)
)
def visualize_agent_tree(self, agent):
def create_tools_section(tools_dict):
table = Table(show_header=True, header_style="bold")
table.add_column("Name", style="#1E90FF")
table.add_column("Description")
table.add_column("Arguments")
for name, tool in tools_dict.items():
args = [
f"{arg_name} (`{info.get('type', 'Any')}`{', optional' if info.get('optional') else ''}): {info.get('description', '')}"
for arg_name, info in getattr(tool, "inputs", {}).items()
]
table.add_row(name, getattr(tool, "description", str(tool)), "\n".join(args))
return Group("🛠️ [italic #1E90FF]Tools:[/italic #1E90FF]", table)
def get_agent_headline(agent, name: Optional[str] = None):
name_headline = f"{name} | " if name else ""
return f"[bold {YELLOW_HEX}]{name_headline}{agent.__class__.__name__} | {agent.model.model_id}"
def build_agent_tree(parent_tree, agent_obj):
"""Recursively builds the agent tree."""
parent_tree.add(create_tools_section(agent_obj.tools))
if agent_obj.managed_agents:
agents_branch = parent_tree.add("🤖 [italic #1E90FF]Managed agents:")
for name, managed_agent in agent_obj.managed_agents.items():
agent_tree = agents_branch.add(get_agent_headline(managed_agent, name))
if managed_agent.__class__.__name__ == "CodeAgent":
agent_tree.add(
f"✅ [italic #1E90FF]Authorized imports:[/italic #1E90FF] {managed_agent.additional_authorized_imports}"
)
agent_tree.add(f"📝 [italic #1E90FF]Description:[/italic #1E90FF] {managed_agent.description}")
build_agent_tree(agent_tree, managed_agent)
main_tree = Tree(get_agent_headline(agent))
if agent.__class__.__name__ == "CodeAgent":
main_tree.add(
f"✅ [italic #1E90FF]Authorized imports:[/italic #1E90FF] {agent.additional_authorized_imports}"
)
build_agent_tree(main_tree, agent)
self.console.print(main_tree)
| smolagents/src/smolagents/monitoring.py/0 | {
"file_path": "smolagents/src/smolagents/monitoring.py",
"repo_id": "smolagents",
"token_count": 3705
} |
import subprocess
def test_import_smolagents_without_extras():
# Run the import statement in an isolated virtual environment
result = subprocess.run(
["uv", "run", "--isolated", "--no-editable", "-"], input="import smolagents", text=True, capture_output=True
)
# Check if the import was successful
assert result.returncode == 0, (
"Import failed with error: "
+ (result.stderr.splitlines()[-1] if result.stderr else "No error message")
+ "\n"
+ result.stderr
)
| smolagents/tests/test_import.py/0 | {
"file_path": "smolagents/tests/test_import.py",
"repo_id": "smolagents",
"token_count": 200
} |
# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.84.0 AS chef
WORKDIR /usr/src
ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
FROM chef AS planner
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
python3.11-dev
RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
rm -f $PROTOC_ZIP
COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --profile release-opt --recipe-path recipe.json
ARG GIT_SHA
ARG DOCKER_LABEL
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo build --profile release-opt --frozen
# Python builder
# Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile
FROM nvidia/cuda:12.4.1-devel-ubuntu22.04 AS pytorch-install
# NOTE: When updating PyTorch version, beware to remove `pip install nvidia-nccl-cu12==2.22.3` below in the Dockerfile. Context: https://github.com/huggingface/text-generation-inference/pull/2099
ARG PYTORCH_VERSION=2.5.1
ARG PYTHON_VERSION=3.11
# Keep in sync with `server/pyproject.toml`
ARG CUDA_VERSION=12.4
ARG MAMBA_VERSION=24.3.0-0
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch
# Automatically set by buildx
ARG TARGETPLATFORM
ENV PATH=/opt/conda/bin:$PATH
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
curl \
git && \
rm -rf /var/lib/apt/lists/*
# Install conda
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
"linux/arm64") MAMBA_ARCH=aarch64 ;; \
*) MAMBA_ARCH=x86_64 ;; \
esac && \
curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
bash ~/mambaforge.sh -b -p /opt/conda && \
rm ~/mambaforge.sh
# Install pytorch
# On arm64 we exit with an error code
RUN case ${TARGETPLATFORM} in \
"linux/arm64") exit 1 ;; \
*) /opt/conda/bin/conda update -y conda && \
/opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" "pytorch=$PYTORCH_VERSION" "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \
esac && \
/opt/conda/bin/conda clean -ya
# CUDA kernels builder image
FROM pytorch-install AS kernel-builder
ARG MAX_JOBS=8
ENV TORCH_CUDA_ARCH_LIST="8.0;8.6;9.0+PTX"
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ninja-build cmake \
&& rm -rf /var/lib/apt/lists/*
# Build Flash Attention CUDA kernels
FROM kernel-builder AS flash-att-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att Makefile
# Build specific version of flash attention
RUN make build-flash-attention
# Build Flash Attention v2 CUDA kernels
FROM kernel-builder AS flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2-cuda
# Build Transformers exllama kernels
FROM kernel-builder AS exllama-kernels-builder
WORKDIR /usr/src
COPY server/exllama_kernels/ .
RUN python setup.py build
# Build Transformers exllamav2 kernels
FROM kernel-builder AS exllamav2-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-exllamav2/ Makefile
# Build specific version of transformers
RUN make build-exllamav2
# Build Transformers awq kernels
FROM kernel-builder AS awq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-awq Makefile
# Build specific version of transformers
RUN make build-awq
# Build eetq kernels
FROM kernel-builder AS eetq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-eetq Makefile
# Build specific version of transformers
RUN make build-eetq
# Build Lorax Punica kernels
FROM kernel-builder AS lorax-punica-builder
WORKDIR /usr/src
COPY server/Makefile-lorax-punica Makefile
# Build specific version of transformers
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-lorax-punica
# Build Transformers CUDA kernels
FROM kernel-builder AS custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
# Build specific version of transformers
RUN python setup.py build
# Build mamba kernels
FROM kernel-builder AS mamba-builder
WORKDIR /usr/src
COPY server/Makefile-selective-scan Makefile
RUN make build-all
# Build flashinfer
FROM kernel-builder AS flashinfer-builder
WORKDIR /usr/src
COPY server/Makefile-flashinfer Makefile
RUN make install-flashinfer
# Text Generation Inference base image
FROM nvidia/cuda:12.1.0-base-ubuntu22.04 AS base
# Conda env
ENV PATH=/opt/conda/bin:$PATH \
CONDA_PREFIX=/opt/conda
# Text Generation Inference base env
ENV HF_HOME=/data \
HF_HUB_ENABLE_HF_TRANSFER=1 \
PORT=80
WORKDIR /usr/src
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libssl-dev \
ca-certificates \
make \
curl \
git \
&& rm -rf /var/lib/apt/lists/*
# Install flash-attention dependencies
# RUN pip install einops --no-cache-dir
# Copy conda with PyTorch installed
COPY --from=pytorch-install /opt/conda /opt/conda
# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
ENV UV_SYSTEM_PYTHON=1
RUN cd server && \
pip install -U pip uv && \
uv sync --frozen --extra gen --extra attention --extra bnb --extra accelerate --extra compressed-tensors --extra marlin --extra moe --extra quantize --extra peft --extra outlines --no-install-project && \
. ./.venv/bin/activate && \
make gen-server-raw
RUN cd server && \
uv sync --frozen --extra gen --extra attention --extra bnb --extra accelerate --extra compressed-tensors --extra marlin --extra moe --extra quantize --extra peft --extra outlines && \
. ./.venv/bin/activate && \
pwd && \
text-generation-server --help
# Copy build artifacts from flash attention builder
COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /opt/conda/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from exllamav2 kernels builder
COPY --from=exllamav2-kernels-builder /usr/src/exllamav2/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from awq kernels builder
COPY --from=awq-kernels-builder /usr/src/llm-awq/awq/kernels/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from eetq kernels builder
COPY --from=eetq-kernels-builder /usr/src/eetq/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from lorax punica kernels builder
COPY --from=lorax-punica-builder /usr/src/lorax-punica/server/punica_kernels/build/lib.linux-x86_64-cpython-311 /usr/src/server/.venv/lib/python3.11/site-packages
# Copy build artifacts from mamba builder
COPY --from=mamba-builder /usr/src/mamba/build/lib.linux-x86_64-cpython-311/ /usr/src/server/.venv/lib/python3.11/site-packages
COPY --from=mamba-builder /usr/src/causal-conv1d/build/lib.linux-x86_64-cpython-311/ /usr/src/server/.venv/lib/python3.11/site-packages
COPY --from=flashinfer-builder /opt/conda/lib/python3.11/site-packages/flashinfer/ /usr/src/server/.venv/lib/python3.11/site-packages/flashinfer/
# ENV LD_PRELOAD=/opt/conda/lib/python3.11/site-packages/nvidia/nccl/lib/libnccl.so.2
# Required to find libpython within the rust binaries
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/conda/lib/"
# This is needed because exl2 tries to load flash-attn
# And fails with our builds.
ENV EXLLAMA_NO_FLASH_ATTN=1
# Deps before the binaries
# The binaries change on every build given we burn the SHA into them
# The deps change less often.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
g++ \
&& rm -rf /var/lib/apt/lists/*
# Install benchmarker
COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher
# AWS Sagemaker compatible image
FROM base AS sagemaker
COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
# Final image
FROM base
COPY ./tgi-entrypoint.sh /tgi-entrypoint.sh
RUN chmod +x /tgi-entrypoint.sh
ENTRYPOINT ["/tgi-entrypoint.sh"]
# CMD ["--json-output"]
| text-generation-inference/Dockerfile/0 | {
"file_path": "text-generation-inference/Dockerfile",
"repo_id": "text-generation-inference",
"token_count": 3963
} |
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod client;
mod sharded_client;
pub use client::Client;
pub use pb::generate::v2::HealthResponse;
pub use pb::generate::v2::{
Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType, InfoResponse,
NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens,
};
pub use sharded_client::ShardedClient;
| text-generation-inference/backends/client/src/v2/mod.rs/0 | {
"file_path": "text-generation-inference/backends/client/src/v2/mod.rs",
"repo_id": "text-generation-inference",
"token_count": 134
} |
#ifndef TGI_BACKEND_TRTLLM
#define TGI_BACKEND_TRTLLM
#include <cmath>
#include <cstdint>
#include <expected>
#include <fstream>
#include <list>
#include <span>
#include <nlohmann/json.hpp>
#include <spdlog/spdlog.h>
#include <spdlog/fmt/fmt.h>
#include <tensorrt_llm/executor/executor.h>
namespace huggingface::tgi::backends::trtllm {
namespace tle = tensorrt_llm::executor;
using json = nlohmann::json;
using request_id_t = uint64_t;
using token_id_t = tle::TokenIdType;
/**
* Represent the parameters used for generation
*/
struct generation_params_t {
uint32_t max_new_tokens;
};
/**
* Represent the parameters used to sample tokens from the logit distribution
*/
struct sampling_params_t {
uint32_t top_k;
float_t top_p;
float_t repetition_penalty;
float_t frequency_penalty;
float_t temperature;
uint64_t seed;
constexpr explicit operator tle::SamplingConfig() const {
return tle::SamplingConfig{
1,
top_k,
top_p,
std::nullopt,
std::nullopt,
std::nullopt,
seed,
temperature,
std::nullopt,
std::nullopt,
repetition_penalty,
std::nullopt,
frequency_penalty,
std::nullopt
};
}
};
/**
* Represent possible values from transformers generation `generation_config.json`.
* It usually stores default sampling parameters to use, such as top_p, temperature, etc.
*/
struct generation_config_t {
float_t top_p;
float_t temperature;
std::list<std::vector<int32_t>> stop_words;
constexpr explicit generation_config_t(const json &config) :
top_p(config.value("top_p", 1.0f)), temperature(config.value("temperature", 1.0f)), stop_words(0) {
if (config.contains("/eos_token_id"_json_pointer) && config["/eos_token_id"_json_pointer].is_array()) {
const auto &eos_token_id = config["/eos_token_id"_json_pointer];
std::for_each(eos_token_id.begin(), eos_token_id.end(), [this](const auto token_id) {
stop_words.emplace_back(1, token_id.template get<int32_t>());
});
SPDLOG_DEBUG("Detected {:d} predefined stop_words from generation_config.json", stop_words.size());
}
}
};
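    // Illustrative example (values made up) of the generation_config.json shape read above:
    //   { "top_p": 0.95, "temperature": 0.6, "eos_token_id": [1, 2] }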
/**
* Helper class representing various items which are stored within the TensorRT-LLM engines folder and
* can be retrieved at runtime
*/
class backend_workspace_t {
private:
constexpr static auto as_json = [](const std::filesystem::path &path) -> json {
std::ifstream config_f(path);
return json::parse(config_f);
};
std::filesystem::path engines_folder_;
std::filesystem::path executor_worker_path_;
json config_;
generation_config_t generation_config_;
public:
backend_workspace_t(std::filesystem::path &engines_folder, std::filesystem::path &executor_worker_path) :
engines_folder_(engines_folder),
executor_worker_path_(executor_worker_path),
config_(as_json(engines_folder / "config.json")),
generation_config_(as_json(engines_folder / "generation_config.json")) {};
backend_workspace_t(std::filesystem::path &&engines_folder, std::filesystem::path &&executor_worker_path) :
engines_folder_(engines_folder),
executor_worker_path_(executor_worker_path),
config_(as_json(engines_folder / "config.json")),
generation_config_(as_json(engines_folder / "generation_config.json")) {};
/**
* Path to the folder containing the TensorRT-LLM engines
* @return local filesystem path to the folder
*/
[[nodiscard]] constexpr std::filesystem::path engines_folder() const { return engines_folder_; }
/**
         * The `generation_config_t` mapping the information stored in Hugging Face transformers'
         * `generation_config.json`, which holds default generation parameters.
* @return `generation_config_t`
*/
[[nodiscard]] constexpr const generation_config_t &generation_config() const { return generation_config_; }
/**
* Factory method returning new `tensorrt_llm::executor::ParallelConfig` instance used
* to initialize `tensorrt_llm::executor::Executor` with multi-instance communication information
* @return `tensorrt_llm::executor::ParallelConfig` instance
*/
[[nodiscard]] tle::ParallelConfig parallel_config() const;
/**
* Factory method returning new `tensorrt_llm::executor::ExecutorConfig` instance used
* to initialize `tensorrt_llm::executor::Executor`
* @return `tensorrt_llm::executor::ExecutorConfig` instance
*/
[[nodiscard]] tle::ExecutorConfig executor_config() const;
};
/**
* Error raised by the underlying backend implementation
*/
enum backend_error_t {
EXECUTOR_NOT_READY = 3,
EXECUTOR_SCHEDULING_FAILED = 4,
};
/**
* Actual TensorRT-LLM backend implementation interacting with TensorRT-LLM Executor service to
* - schedule new request
* - pull status of submitted request(s)
* - cancel submitted request(s)
*/
class backend_t {
private:
backend_workspace_t workspace;
tle::Executor executor_;
public:
backend_t(std::filesystem::path &engines_folder, std::filesystem::path &executor_worker_path);
backend_t(std::filesystem::path &&engines_folder, std::filesystem::path &&executor_worker_path)
: backend_t(engines_folder, executor_worker_path) {};
/**
* Submit a new request to the executor
* @param token_ids
* @param generation_params
* @param sampling_params
* @return Either newly submitted request's id or the error why it failed to submit
*/
[[nodiscard("Discarded executor request_id needs to be assigned")]]
std::expected<request_id_t, backend_error_t>
submit(std::span<const token_id_t> token_ids, generation_params_t generation_params,
sampling_params_t sampling_params) noexcept;
/**
* Query the number of tokens available across all in-flight generations
* @return
*/
[[nodiscard("Pulling out the number of tokens")]]
size_t num_tokens_ready() const noexcept;
/**
* Pull out newly generated tokens from the executor
* @return
*/
[[nodiscard("")]]
std::vector<tle::Response> pull_tokens() noexcept;
/**
* Cancel the specified request on the executor' set
* @param request_id Request's Identifier to remove from the in-flight executor
*/
void cancel(request_id_t) noexcept;
};
/**
* Create a TensorRT-LLM executor from a workspace
*/
const auto executor_factory_initializer = [](const backend_workspace_t &workspace) -> tle::Executor {
return {workspace.engines_folder(), tensorrt_llm::executor::ModelType::kDECODER_ONLY,
workspace.executor_config()};
};
}
/**
* Helper structures to define formatting strategies for various types in the backend
*/
template<>
struct fmt::formatter<huggingface::tgi::backends::trtllm::generation_params_t> : formatter<string_view> {
auto format(huggingface::tgi::backends::trtllm::generation_params_t const &c,
format_context &ctx) const -> format_context::iterator {
return fmt::format_to(ctx.out(), "generation_params_t{{ max_new_tokens={:d} }}", c.max_new_tokens);
}
};
template<>
struct fmt::formatter<huggingface::tgi::backends::trtllm::sampling_params_t> : formatter<string_view> {
auto format(huggingface::tgi::backends::trtllm::sampling_params_t const &c,
format_context &ctx) const -> format_context::iterator {
return fmt::format_to(
ctx.out(),
"sampling_params_t{{ top_k={:d}, top_p={:.3f}, repetition_penalty={:.3f}, frequency_penalty={:.3f}, temperature={:.3f}, seed={:d} }}",
c.top_k, c.top_p, c.repetition_penalty, c.frequency_penalty, c.temperature, c.seed
);
}
};
#endif
| text-generation-inference/backends/trtllm/csrc/backend.hpp/0 | {
"file_path": "text-generation-inference/backends/trtllm/csrc/backend.hpp",
"repo_id": "text-generation-inference",
"token_count": 3772
} |
//! Text Generation gRPC client library
use async_trait::async_trait;
use thiserror::Error;
use tonic::transport;
use tonic::Status;
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod grpc_client;
mod sharded_client;
pub use grpc_client::Client;
pub use pb::generate::v2::{
Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType, HealthResponse,
InfoResponse, NextTokenChooserParameters, Request, StoppingCriteriaParameters,
};
pub use sharded_client::ShardedClient;
#[async_trait]
pub trait Health {
/// Check if a generate server is healthy by asking it to allocate a tensor on device
async fn device_health(&self) -> Result<()>;
/// Check if a generate server is healthy by doing a forward pass.
/// EXPENSIVE
async fn model_health(&self) -> Result<()>;
}
#[derive(Debug)]
pub struct ShardInfo {
pub requires_padding: bool,
pub dtype: String,
pub device_type: String,
pub window_size: Option<u32>,
pub speculate: u32,
}
#[derive(Error, Debug, Clone)]
pub enum ClientError {
#[error("Could not connect to Text Generation server: {0}")]
Connection(String),
#[error("Server error: {0}")]
Generation(String),
#[error("Sharded results are empty")]
EmptyResults,
}
impl From<Status> for ClientError {
fn from(err: Status) -> Self {
let err = Self::Generation(err.message().to_string());
tracing::error!("{err}");
err
}
}
impl From<transport::Error> for ClientError {
fn from(err: transport::Error) -> Self {
let err = Self::Connection(err.to_string());
tracing::error!("{err}");
err
}
}
static WARMUP_IMAGE_BASE64 :&str = "iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAABg2lDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV/TSotUROxQxCFDdbKLijjWKhShQqgVWnUwufQLmrQkKS6OgmvBwY/FqoOLs64OroIg+AHi7OCk6CIl/i8ptIjx4Lgf7+497t4BQqvKNDOQADTdMjKppJjLr4rBVwQQwhAERGVm1uckKQ3P8XUPH1/v4jzL+9yfY0AtmAzwicQJVjcs4g3imU2rznmfOMLKskp8Tjxh0AWJH7muuPzGueSwwDMjRjYzTxwhFks9rPQwKxsa8TRxTNV0yhdyLquctzhr1Qbr3JO/MFzQV5a5TnMUKSxiCRJEKGiggiosxGnVSTGRof2kh3/E8UvkUshVASPHAmrQIDt+8D/43a1ZnJp0k8JJoO/Ftj/GgOAu0G7a9vexbbdPAP8zcKV3/bUWMPtJerOrxY6AwW3g4rqrKXvA5Q4QfarLhuxIfppCsQi8n9E35YHhW6B/ze2ts4/TByBLXaVvgINDYLxE2ese7w719vbvmU5/PycecohsjayNAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH6AQIEQMnlTSSjwAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAASSURBVDjLY2AYBaNgFIyCoQsABMQAAeRw1DoAAAAASUVORK5CYII=";
pub type Result<T> = std::result::Result<T, ClientError>;
| text-generation-inference/backends/v2/src/client/mod.rs/0 | {
"file_path": "text-generation-inference/backends/v2/src/client/mod.rs",
"repo_id": "text-generation-inference",
"token_count": 1194
} |
use crate::block_allocator::{Allocator, BlockAllocation};
use slotmap::{DefaultKey, SlotMap};
use std::hash::{Hash, Hasher};
use std::{
collections::{BTreeSet, HashMap},
sync::Arc,
};
/// Hash a non-empty slice of token ids; a single-token slice uses the token id itself as its hash.
fn hash(slice: &[u32]) -> u64 {
assert!(!slice.is_empty());
if slice.len() == 1 {
slice[0] as u64
} else {
let mut s = std::hash::DefaultHasher::new();
slice.hash(&mut s);
s.finish()
}
}
pub struct RadixAllocator {
allocation_id: u64,
allocations: HashMap<u64, RadixAllocation>,
cache_blocks: RadixTrie,
/// Blocks that are immediately available for allocation.
free_blocks: Vec<u32>,
#[allow(dead_code)]
    // This isn't used because the prefix needs to match without the windowing
    // mechanism. At worst this overallocates; it is not incorrect.
window_size: Option<u32>,
block_size: u32,
}
impl RadixAllocator {
pub fn new(block_size: u32, n_blocks: u32, window_size: Option<u32>) -> Self {
RadixAllocator {
allocation_id: 0,
allocations: HashMap::new(),
cache_blocks: RadixTrie::new(block_size as usize),
// Block 0 is reserved for health checks.
free_blocks: (1..n_blocks).collect(),
window_size,
block_size,
}
}
fn alloc_or_reclaim(&mut self, n_blocks_needed: usize) -> Option<Vec<u32>> {
if self.free_blocks.len() < n_blocks_needed {
// This is a bit annoying, we first extend the free list and then
// split it off again below. This is because we need to put it on
// the free list if we cannot allocate enough blocks. This is only
// temporary, the trie needs to be able to report whether it can
// allocate the requested amount. Just not implemented yet.
tracing::debug!(
"Free blocks {} need {n_blocks_needed}",
self.free_blocks.len()
);
self.free_blocks.extend(
self.cache_blocks
.evict(n_blocks_needed - self.free_blocks.len()),
);
}
if self.free_blocks.len() >= n_blocks_needed {
Some(
self.free_blocks
.split_off(self.free_blocks.len() - n_blocks_needed),
)
} else {
None
}
}
}
// Allocator trait
impl Allocator for RadixAllocator {
fn allocate(
&mut self,
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation> {
let mut blocks = vec![];
let prefix_node = if let Some(prefill_tokens) = prefill_tokens.as_ref() {
let node_id = self
.cache_blocks
.find(prefill_tokens.as_slice(), &mut blocks);
node_id
} else {
self.cache_blocks.root_id()
};
        // Even if this allocation fails below, we need to increase the
// refcount to ensure that the prefix that was found is not evicted.
self.cache_blocks
.incref(prefix_node)
.expect("Failed to increment refcount");
let prefix_len = blocks.len() * self.block_size as usize;
let suffix_len = tokens - prefix_len as u32;
let suffix_blocks = suffix_len.div_ceil(self.block_size);
tracing::info!("Prefix {prefix_len} - Suffix {suffix_len}");
match self.alloc_or_reclaim(suffix_blocks as usize) {
Some(suffix_blocks) => blocks.extend(suffix_blocks),
None => {
tracing::debug!("Cannot allocate {:?}", self.cache_blocks);
tracing::debug!("Found {prefix_len} prefix tokens need {suffix_blocks} suffix blocks for {tokens} tokens");
tracing::debug!("Block size {}", self.block_size);
self.cache_blocks
.decref(prefix_node)
.expect("Failed to decrement refcount");
return None;
}
}
// 1:1 mapping of blocks and slots.
let slots = if self.block_size == 1 {
blocks.clone()
} else {
let mut slots = Vec::with_capacity(blocks.len() * self.block_size as usize);
'slots: for block_id in &blocks {
for s in (block_id * self.block_size)..((block_id + 1) * self.block_size) {
slots.push(s);
if slots.len() as u32 == tokens {
break 'slots;
}
}
}
slots
};
let allocation = RadixAllocation {
prefix_node,
cached_prefix_len: prefix_len,
prefill_tokens: prefill_tokens.clone(),
};
self.allocation_id += 1;
self.allocations.insert(self.allocation_id, allocation);
Some(BlockAllocation {
allocation_id: self.allocation_id,
block_allocator: None,
blocks,
slots,
prefix_len: prefix_len as u32,
})
}
fn free(&mut self, blocks: Vec<u32>, allocation_id: u64) {
let allocation = match self.allocations.remove(&allocation_id) {
Some(allocation) => allocation,
None => unreachable!("Tried to free an unknown allocation."),
};
self.cache_blocks
.decref(allocation.prefix_node)
.expect("Failed to decrement refcount");
if let Some(prefill_tokens) = allocation.prefill_tokens {
let prefill_tokens = prefill_tokens.as_slice();
// If there are prefill tokens that did not come from the cache,
// add them to the cache.
if prefill_tokens.len() > allocation.cached_prefix_len {
let aligned =
(prefill_tokens.len() / self.block_size as usize) * self.block_size as usize;
if aligned > 0 {
let prefix_len = self
.cache_blocks
.insert(
&prefill_tokens[..aligned],
&blocks[..aligned / self.block_size as usize],
)
// Unwrap, failing is a programming error.
.expect("Failed to store prefill tokens");
// We can have a prefill with the following structure:
//
// |---| From the prefix cache.
// A B C D E F G
//|--------| Found in the trie during insertion.
//
// This means that while processing this request there was a
// partially overlapping request that had A..=E in its
// prefill. In this case we need to free the blocks D E.
if prefix_len > allocation.cached_prefix_len {
self.free_blocks.extend(
&blocks[allocation.cached_prefix_len / self.block_size as usize
..prefix_len / self.block_size as usize],
);
}
}
}
// Free non-prefill blocks.
self.free_blocks
.extend(&blocks[prefill_tokens.len() / self.block_size as usize..]);
} else {
self.free_blocks.extend(blocks);
}
}
}
struct RadixAllocation {
prefix_node: NodeId,
cached_prefix_len: usize,
prefill_tokens: Option<Arc<Vec<u32>>>,
}
// Radix trie that is heavily inspired by radix attention from sglang.
//
// The trie is optimized for prefix caching:
//
// - A normal radix trie stores discrete values. In this radix trie,
// inserting *abc* with value *xyz* will also enable lookup for
// *a* (*x*) and *ab* (*xy*).
// - As a result, every value is required to have the same length as
// the key.
// - We store additional information in each node, such as last access
// time and a reference count.
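//
// Illustrative example (block_size = 1, mirroring the tests below): after
// `insert(&[0, 1, 2], &[0, 1, 2])`, calling `find(&[0, 1, 5], &mut blocks)`
// fills `blocks` with `[0, 1]`, the blocks of the shared prefix `[0, 1]`.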
#[derive(Debug)]
pub enum TrieError {
InvalidNodeId,
RefCountUnderflow,
}
pub type NodeId = DefaultKey;
#[derive(Debug)]
pub struct RadixTrie {
    /// Identifier of the root node.
root: DefaultKey,
    /// Leaf node identifiers ordered by increasing recency.
leaves: BTreeSet<(u64, NodeId)>,
/// All trie nodes.
nodes: SlotMap<NodeId, TrieNode>,
    /// Time as a monotonically increasing counter to avoid the system
/// call that a real time lookup would require.
time: u64,
/// All blocks need to be aligned with this
block_size: usize,
}
impl RadixTrie {
/// Construct a new radix trie.
pub fn new(block_size: usize) -> Self {
let root = TrieNode::new(vec![], vec![], 0, None);
let mut nodes = SlotMap::new();
let root = nodes.insert(root);
RadixTrie {
leaves: BTreeSet::new(),
nodes,
root,
time: 0,
block_size,
}
}
/// Find the prefix of the given tokens.
///
/// The blocks corresponding to the part of the prefix that could be found
/// are written to `blocks`. The number of blocks is in `0..=tokens.len()`.
/// Returns the identifier of the trie node that contains the longest
/// prefix. The node identifier can be used by callers to e.g. increase its
/// reference count.
///
/// Using this method will update the access time of the traversed nodes.
pub fn find(&mut self, key: &[u32], blocks: &mut Vec<u32>) -> NodeId {
self.time += 1;
self.find_(self.root, key, blocks)
}
/// Find worker.
fn find_(&mut self, mut node_id: NodeId, key: &[u32], blocks: &mut Vec<u32>) -> NodeId {
let node = &self.nodes[node_id];
if key.len() >= self.block_size {
let node_key = hash(&key[..self.block_size]);
if let Some(&child_id) = node.children.get(&node_key) {
self.update_access_time(child_id);
let child = self.nodes.get(child_id).expect("Invalid child identifier");
let shared_prefix_len = shared_prefix(&child.key, key, self.block_size);
assert_eq!(shared_prefix_len % self.block_size, 0);
blocks.extend(&child.blocks[..shared_prefix_len / self.block_size]);
let key = &key[shared_prefix_len..];
if !key.is_empty() {
node_id = self.find_(child_id, key, blocks);
}
}
}
node_id
}
/// Decrease the reference count of a node.
pub fn decref(&mut self, node_id: NodeId) -> Result<(), TrieError> {
// We don't care about refcounting for root, since it will never
// be evicted.
if node_id == self.root {
return Ok(());
}
let node = self
.nodes
.get_mut(node_id)
.ok_or(TrieError::InvalidNodeId)?;
if node.ref_count == 0 {
return Err(TrieError::RefCountUnderflow);
}
node.ref_count -= 1;
if node.ref_count == 0 {
assert!(
node.children.is_empty(),
"Nodes with children must have refcount > 0"
);
self.leaves.insert((node.last_accessed, node_id));
}
Ok(())
}
/// Increase the reference count of a node.
pub fn incref(&mut self, node_id: NodeId) -> Result<(), TrieError> {
if node_id == self.root {
return Ok(());
}
let node = self
.nodes
.get_mut(node_id)
.ok_or(TrieError::InvalidNodeId)?;
if node.ref_count == 0 {
self.leaves.remove(&(node.last_accessed, node_id));
}
node.ref_count += 1;
Ok(())
}
/// Evict `n_blocks` from the trie.
///
/// Returns the evicted blocks. When the length is less than `n_blocks`,
/// not enough blocks could be evicted.
pub fn evict(&mut self, n_blocks: usize) -> Vec<u32> {
// NOTE: we don't return Result here. If any of the unwrapping fails,
// it's a programming error in the trie implementation, not a user
// error caused by e.g. an invalid argument.
// TODO: add some bookkeeping in the future to check whether we can
// evict n_blocks and return `None` if we can't. We are now needlessly
// evicting prefixes from the cache in such a case.
let mut evicted = Vec::new();
tracing::debug!("Evicting in search of {n_blocks}");
while let Some((last_access, node_id)) = self.leaves.pop_first() {
let blocks_needed = n_blocks.saturating_sub(evicted.len());
tracing::debug!("Evicting node {node_id:?} ");
            let node = self.nodes.get(node_id).expect("Leaf does not exist");
assert_eq!(
node.ref_count, 0,
"Leaf must have refcount of 0, got {}",
node.ref_count
);
if blocks_needed >= node.blocks.len() {
// We need to evict the whole node if we need more blocks than it has.
let node = self.remove_node(node_id);
evicted.extend(node.blocks);
if evicted.len() >= n_blocks {
break;
}
} else {
// The node has more blocks than needed, so we'll just remove
// the required number of blocks and leave the remaining blocks
// untouched.
                let node = self.nodes.get_mut(node_id).expect("Leaf does not exist");
let truncate_blocks = node.blocks.len() - blocks_needed;
let truncate_tokens = truncate_blocks * self.block_size;
node.key.truncate(truncate_tokens);
evicted.extend(node.blocks.split_off(truncate_blocks));
self.leaves.insert((last_access, node_id));
break;
}
}
evicted
}
/// Insert a prefill along with its blocks.
///
/// This method returns the length of the prefix that was already
/// in the trie. E.g. if the length is 10, this means that for
    /// the first 10 elements of the trie **the blocks are not updated**.
pub fn insert(&mut self, tokens: &[u32], blocks: &[u32]) -> Result<usize, TrieError> {
self.time += 1;
let common = self.insert_(self.root, tokens, blocks)?;
Ok(common)
}
/// Insertion worker.
fn insert_(
&mut self,
node_id: NodeId,
tokens: &[u32],
blocks: &[u32],
) -> Result<usize, TrieError> {
// TODO: in the future we may want to check that the blocks match for
// the part of the prefix that is already in the trie to detect
// mismatches.
assert_eq!(tokens.len(), blocks.len() * self.block_size);
let node_key = hash(&tokens[..self.block_size]);
if let Some(&child_id) = self.nodes[node_id].children.get(&node_key) {
self.update_access_time(child_id);
let child = self
.nodes
.get_mut(child_id)
// Unwrap here, since failure is a bug.
.expect("Child node does not exist");
let shared_prefix_len = shared_prefix(&child.key, tokens, self.block_size);
// We are done, the prefix is already in the trie.
if shared_prefix_len == tokens.len() || shared_prefix_len == 0 {
return Ok(shared_prefix_len);
}
// The node's prefix is a prefix of the insertion prefix.
if shared_prefix_len == child.key.len() {
return Ok(shared_prefix_len
+ self.insert_(
child_id,
&tokens[shared_prefix_len..],
&blocks[shared_prefix_len / self.block_size..],
)?);
}
// The node's prefix and the insertion prefix only match partially,
// split the node to just contain the matching part. Then insert the
            // remainder of the prefix into the node again.
let child_id = self.split_node(child_id, shared_prefix_len);
let key = &tokens[shared_prefix_len..];
let blocks = &blocks[shared_prefix_len / self.block_size..];
Ok(shared_prefix_len + self.insert_(child_id, key, blocks)?)
} else {
self.add_node(node_id, tokens, blocks);
Ok(0)
}
}
fn split_node(&mut self, node_id: NodeId, prefix_len: usize) -> NodeId {
// We have to make the current node a child to ensure that its
// properties and node id stay the same.
        // This function unwraps; an invalid node_id is a programming error.
let node = self
.nodes
.get_mut(node_id)
.expect("Node to-be split does not exist");
let mut parent_key = node.key.split_off(prefix_len);
let prefix_blocks = prefix_len / self.block_size;
let mut parent_blocks = node.blocks.split_off(prefix_blocks);
// Move first part of the prefix to the parent. We swap to avoid
// an allocation + copy for both splits of the key/blocks.
std::mem::swap(&mut node.key, &mut parent_key);
std::mem::swap(&mut node.blocks, &mut parent_blocks);
let node_key = hash(&node.key[..self.block_size]);
let grandparent_id = node.parent.expect("Node does not have a parent");
let parent_id = self.add_node(grandparent_id, parent_key, parent_blocks);
self.add_node_to_parent(parent_id, node_key, node_id);
// Reborrow to make the borrow checker happy.
let node = self
.nodes
.get_mut(node_id)
.expect("Node to-be split does not exist");
node.parent = Some(parent_id);
parent_id
}
/// Create a node and add it to the parent.
fn add_node(
&mut self,
parent_id: NodeId,
key: impl Into<Vec<u32>>,
blocks: impl Into<Vec<u32>>,
) -> NodeId {
let key = key.into();
let blocks = blocks.into();
let first = hash(&key[..self.block_size]);
let child = TrieNode::new(key, blocks, self.time, Some(parent_id));
let child_id = self.nodes.insert(child);
self.add_node_to_parent(parent_id, first, child_id);
self.leaves.insert((self.time, child_id));
child_id
}
/// Add a node to the parent.
fn add_node_to_parent(&mut self, parent_id: NodeId, hash: u64, child_id: NodeId) {
// Unwrap here, passing in an unknown id is a programming error.
let parent = self.nodes.get_mut(parent_id).expect("Unknown parent node");
if parent.children.insert(hash, child_id).is_none() {
// Only increase reference count if child does not replace another child.
self.incref(parent_id)
.expect("Failed to increase parent refcount");
}
}
/// Remove a node from the trie.
fn remove_node(&mut self, node_id: NodeId) -> TrieNode {
// Unwrap here, passing in an unknown id is a programming error.
let node = self.nodes.remove(node_id).expect("Unknown node");
assert!(
node.children.is_empty(),
"Tried to remove a node with {} children",
node.children.len()
);
let parent_id = node.parent.expect("Attempted to remove root node");
let parent = self.nodes.get_mut(parent_id).expect("Unknown parent node");
let node_key = hash(&node.key[..self.block_size]);
parent.children.remove(&node_key);
self.decref(parent_id)
.expect("Failed to decrease parent refcount");
node
}
fn update_access_time(&mut self, node_id: NodeId) {
// Unwrap here, passing in an unknown id is a programming error.
let node = self.nodes.get_mut(node_id).expect("Unknown node");
        // Update the ordered leaves set if the node is a leaf.
if self.leaves.remove(&(node.last_accessed, node_id)) {
self.leaves.insert((self.time, node_id));
}
node.last_accessed = self.time;
}
#[allow(dead_code)]
#[doc(hidden)]
/// Print debugging output for the trie.
///
    /// In contrast to `Debug`, the output is nicely formatted.
pub fn print_debug(&self) {
self.print_debug_(self.root, 0);
}
fn print_debug_(&self, node_id: NodeId, indent: usize) {
let node = &self.nodes[node_id];
eprintln!(
"{}{:?}, key: {:?}, blocks: {:?}, ref_count: {}, last_accessed: {}, parent: {:?}, children: {:?}",
" ".repeat(indent),
node_id,
node.key,
node.blocks,
node.ref_count,
node.last_accessed,
node.parent,
node.children
);
for child_id in self.nodes[node_id].children.values() {
self.print_debug_(*child_id, indent + 2);
}
}
pub(crate) fn root_id(&self) -> DefaultKey {
self.root
}
}
/// Trie node.
#[derive(Debug)]
struct TrieNode {
blocks: Vec<u32>,
children: HashMap<u64, NodeId>,
key: Vec<u32>,
last_accessed: u64,
parent: Option<NodeId>,
ref_count: usize,
}
impl TrieNode {
fn new(key: Vec<u32>, blocks: Vec<u32>, last_accessed: u64, parent: Option<NodeId>) -> Self {
TrieNode {
children: HashMap::new(),
key,
blocks,
last_accessed,
parent,
ref_count: 0,
}
}
}
fn shared_prefix(left: &[u32], right: &[u32], block_size: usize) -> usize {
let full = left.iter().zip(right).take_while(|(a, b)| a == b).count();
// NOTE: this is the case because the child node was chosen based on
// matching the first character of the key/prefix.
assert!(full > 0, "Prefixes must at least share 1 token");
(full / block_size) * block_size
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
#[test]
fn allocator_block_size() {
let mut cache = RadixAllocator::new(2, 12, None);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22, 23]);
assert_eq!(allocation.prefix_len, 0);
cache.free(allocation.blocks.clone(), allocation.allocation_id);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22, 23]);
assert_eq!(allocation.prefix_len, 4);
}
#[test]
fn allocator_block_size_non_aligned() {
let mut cache = RadixAllocator::new(2, 12, None);
let allocation = cache.allocate(7, Some(Arc::new(vec![0, 1, 2]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22]);
assert_eq!(allocation.prefix_len, 0);
cache.free(allocation.blocks.clone(), allocation.allocation_id);
let allocation = cache.allocate(7, Some(Arc::new(vec![0, 1, 2]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22]);
assert_eq!(allocation.prefix_len, 2);
}
#[test]
fn allocator_reuses_prefixes() {
let mut cache = RadixAllocator::new(1, 12, None);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![4, 5, 6, 7, 8, 9, 10, 11]);
assert_eq!(allocation.blocks, allocation.slots);
assert_eq!(allocation.prefix_len, 0);
cache.free(allocation.blocks.clone(), allocation.allocation_id);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![4, 5, 6, 7, 8, 9, 10, 11]);
assert_eq!(allocation.prefix_len, 4);
}
#[test]
fn allocator_collects_older_prefixes_first() {
let mut cache = RadixAllocator::new(1, 7, None);
let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation1.blocks, vec![3, 4, 5, 6]);
assert_eq!(allocation1.prefix_len, 0);
let allocation2 = cache.allocate(2, Some(Arc::new(vec![4, 5]))).unwrap();
assert_eq!(allocation2.blocks, vec![1, 2]);
assert_eq!(allocation2.prefix_len, 0);
cache.free(allocation1.blocks.clone(), allocation1.allocation_id);
cache.free(allocation2.blocks.clone(), allocation2.allocation_id);
// We should get the blocks of the first allocation, since they are more recent.
let allocation3 = cache.allocate(4, Some(Arc::new(vec![6, 7, 8, 9]))).unwrap();
assert_eq!(allocation3.blocks, vec![3, 4, 5, 6]);
assert_eq!(allocation3.prefix_len, 0);
}
#[test]
fn allocator_frees_fully_overlapping_prefills() {
let mut cache = RadixAllocator::new(1, 10, None);
let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
let allocation2 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
cache.free(allocation2.blocks.clone(), allocation2.allocation_id);
cache.free(allocation1.blocks.clone(), allocation1.allocation_id);
let allocation3 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation3.prefix_len, 4);
// 10 blocks, of which 1 reserved for health checks, 4 for the cached blocks.
assert_eq!(cache.free_blocks.len(), 5);
}
#[test]
fn allocator_frees_partially_overlapping_prefills() {
let mut cache = RadixAllocator::new(1, 20, None);
let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1]))).unwrap();
assert_eq!(allocation1.blocks, vec![16, 17, 18, 19]);
assert_eq!(allocation1.prefix_len, 0);
cache.free(allocation1.blocks.clone(), allocation1.allocation_id);
let allocation2 = cache
.allocate(8, Some(Arc::new(vec![0, 1, 2, 3, 4, 5])))
.unwrap();
assert_eq!(allocation2.blocks, vec![16, 17, 12, 13, 14, 15, 18, 19]);
assert_eq!(allocation2.prefix_len, 2);
let allocation3 = cache
.allocate(8, Some(Arc::new(vec![0, 1, 2, 3, 6, 7])))
.unwrap();
assert_eq!(allocation3.blocks, vec![16, 17, 6, 7, 8, 9, 10, 11]);
assert_eq!(allocation3.prefix_len, 2);
cache.free(allocation3.blocks.clone(), allocation3.allocation_id);
cache.free(allocation2.blocks.clone(), allocation2.allocation_id);
// 20 blocks, of which 1 reserved for health checks, 6 for allocation3, 2 for allocation2.
assert_eq!(cache.free_blocks.len(), 11);
let allocation4 = cache
.allocate(6, Some(Arc::new(vec![0, 1, 2, 3, 4, 5])))
.unwrap();
assert_eq!(allocation4.blocks, vec![16, 17, 6, 7, 14, 15]);
assert_eq!(allocation4.prefix_len, 6);
assert_eq!(cache.free_blocks.len(), 11);
let allocation5 = cache
.allocate(6, Some(Arc::new(vec![0, 1, 2, 3, 6, 7])))
.unwrap();
assert_eq!(allocation5.blocks, vec![16, 17, 6, 7, 8, 9]);
assert_eq!(allocation5.prefix_len, 6);
assert_eq!(cache.free_blocks.len(), 11);
}
#[test]
fn trie_insertions_have_correct_prefix_len() {
let mut trie = RadixTrie::new(1);
assert_eq!(trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(), 0);
// Already exists.
assert_eq!(trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(), 3);
// Completely new at root-level
assert_eq!(trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap(), 0);
// Contains full prefix, but longer.
assert_eq!(trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap(), 3);
// Shares partial prefix, we need a split.
assert_eq!(
trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7])
.unwrap(),
4
);
}
#[test]
fn trie_insertions_block_size() {
let mut trie = RadixTrie::new(2);
assert_eq!(trie.insert(&[0, 1, 2, 3], &[0, 1]).unwrap(), 0);
// Already exists.
// But needs to be block_size aligned
assert_eq!(trie.insert(&[0, 1, 2, 3], &[0, 1]).unwrap(), 4);
// Completely new at root-level
assert_eq!(trie.insert(&[1, 2, 3, 4], &[1, 2]).unwrap(), 0);
// Contains full prefix, but longer.
assert_eq!(trie.insert(&[0, 1, 2, 3, 4, 5], &[0, 1, 2]).unwrap(), 4);
// Shares partial prefix, we need a split.
assert_eq!(
trie.insert(&[0, 1, 3, 4, 5, 6, 7, 8], &[0, 1, 2, 3])
.unwrap(),
2
);
}
#[test]
fn trie_get_returns_correct_blocks() {
let mut trie = RadixTrie::new(1);
trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap();
trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap();
trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap();
trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7])
.unwrap();
let mut blocks = Vec::new();
trie.find(&[0], &mut blocks);
assert_eq!(blocks, vec![0]);
blocks.clear();
trie.find(&[0, 1, 2], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2]);
blocks.clear();
trie.find(&[1, 2, 3], &mut blocks);
assert_eq!(blocks, vec![1, 2, 3]);
blocks.clear();
trie.find(&[0, 1, 2, 3], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 4], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3, 4]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 5], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3, 5]);
}
#[test]
fn trie_evict_removes_correct_blocks() {
let mut trie = RadixTrie::new(1);
trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap();
trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7])
.unwrap();
trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap();
trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap();
let mut blocks = Vec::new();
        // Remove fewer blocks than the leaf has.
assert_eq!(trie.evict(1), vec![7]);
trie.find(&[0, 1, 2, 3, 5, 6, 7], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3, 5, 6]);
// Refresh other leaf.
trie.find(&[0, 1, 2, 3, 4], &mut blocks);
trie.find(&[1, 2, 3], &mut blocks);
        // Remove the leaf's blocks exactly.
assert_eq!(trie.evict(2), vec![5, 6]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 5, 6, 7], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3]);
trie.find(&[1, 2, 3], &mut blocks);
        // Remove more blocks than the leaf has.
assert_eq!(trie.evict(3), vec![4, 3, 2]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 4], &mut blocks);
assert_eq!(blocks, vec![0, 1]);
// Clear out the whole trie.
assert_eq!(trie.evict(10), vec![1, 2, 3, 0, 1]);
}
}
| text-generation-inference/backends/v3/src/radix.rs/0 | {
"file_path": "text-generation-inference/backends/v3/src/radix.rs",
"repo_id": "text-generation-inference",
"token_count": 14966
} |
import pytest
from text_generation import Client, AsyncClient
from text_generation.errors import NotFoundError, ValidationError
from text_generation.types import FinishReason, InputToken
def test_generate(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
response = client.generate("test", max_new_tokens=1, decoder_input_details=True)
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
assert len(response.details.prefill) == 2
assert response.details.prefill[0] == InputToken(id=1, text="<s>", logprob=None)
assert len(response.details.tokens) == 1
assert response.details.tokens[0].id == 29918
assert response.details.tokens[0].text == "_"
assert not response.details.tokens[0].special
def test_generate_best_of(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
response = client.generate(
"test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True
)
assert response.details.seed is not None
assert response.details.best_of_sequences is not None
assert len(response.details.best_of_sequences) == 1
assert response.details.best_of_sequences[0].seed is not None
def test_generate_not_found(fake_url, hf_headers):
client = Client(fake_url, hf_headers)
with pytest.raises(NotFoundError):
client.generate("test")
def test_generate_validation_error(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
client.generate("test", max_new_tokens=10_000)
def test_generate_stream(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
responses = [
response for response in client.generate_stream("test", max_new_tokens=1)
]
assert len(responses) == 1
response = responses[0]
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
def test_generate_stream_not_found(fake_url, hf_headers):
client = Client(fake_url, hf_headers)
with pytest.raises(NotFoundError):
list(client.generate_stream("test"))
def test_generate_stream_validation_error(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
list(client.generate_stream("test", max_new_tokens=10_000))
@pytest.mark.asyncio
async def test_generate_async(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
response = await client.generate(
"test", max_new_tokens=1, decoder_input_details=True
)
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
assert len(response.details.prefill) == 2
assert response.details.prefill[0] == InputToken(id=1, text="<s>", logprob=None)
assert response.details.prefill[1] == InputToken(
id=1243, text="test", logprob=-10.96875
)
assert len(response.details.tokens) == 1
assert response.details.tokens[0].id == 29918
assert response.details.tokens[0].text == "_"
assert not response.details.tokens[0].special
@pytest.mark.asyncio
async def test_generate_async_best_of(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
response = await client.generate(
"test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True
)
assert response.details.seed is not None
assert response.details.best_of_sequences is not None
assert len(response.details.best_of_sequences) == 1
assert response.details.best_of_sequences[0].seed is not None
@pytest.mark.asyncio
async def test_generate_async_not_found(fake_url, hf_headers):
client = AsyncClient(fake_url, hf_headers)
with pytest.raises(NotFoundError):
await client.generate("test")
@pytest.mark.asyncio
async def test_generate_async_validation_error(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
await client.generate("test", max_new_tokens=10_000)
@pytest.mark.asyncio
async def test_generate_stream_async(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
responses = [
response async for response in client.generate_stream("test", max_new_tokens=1)
]
assert len(responses) == 1
response = responses[0]
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
@pytest.mark.asyncio
async def test_generate_stream_async_not_found(fake_url, hf_headers):
client = AsyncClient(fake_url, hf_headers)
with pytest.raises(NotFoundError):
async for _ in client.generate_stream("test"):
pass
@pytest.mark.asyncio
async def test_generate_stream_async_validation_error(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
async for _ in client.generate_stream("test", max_new_tokens=10_000):
pass
| text-generation-inference/clients/python/tests/test_client.py/0 | {
"file_path": "text-generation-inference/clients/python/tests/test_client.py",
"repo_id": "text-generation-inference",
"token_count": 2110
} |
# Consuming Text Generation Inference
There are many ways to consume the Text Generation Inference (TGI) server in your applications. After launching the server, you can use the [Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) `/v1/chat/completions` route and make a `POST` request to get results from the server. You can also pass `"stream": true` to the call if you want TGI to return a stream of tokens.
For more information on the API, consult the OpenAPI documentation of `text-generation-inference` available [here](https://huggingface.github.io/text-generation-inference).
You can make the requests using any tool of your preference, such as curl, Python, or TypeScript. For an end-to-end experience, we've open-sourced [ChatUI](https://github.com/huggingface/chat-ui), a chat interface for open-access models.
## curl
After a successful server launch, you can query the model using the `/v1/chat/completions` route to get responses that are compliant with the OpenAI Chat Completion spec:
```bash
curl localhost:8080/v1/chat/completions \
-X POST \
-d '{
"model": "tgi",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "What is deep learning?"
}
],
"stream": true,
"max_tokens": 20
}' \
-H 'Content-Type: application/json'
```
For non-chat use-cases, you can also use the `/generate` and `/generate_stream` routes.
```bash
curl 127.0.0.1:8080/generate \
-X POST \
-d '{
"inputs":"What is Deep Learning?",
"parameters":{
"max_new_tokens":20
}
}' \
-H 'Content-Type: application/json'
```
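The `/generate_stream` route returns server-sent events rather than a single JSON body. As an illustration (not an official client), here is a minimal Python sketch that consumes the stream with the `requests` library, assuming the same server as above on `127.0.0.1:8080`; the prompt and parameters are placeholders.

```python
import json

import requests

# Minimal sketch: consume the /generate_stream server-sent events.
# Each event line starts with "data:" followed by a JSON payload that
# contains the newly generated token.
with requests.post(
    "http://127.0.0.1:8080/generate_stream",
    json={"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}},
    stream=True,
) as response:
    for line in response.iter_lines():
        if line.startswith(b"data:"):
            event = json.loads(line[len(b"data:"):])
            print(event["token"]["text"], end="", flush=True)
```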
## Python
### Inference Client
[`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/main/en/index) is a Python library to interact with the Hugging Face Hub, including its endpoints. It provides a high-level class, [`huggingface_hub.InferenceClient`](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient), which makes it easy to make calls to TGI's Messages API. `InferenceClient` also takes care of parameter validation and provides a simple-to-use interface.
Install `huggingface_hub` package via pip.
```bash
pip install huggingface_hub
```
You can now use `InferenceClient` in exactly the same way you would use the `OpenAI` client in Python:
```python
from huggingface_hub import InferenceClient
client = InferenceClient(
base_url="http://localhost:8080/v1/",
)
output = client.chat.completions.create(
model="tgi",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Count to 10"},
],
stream=True,
max_tokens=1024,
)
for chunk in output:
print(chunk.choices[0].delta.content)
```
You can check out more details about OpenAI compatibility [here](https://huggingface.co/docs/huggingface_hub/en/guides/inference#openai-compatibility).
There is also an async version of the client, `AsyncInferenceClient`, based on `asyncio` and `aiohttp`. You can find its documentation [here](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.AsyncInferenceClient).
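As a minimal sketch (assuming the same local server as in the examples above), streaming a chat completion with the async client could look like this:

```python
import asyncio

from huggingface_hub import AsyncInferenceClient


async def main():
    # Assumes a TGI server running locally on port 8080.
    client = AsyncInferenceClient(base_url="http://localhost:8080/v1/")
    output = await client.chat.completions.create(
        model="tgi",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Count to 10"},
        ],
        stream=True,
        max_tokens=1024,
    )
    # The streamed chunks follow the same shape as in the sync example.
    async for chunk in output:
        print(chunk.choices[0].delta.content)


asyncio.run(main())
```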
### OpenAI Client
You can directly use the OpenAI [Python](https://github.com/openai/openai-python) or [JS](https://github.com/openai/openai-node) clients to interact with TGI.
Install the OpenAI Python package via pip.
```bash
pip install openai
```
```python
from openai import OpenAI
# init the client but point it to TGI
client = OpenAI(
base_url="http://localhost:8080/v1/",
api_key="-"
)
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{"role": "system", "content": "You are a helpful assistant." },
{"role": "user", "content": "What is deep learning?"}
],
stream=True
)
# iterate and print stream
for message in chat_completion:
print(message)
```
## UI
### Gradio
Gradio is a Python library that helps you build web applications for your machine learning models with a few lines of code. It has a `ChatInterface` wrapper that helps create neat UIs for chatbots. Let's take a look at how to create a chatbot with streaming mode using TGI and Gradio. First, install Gradio and the Hub Python library.
```bash
pip install huggingface-hub gradio
```
Assuming you are serving your model on port 8080, we will query it through [InferenceClient](consuming_tgi#inference-client).
```python
import gradio as gr
from huggingface_hub import InferenceClient
client = InferenceClient(base_url="http://127.0.0.1:8080")
def inference(message, history):
partial_message = ""
output = client.chat.completions.create(
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": message},
],
stream=True,
max_tokens=1024,
)
for chunk in output:
partial_message += chunk.choices[0].delta.content
yield partial_message
gr.ChatInterface(
inference,
chatbot=gr.Chatbot(height=300),
textbox=gr.Textbox(placeholder="Chat with me!", container=False, scale=7),
description="This is the demo for Gradio UI consuming TGI endpoint.",
title="Gradio 🤝 TGI",
examples=["Are tomatoes vegetables?"],
retry_btn="Retry",
undo_btn="Undo",
clear_btn="Clear",
).queue().launch()
```
You can check out the UI and try the demo directly here 👇
<div class="block dark:hidden">
<iframe
src="https://merve-gradio-tgi-2.hf.space?__theme=light"
width="850"
height="750"
></iframe>
</div>
<div class="hidden dark:block">
<iframe
src="https://merve-gradio-tgi-2.hf.space?__theme=dark"
width="850"
height="750"
></iframe>
</div>
You can read more about how to customize a `ChatInterface` [here](https://www.gradio.app/guides/creating-a-chatbot-fast).
### ChatUI
[ChatUI](https://github.com/huggingface/chat-ui) is an open-source interface built for consuming LLMs. It offers many customization options, such as web search with SERP API and more. ChatUI can automatically consume the TGI server and even provides an option to switch between different TGI endpoints. You can try it out at [Hugging Chat](https://huggingface.co/chat/), or use the [ChatUI Docker Space](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) to deploy your own Hugging Chat to Spaces.
To serve both ChatUI and TGI in the same environment, simply add your own endpoints to the `MODELS` variable in the `.env.local` file inside the `chat-ui` repository. Provide the endpoints pointing to where TGI is served.
```
{
// rest of the model config here
"endpoints": [{"url": "https://HOST:PORT/generate_stream"}]
}
```

| text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md",
"repo_id": "text-generation-inference",
"token_count": 2375
} |
# Text-generation-launcher arguments
<!-- WRAP CODE BLOCKS -->
```shell
Text Generation Launcher
Usage: text-generation-launcher [OPTIONS]
Options:
```
## MODEL_ID
```shell
--model-id <MODEL_ID>
The name of the model to load. Can be a MODEL_ID as listed on <https://hf.co/models> like `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`. Or it can be a local directory containing the necessary files as saved by `save_pretrained(...)` methods of transformers
[env: MODEL_ID=]
[default: bigscience/bloom-560m]
```
## REVISION
```shell
--revision <REVISION>
The actual revision of the model if you're referring to a model on the hub. You can use a specific commit id or a branch like `refs/pr/2`
[env: REVISION=]
```
## VALIDATION_WORKERS
```shell
--validation-workers <VALIDATION_WORKERS>
The number of tokenizer workers used for payload validation and truncation inside the router
[env: VALIDATION_WORKERS=]
[default: 2]
```
## SHARDED
```shell
--sharded <SHARDED>
          Whether to shard the model across multiple GPUs. By default text-generation-inference will use all available GPUs to run the model. Setting it to `false` deactivates `num_shard`
[env: SHARDED=]
[possible values: true, false]
```
## NUM_SHARD
```shell
--num-shard <NUM_SHARD>
          The number of shards to use if you don't want to use all GPUs on a given machine. You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2` and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to launch 2 copies with 2 shards each on a given machine with 4 GPUs for instance
[env: NUM_SHARD=]
```
## QUANTIZE
```shell
--quantize <QUANTIZE>
Quantization method to use for the model. It is not necessary to specify this option for pre-quantized models, since the quantization method is read from the model configuration.
Marlin kernels will be used automatically for GPTQ/AWQ models.
[env: QUANTIZE=]
Possible values:
- awq: 4 bit quantization. Requires a specific AWQ quantized model: <https://hf.co/models?search=awq>. Should replace GPTQ models wherever possible because of the better latency
- compressed-tensors: Compressed tensors, which can be a mixture of different quantization methods
- eetq: 8 bit quantization, doesn't require specific model. Should be a drop-in replacement to bitsandbytes with much better performance. Kernels are from <https://github.com/NetEase-FuXi/EETQ.git>
- exl2: Variable bit quantization. Requires a specific EXL2 quantized model: <https://hf.co/models?search=exl2>. Requires exllama2 kernels and does not support tensor parallelism (num_shard > 1)
      - gptq: 4 bit quantization. Requires a specific GPTQ quantized model: <https://hf.co/models?search=gptq>. text-generation-inference will use exllama (faster) kernels wherever possible, and use the triton kernel (wider support) when it's not. AWQ has faster kernels
- marlin: 4 bit quantization. Requires a specific Marlin quantized model: <https://hf.co/models?search=marlin>
- bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16
- bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16
      - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for your model
      - fp8: [FP8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/) (e4m3) works on H100 and above. This dtype has native ops and should be the fastest if available. This is currently not the fastest because of local unpacking + padding to satisfy matrix multiplication limitations
```
## SPECULATE
```shell
--speculate <SPECULATE>
          The number of input_ids to speculate on. If using a medusa model, the heads will be picked up automatically. Otherwise, it will use n-gram speculation, which is relatively free in terms of compute, but the speedup heavily depends on the task
[env: SPECULATE=]
```
## DTYPE
```shell
--dtype <DTYPE>
The dtype to be forced upon the model. This option cannot be used with `--quantize`
[env: DTYPE=]
[possible values: float16, bfloat16]
```
## KV_CACHE_DTYPE
```shell
--kv-cache-dtype <KV_CACHE_DTYPE>
          Specify the dtype for the key-value cache. When this option is not provided, the dtype of the model is used (typically `float16` or `bfloat16`). Currently the only supported values are `fp8_e4m3fn` and `fp8_e5m2` on CUDA
[env: KV_CACHE_DTYPE=]
[possible values: fp8_e4m3fn, fp8_e5m2]
```
## TRUST_REMOTE_CODE
```shell
--trust-remote-code
Whether you want to execute hub modelling code. Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision
[env: TRUST_REMOTE_CODE=]
```
## MAX_CONCURRENT_REQUESTS
```shell
--max-concurrent-requests <MAX_CONCURRENT_REQUESTS>
          The maximum number of concurrent requests for this particular deployment. Having a low limit will refuse client requests instead of having them wait for too long, which is usually good for handling backpressure correctly
[env: MAX_CONCURRENT_REQUESTS=]
[default: 128]
```
## MAX_BEST_OF
```shell
--max-best-of <MAX_BEST_OF>
This is the maximum allowed value for clients to set `best_of`. Best of makes `n` generations at the same time, and return the best in terms of overall log probability over the entire generated sequence
[env: MAX_BEST_OF=]
[default: 2]
```
## MAX_STOP_SEQUENCES
```shell
--max-stop-sequences <MAX_STOP_SEQUENCES>
This is the maximum allowed value for clients to set `stop_sequences`. Stop sequences are used to allow the model to stop on more than just the EOS token, and enable more complex "prompting" where users can preprompt the model in a specific way and define their "own" stop token aligned with their prompt
[env: MAX_STOP_SEQUENCES=]
[default: 4]
```
## MAX_TOP_N_TOKENS
```shell
--max-top-n-tokens <MAX_TOP_N_TOKENS>
          This is the maximum allowed value for clients to set `top_n_tokens`. `top_n_tokens` is used to return information about the `n` most likely tokens at each generation step, instead of just the sampled token. This information can be used for downstream tasks like classification or ranking
[env: MAX_TOP_N_TOKENS=]
[default: 5]
```
## MAX_INPUT_TOKENS
```shell
--max-input-tokens <MAX_INPUT_TOKENS>
          This is the maximum allowed input length (expressed in number of tokens) for users. The larger this value, the longer the prompts users can send, which can impact the overall memory required to handle the load. Please note that some models have a finite range of sequences they can handle. Defaults to min(max_allocatable, max_position_embeddings) - 1
[env: MAX_INPUT_TOKENS=]
```
## MAX_INPUT_LENGTH
```shell
--max-input-length <MAX_INPUT_LENGTH>
Legacy version of [`Args::max_input_tokens`]
[env: MAX_INPUT_LENGTH=]
```
## MAX_TOTAL_TOKENS
```shell
--max-total-tokens <MAX_TOTAL_TOKENS>
          This is the most important value to set as it defines the "memory budget" of running clients requests. Clients will send input sequences and ask to generate `max_new_tokens` on top. With a value of `1512`, users can send either a prompt of `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for `1511` max_new_tokens. The larger this value, the larger each request will be in your RAM and the less effective batching can be. Defaults to min(max_allocatable, max_position_embeddings)
[env: MAX_TOTAL_TOKENS=]
```
## WAITING_SERVED_RATIO
```shell
--waiting-served-ratio <WAITING_SERVED_RATIO>
          This represents the ratio of waiting queries vs running queries where you want to start considering pausing the running queries to include the waiting ones into the same batch. `waiting_served_ratio=1.2` means that when 12 queries are waiting and there are only 10 queries left in the current batch, we check if we can fit those 12 waiting queries into the batching strategy, and if yes, then batching happens, delaying the 10 running queries by a `prefill` run.
This setting is only applied if there is room in the batch as defined by `max_batch_total_tokens`.
[env: WAITING_SERVED_RATIO=]
[default: 0.3]
```
## MAX_BATCH_PREFILL_TOKENS
```shell
--max-batch-prefill-tokens <MAX_BATCH_PREFILL_TOKENS>
          Limits the number of tokens for the prefill operation. Since this operation takes the most memory and is compute bound, it is useful to limit the number of requests that can be sent. Defaults to `max_input_tokens + 50` to give a bit of room
[env: MAX_BATCH_PREFILL_TOKENS=]
```
## MAX_BATCH_TOTAL_TOKENS
```shell
--max-batch-total-tokens <MAX_BATCH_TOTAL_TOKENS>
**IMPORTANT** This is one critical control to allow maximum usage of the available hardware.
          This represents the total amount of potential tokens within a batch. When using padding (not recommended) this would be equivalent to `batch_size` * `max_total_tokens`.
However in the non-padded (flash attention) version this can be much finer.
For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100` or a single query of `1000` tokens.
Overall this number should be the largest possible amount that fits the remaining memory (after the model is loaded). Since the actual memory overhead depends on other parameters like if you're using quantization, flash attention or the model implementation, text-generation-inference cannot infer this number automatically.
[env: MAX_BATCH_TOTAL_TOKENS=]
```
## MAX_WAITING_TOKENS
```shell
--max-waiting-tokens <MAX_WAITING_TOKENS>
This setting defines how many tokens can be passed before forcing the waiting queries to be put on the batch (if the size of the batch allows for it). New queries require 1 `prefill` forward, which is different from `decode` and therefore you need to pause the running batch in order to run `prefill` to create the correct values for the waiting queries to be able to join the batch.
With a value too small, queries will always "steal" the compute to run `prefill` and running queries will be delayed by a lot.
With a value too big, waiting queries could wait for a very long time before being allowed a slot in the running batch. If your server is busy that means that requests that could run in ~2s on an empty server could end up running in ~20s because the query had to wait for 18s.
This number is expressed in number of tokens to make it a bit more "model" agnostic, but what should really matter is the overall latency for end users.
[env: MAX_WAITING_TOKENS=]
[default: 20]
```
## MAX_BATCH_SIZE
```shell
--max-batch-size <MAX_BATCH_SIZE>
          Enforce a maximum number of requests per batch. Specific flag for hardware targets that do not support unpadded inference
[env: MAX_BATCH_SIZE=]
```
## CUDA_GRAPHS
```shell
--cuda-graphs <CUDA_GRAPHS>
Specify the batch sizes to compute cuda graphs for. Use "0" to disable. Default = "1,2,4,8,16,32"
[env: CUDA_GRAPHS=]
```
## HOSTNAME
```shell
--hostname <HOSTNAME>
The IP address to listen on
[env: HOSTNAME=]
[default: 0.0.0.0]
```
## PORT
```shell
-p, --port <PORT>
The port to listen on
[env: PORT=]
[default: 3000]
```
## SHARD_UDS_PATH
```shell
--shard-uds-path <SHARD_UDS_PATH>
The name of the socket for gRPC communication between the webserver and the shards
[env: SHARD_UDS_PATH=]
[default: /tmp/text-generation-server]
```
## MASTER_ADDR
```shell
--master-addr <MASTER_ADDR>
The address the master shard will listen on. (setting used by torch distributed)
[env: MASTER_ADDR=]
[default: localhost]
```
## MASTER_PORT
```shell
--master-port <MASTER_PORT>
The address the master port will listen on. (setting used by torch distributed)
[env: MASTER_PORT=]
[default: 29500]
```
## HUGGINGFACE_HUB_CACHE
```shell
--huggingface-hub-cache <HUGGINGFACE_HUB_CACHE>
The location of the huggingface hub cache. Used to override the location if you want to provide a mounted disk for instance
[env: HUGGINGFACE_HUB_CACHE=]
```
## WEIGHTS_CACHE_OVERRIDE
```shell
--weights-cache-override <WEIGHTS_CACHE_OVERRIDE>
The location of the huggingface hub cache. Used to override the location if you want to provide a mounted disk for instance
[env: WEIGHTS_CACHE_OVERRIDE=]
```
## DISABLE_CUSTOM_KERNELS
```shell
--disable-custom-kernels
For some models (like bloom), text-generation-inference implemented custom cuda kernels to speed up inference. Those kernels were only tested on A100. Use this flag to disable them if you're running on different hardware and encounter issues
[env: DISABLE_CUSTOM_KERNELS=]
```
## CUDA_MEMORY_FRACTION
```shell
--cuda-memory-fraction <CUDA_MEMORY_FRACTION>
Limit the CUDA available memory. The allowed value equals the total visible memory multiplied by cuda-memory-fraction
[env: CUDA_MEMORY_FRACTION=]
[default: 1.0]
```
## ROPE_SCALING
```shell
--rope-scaling <ROPE_SCALING>
          Rope scaling will only be used for RoPE models and allows rescaling the position rotary to accommodate larger prompts.
Goes together with `rope_factor`.
          `--rope-factor 2.0` gives linear scaling with a factor of 2.0. `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0. `--rope-scaling linear` gives linear scaling with a factor of 1.0 (basically nothing will be changed)
`--rope-scaling linear --rope-factor` fully describes the scaling you want
[env: ROPE_SCALING=]
[possible values: linear, dynamic]
```
## ROPE_FACTOR
```shell
--rope-factor <ROPE_FACTOR>
          Rope scaling will only be used for RoPE models. See `rope_scaling`
[env: ROPE_FACTOR=]
```
## JSON_OUTPUT
```shell
--json-output
Outputs the logs in JSON format (useful for telemetry)
[env: JSON_OUTPUT=]
```
## OTLP_ENDPOINT
```shell
--otlp-endpoint <OTLP_ENDPOINT>
[env: OTLP_ENDPOINT=]
```
## OTLP_SERVICE_NAME
```shell
--otlp-service-name <OTLP_SERVICE_NAME>
[env: OTLP_SERVICE_NAME=]
[default: text-generation-inference.router]
```
## CORS_ALLOW_ORIGIN
```shell
--cors-allow-origin <CORS_ALLOW_ORIGIN>
[env: CORS_ALLOW_ORIGIN=]
```
## API_KEY
```shell
--api-key <API_KEY>
[env: API_KEY=]
```
## WATERMARK_GAMMA
```shell
--watermark-gamma <WATERMARK_GAMMA>
[env: WATERMARK_GAMMA=]
```
## WATERMARK_DELTA
```shell
--watermark-delta <WATERMARK_DELTA>
[env: WATERMARK_DELTA=]
```
## NGROK
```shell
--ngrok
Enable ngrok tunneling
[env: NGROK=]
```
## NGROK_AUTHTOKEN
```shell
--ngrok-authtoken <NGROK_AUTHTOKEN>
ngrok authentication token
[env: NGROK_AUTHTOKEN=]
```
## NGROK_EDGE
```shell
--ngrok-edge <NGROK_EDGE>
ngrok edge
[env: NGROK_EDGE=]
```
## TOKENIZER_CONFIG_PATH
```shell
--tokenizer-config-path <TOKENIZER_CONFIG_PATH>
The path to the tokenizer config file. This path is used to load the tokenizer configuration which may include a `chat_template`. If not provided, the default config will be used from the model hub
[env: TOKENIZER_CONFIG_PATH=]
```
## DISABLE_GRAMMAR_SUPPORT
```shell
--disable-grammar-support
Disable outlines grammar constrained generation. This is a feature that allows you to generate text that follows a specific grammar
[env: DISABLE_GRAMMAR_SUPPORT=]
```
## ENV
```shell
-e, --env
Display a lot of information about your runtime environment
```
## MAX_CLIENT_BATCH_SIZE
```shell
--max-client-batch-size <MAX_CLIENT_BATCH_SIZE>
Control the maximum number of inputs that a client can send in a single request
[env: MAX_CLIENT_BATCH_SIZE=]
[default: 4]
```
## LORA_ADAPTERS
```shell
--lora-adapters <LORA_ADAPTERS>
          Lora Adapters: a list of adapter ids, i.e. `repo/adapter1,repo/adapter2`, to load during startup; these will be available to callers via the `adapter_id` field in a request
[env: LORA_ADAPTERS=]
```
## USAGE_STATS
```shell
--usage-stats <USAGE_STATS>
          Control if anonymous usage stats are collected. Options are "on", "off" and "no-stack". Default is "on"
[env: USAGE_STATS=]
[default: on]
Possible values:
- on: Default option, usage statistics are collected anonymously
- off: Disables all collection of usage statistics
- no-stack: Doesn't send the error stack trace or error type, but allows sending a crash event
```
## PAYLOAD_LIMIT
```shell
--payload-limit <PAYLOAD_LIMIT>
Payload size limit in bytes
Default is 2MB
[env: PAYLOAD_LIMIT=]
[default: 2000000]
```
## ENABLE_PREFILL_LOGPROBS
```shell
--enable-prefill-logprobs
Enables prefill logprobs
          Logprobs in the prompt are deactivated by default because they consume a large amount of VRAM (especially for long prompts). Using this flag allows users to ask for them again.
[env: ENABLE_PREFILL_LOGPROBS=]
```
## HELP
```shell
-h, --help
Print help (see a summary with '-h')
```
## VERSION
```shell
-V, --version
Print version
```
| text-generation-inference/docs/source/reference/launcher.md/0 | {
"file_path": "text-generation-inference/docs/source/reference/launcher.md",
"repo_id": "text-generation-inference",
"token_count": 7683
} |
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"text": " A Beginner’s Guide\nDeep learning is a subset"
},
{
"finish_reason": "length",
"index": 1,
"logprobs": null,
"text": " This is a question that has puzzled many people for"
},
{
"finish_reason": "length",
"index": 3,
"logprobs": null,
"text": "usculas_minusculas(s):\n \"\"\"\n"
},
{
"finish_reason": "length",
"index": 2,
"logprobs": null,
"text": " Paris\nWhat is the capital of France?\nThe"
}
],
"created": 1725877154,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "text_completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 40,
"prompt_tokens": 22,
"total_tokens": 62
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts.json",
"repo_id": "text-generation-inference",
"token_count": 434
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 34564,
"logprob": -1.765625,
"special": false,
"text": "Deep"
},
{
"id": 6975,
"logprob": -0.023864746,
"special": false,
"text": " learning"
},
{
"id": 374,
"logprob": -0.1060791,
"special": false,
"text": " is"
},
{
"id": 264,
"logprob": -0.1940918,
"special": false,
"text": " a"
},
{
"id": 27084,
"logprob": -0.79785156,
"special": false,
"text": " subset"
},
{
"id": 315,
"logprob": -0.008262634,
"special": false,
"text": " of"
},
{
"id": 5780,
"logprob": -0.046569824,
"special": false,
"text": " machine"
},
{
"id": 6975,
"logprob": -0.0023479462,
"special": false,
"text": " learning"
},
{
"id": 430,
"logprob": -0.7626953,
"special": false,
"text": " that"
},
{
"id": 5829,
"logprob": -1.0107422,
"special": false,
"text": " uses"
}
],
"top_tokens": null
},
"generated_text": "Deep learning is a subset of machine learning that uses"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_wna16_int_24/test_compressed_tensors_wna16_int_24.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_wna16_int_24/test_compressed_tensors_wna16_int_24.json",
"repo_id": "text-generation-inference",
"token_count": 868
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 7539,
"logprob": -0.609375,
"special": false,
"text": " forms"
},
{
"id": 708,
"logprob": 0.0,
"special": false,
"text": " are"
},
{
"id": 671,
"logprob": -1.5546875,
"special": false,
"text": " an"
},
{
"id": 8727,
"logprob": 0.0,
"special": false,
"text": " essential"
},
{
"id": 1702,
"logprob": 0.0,
"special": false,
"text": " part"
},
{
"id": 576,
"logprob": 0.0,
"special": false,
"text": " of"
},
{
"id": 573,
"logprob": 0.0,
"special": false,
"text": " the"
},
{
"id": 11859,
"logprob": -1.953125,
"special": false,
"text": " lab"
},
{
"id": 2185,
"logprob": -1.7734375,
"special": false,
"text": " process"
},
{
"id": 235265,
"logprob": 0.0,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": "Test request forms are an essential part of the lab process."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 849
} |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5351562,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5566406,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2519531,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.03414917,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.96240234,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.3647461,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.012901306,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.1542969,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json",
"repo_id": "text-generation-inference",
"token_count": 4045
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 25584,
"logprob": 0.0,
"special": false,
"text": "Grad"
},
{
"id": 993,
"logprob": 0.0,
"special": false,
"text": "ient"
},
{
"id": 2726,
"logprob": 0.0,
"special": false,
"text": " Des"
},
{
"id": 1760,
"logprob": 0.0,
"special": false,
"text": "cent"
},
{
"id": 313,
"logprob": -0.12322998,
"special": false,
"text": " ("
},
{
"id": 29954,
"logprob": 0.0,
"special": false,
"text": "G"
},
{
"id": 29928,
"logprob": 0.0,
"special": false,
"text": "D"
},
{
"id": 29897,
"logprob": 0.0,
"special": false,
"text": ")"
},
{
"id": 338,
"logprob": -0.6040039,
"special": false,
"text": " is"
},
{
"id": 385,
"logprob": -0.1796875,
"special": false,
"text": " an"
}
],
"top_tokens": null
},
"generated_text": "What is gradient descent?\nGradient Descent (GD) is an"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi35_moe/test_flash_phi35_moe_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi35_moe/test_flash_phi35_moe_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 849
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 19,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 415,
"logprob": -0.03665161,
"special": false,
"text": " The"
},
{
"id": 12072,
"logprob": -0.13549805,
"special": false,
"text": " cow"
},
{
"id": 349,
"logprob": -0.05819702,
"special": false,
"text": " is"
},
{
"id": 6328,
"logprob": -0.6826172,
"special": false,
"text": " standing"
},
{
"id": 356,
"logprob": -0.1607666,
"special": false,
"text": " on"
},
{
"id": 272,
"logprob": -0.5073242,
"special": false,
"text": " the"
},
{
"id": 10305,
"logprob": -0.016418457,
"special": false,
"text": " beach"
},
{
"id": 304,
"logprob": -1.3916016,
"special": false,
"text": " and"
},
{
"id": 272,
"logprob": -0.020217896,
"special": false,
"text": " the"
},
{
"id": 13088,
"logprob": -0.0028133392,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.003145218,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.37060547,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.034851074,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.2878418,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.046051025,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00028848648,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.025772095,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.018127441,
"special": false,
"text": "."
},
{
"id": 32002,
"logprob": -0.00019824505,
"special": true,
"text": "<end_of_utterance>"
}
],
"top_tokens": null
},
"generated_text": " The cow is standing on the beach and the chicken is sitting on a pile of money."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_two_images.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_two_images.json",
"repo_id": "text-generation-inference",
"token_count": 1559
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 5,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": 0,
"tokens": [
{
"id": 926,
"logprob": -4.3554688,
"special": false,
"text": " To"
},
{
"id": 18295,
"logprob": -7.7734375,
"special": false,
"text": " sell"
},
{
"id": 7868,
"logprob": -3.9257812,
"special": false,
"text": " things"
},
{
"id": 260,
"logprob": -2.4179688,
"special": false,
"text": "."
},
{
"id": 1,
"logprob": 0.0,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "To sell things."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json",
"repo_id": "text-generation-inference",
"token_count": 532
} |
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "I am an AI assistant",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1728497062,
"id": "",
"model": "meta-llama/Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.4.2-dev0-native",
"usage": {
"completion_tokens": 23,
"prompt_tokens": 604,
"total_tokens": 627
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information.json",
"repo_id": "text-generation-inference",
"token_count": 275
} |
import pytest
import requests
@pytest.fixture(scope="module")
def llama_continue_final_message_handle(launcher):
with launcher("TinyLlama/TinyLlama-1.1B-Chat-v1.0") as handle:
yield handle
@pytest.fixture(scope="module")
async def llama_continue_final_message(llama_continue_final_message_handle):
await llama_continue_final_message_handle.health(300)
return llama_continue_final_message_handle.client
def test_llama_completion_single_prompt(
llama_continue_final_message, response_snapshot
):
response = requests.post(
f"{llama_continue_final_message.base_url}/v1/chat/completions",
json={
"model": "tgi",
"messages": [
{"role": "system", "content": "system message"},
{"role": "user", "content": "Which is bigger an elephant or a mouse?"},
],
"max_tokens": 30,
"stream": False,
"seed": 1337,
},
headers=llama_continue_final_message.headers,
stream=False,
)
response = response.json()
print(response)
assert len(response["choices"]) == 1
content = response["choices"][0]["message"]["content"]
assert (
content
== "Both an elephant and a mouse are mammals. However, the differences between elephants and mice are:\n\n1"
)
assert response == response_snapshot
def test_llama_completion_single_prompt_continue(
llama_continue_final_message, response_snapshot
):
response = requests.post(
f"{llama_continue_final_message.base_url}/v1/chat/completions",
json={
"model": "tgi",
"messages": [
{"role": "system", "content": "system message"},
{"role": "user", "content": "Which is bigger an elephant or a mouse?"},
{
"role": "assistant",
"content": "the elephant, but have you heard about",
},
],
"max_tokens": 30,
"stream": False,
"seed": 1337,
},
headers=llama_continue_final_message.headers,
stream=False,
)
response = response.json()
print(response)
assert len(response["choices"]) == 1
content = response["choices"][0]["message"]["content"]
assert (
content
== " the royal mouse? It is a little more slender and only weighs around 1.5 pounds for males and 1.3 pounds"
)
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_continue_final_message.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_continue_final_message.py",
"repo_id": "text-generation-inference",
"token_count": 1102
} |
import pytest
@pytest.fixture(scope="module")
def flash_llama_marlin24_handle(launcher):
with launcher(
"nm-testing/Llama-2-7b-pruned2.4-Marlin_24", quantize="marlin"
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_marlin(flash_llama_marlin24_handle):
await flash_llama_marlin24_handle.health(300)
return flash_llama_marlin24_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin(flash_llama_marlin, response_snapshot):
response = await flash_llama_marlin.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin24_all_params(flash_llama_marlin, response_snapshot):
response = await flash_llama_marlin.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin24_load(
flash_llama_marlin, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_marlin, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama_marlin_24.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_marlin_24.py",
"repo_id": "text-generation-inference",
"token_count": 754
} |
import pytest
@pytest.fixture(scope="module")
def flash_santacoder_handle(launcher):
with launcher("bigcode/santacoder") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_santacoder(flash_santacoder_handle):
await flash_santacoder_handle.health(300)
return flash_santacoder_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_santacoder(flash_santacoder, response_snapshot):
response = await flash_santacoder.generate(
"def print_hello", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_santacoder_load(
flash_santacoder, generate_load, response_snapshot
):
responses = await generate_load(
flash_santacoder, "def print_hello", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_santacoder.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_santacoder.py",
"repo_id": "text-generation-inference",
"token_count": 403
} |
import pytest
@pytest.fixture(scope="module")
def neox_handle(launcher):
with launcher(
"stabilityai/stablelm-tuned-alpha-3b", num_shard=1, use_flash_attention=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def neox(neox_handle):
await neox_handle.health(300)
return neox_handle.client
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox, response_snapshot):
response = await neox.generate(
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox, generate_load, response_snapshot):
responses = await generate_load(
neox,
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
    assert all(text == generated_texts[0] for text in generated_texts)
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_neox.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_neox.py",
"repo_id": "text-generation-inference",
"token_count": 515
} |
import argparse
import datetime
import json
import os
import traceback
from typing import Dict, Tuple, List
import GPUtil
import docker
from docker.models.containers import Container
from loguru import logger
import pandas as pd
class InferenceEngineRunner:
def __init__(self, model: str):
self.model = model
    def run(self, parameters: list[tuple], gpus: int = 0):
        raise NotImplementedError("This method should be implemented by the subclass")
    def stop(self):
        raise NotImplementedError("This method should be implemented by the subclass")
class TGIDockerRunner(InferenceEngineRunner):
def __init__(
self,
model: str,
image: str = "ghcr.io/huggingface/text-generation-inference:latest",
volumes=None,
):
super().__init__(model)
if volumes is None:
volumes = []
self.container = None
self.image = image
self.volumes = volumes
def run(self, parameters: list[tuple], gpus: int = 0):
params = f"--model-id {self.model} --port 8080"
for p in parameters:
params += f" --{p[0]} {str(p[1])}"
logger.info(f"Running TGI with parameters: {params}")
volumes = {}
for v in self.volumes:
volumes[v[0]] = {"bind": v[1], "mode": "rw"}
self.container = run_docker(
self.image,
params,
"Connected",
"ERROR",
volumes=volumes,
gpus=gpus,
ports={"8080/tcp": 8080},
)
def stop(self):
if self.container:
self.container.stop()
class BenchmarkRunner:
def __init__(
self,
image: str = "ghcr.io/huggingface/text-generation-inference-benchmark:latest",
volumes: List[Tuple[str, str]] = None,
):
if volumes is None:
volumes = []
self.container = None
self.image = image
self.volumes = volumes
def run(self, parameters: list[tuple], network_mode):
params = "text-generation-inference-benchmark"
for p in parameters:
params += f" --{p[0]} {str(p[1])}" if p[1] is not None else f" --{p[0]}"
logger.info(
f"Running text-generation-inference-benchmarks with parameters: {params}"
)
volumes = {}
for v in self.volumes:
volumes[v[0]] = {"bind": v[1], "mode": "rw"}
self.container = run_docker(
self.image,
params,
"Benchmark finished",
"Fatal:",
volumes=volumes,
extra_env={
"RUST_LOG": "text_generation_inference_benchmark=info",
"RUST_BACKTRACE": "full",
},
network_mode=network_mode,
)
def stop(self):
if self.container:
self.container.stop()
def run_docker(
image: str,
args: str,
success_sentinel: str,
error_sentinel: str,
ports: Dict[str, int] = None,
volumes=None,
network_mode: str = "bridge",
gpus: int = 0,
extra_env: Dict[str, str] = None,
) -> Container:
if ports is None:
ports = {}
if volumes is None:
volumes = {}
if extra_env is None:
extra_env = {}
client = docker.from_env(timeout=300)
# retrieve the GPU devices from CUDA_VISIBLE_DEVICES
devices = [f"{i}" for i in range(get_num_gpus())][:gpus]
environment = {"HF_TOKEN": os.environ.get("HF_TOKEN")}
environment.update(extra_env)
container = client.containers.run(
image,
args,
detach=True,
device_requests=(
[docker.types.DeviceRequest(device_ids=devices, capabilities=[["gpu"]])]
if gpus > 0
else None
),
volumes=volumes,
shm_size="1g",
ports=ports,
network_mode=network_mode,
environment=environment,
)
for line in container.logs(stream=True):
print(line.decode("utf-8"), end="")
if success_sentinel.encode("utf-8") in line:
break
if error_sentinel.encode("utf-8") in line:
container.stop()
raise Exception(f"Error starting container: {line}")
return container
def get_gpu_names() -> str:
gpus = GPUtil.getGPUs()
if len(gpus) == 0:
return ""
    return f"{len(gpus)}x{gpus[0].name}"
def get_gpu_name() -> str:
gpus = GPUtil.getGPUs()
if len(gpus) == 0:
return ""
return gpus[0].name
def get_num_gpus() -> int:
return len(GPUtil.getGPUs())
def build_df(model: str, data_files: dict[str, str]) -> pd.DataFrame:
df = pd.DataFrame()
now = datetime.datetime.now(datetime.timezone.utc)
created_at = now.isoformat() # '2024-10-02T11:53:17.026215+00:00'
# Load the results
for key, filename in data_files.items():
with open(filename, "r") as f:
data = json.load(f)
for result in data["results"]:
entry = result
[config] = pd.json_normalize(result["config"]).to_dict(orient="records")
entry.update(config)
entry["engine"] = data["config"]["meta"]["engine"]
entry["tp"] = data["config"]["meta"]["tp"]
entry["version"] = data["config"]["meta"]["version"]
entry["model"] = model
entry["created_at"] = created_at
del entry["config"]
df = pd.concat([df, pd.DataFrame(entry, index=[0])])
return df
def main(sha, results_file):
results_dir = "results"
# get absolute path
results_dir = os.path.join(os.path.dirname(__file__), results_dir)
logger.info("Starting benchmark")
models = [
("meta-llama/Llama-3.1-8B-Instruct", 1),
# ('meta-llama/Llama-3.1-70B-Instruct', 4),
# ('mistralai/Mixtral-8x7B-Instruct-v0.1', 2),
]
success = True
for model in models:
tgi_runner = TGIDockerRunner(model[0])
# create results directory
model_dir = os.path.join(
results_dir, f'{model[0].replace("/", "_").replace(".", "_")}'
)
os.makedirs(model_dir, exist_ok=True)
runner = BenchmarkRunner(
volumes=[(model_dir, "/opt/text-generation-inference-benchmark/results")]
)
try:
tgi_runner.run([("max-concurrent-requests", 512)], gpus=model[1])
logger.info(f"TGI started for model {model[0]}")
parameters = [
("tokenizer-name", model[0]),
("max-vus", 800),
("url", "http://localhost:8080"),
("duration", "120s"),
("warmup", "30s"),
("benchmark-kind", "rate"),
(
"prompt-options",
"num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
),
(
"decode-options",
"num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
),
(
"extra-meta",
f'"engine=TGI,tp={model[1]},version={sha},gpu={get_gpu_name()}"',
),
("no-console", None),
]
rates = [("rates", f"{r / 10.}") for r in list(range(8, 248, 8))]
parameters.extend(rates)
runner.run(parameters, f"container:{tgi_runner.container.id}")
except Exception as e:
logger.error(f"Error running benchmark for model {model[0]}: {e}")
# print the stack trace
print(traceback.format_exc())
success = False
finally:
tgi_runner.stop()
runner.stop()
if not success:
logger.error("Some benchmarks failed")
exit(1)
df = pd.DataFrame()
# list recursively directories
directories = [
f"{results_dir}/{d}"
for d in os.listdir(results_dir)
if os.path.isdir(f"{results_dir}/{d}")
]
logger.info(f"Found result directories: {directories}")
for directory in directories:
data_files = {}
for filename in os.listdir(directory):
if filename.endswith(".json"):
data_files[filename.split(".")[-2]] = f"{directory}/{filename}"
logger.info(f"Processing directory {directory}")
df = pd.concat([df, build_df(directory.split("/")[-1], data_files)])
df["device"] = get_gpu_name()
df["error_rate"] = (
df["failed_requests"]
/ (df["failed_requests"] + df["successful_requests"])
* 100.0
)
df.to_parquet(results_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--sha", help="SHA of the commit to add to the results", required=True
)
parser.add_argument(
"--results-file",
help="The file where to store the results, can be a local file or a s3 path",
)
args = parser.parse_args()
if args.results_file is None:
results_file = f"{args.sha}.parquet"
else:
results_file = args.results_file
main(args.sha, results_file)
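# Example invocation (the SHA and output path below are illustrative, not taken
# from the repository docs):
#   python load_tests/benchmarks.py --sha abc1234 --results-file results/abc1234.parquet
# When --results-file is omitted, the dataframe is written to "<sha>.parquet"
# relative to the current working directory.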
| text-generation-inference/load_tests/benchmarks.py/0 | {
"file_path": "text-generation-inference/load_tests/benchmarks.py",
"repo_id": "text-generation-inference",
"token_count": 4444
} |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import torch
extra_cuda_cflags = []
extra_cflags = []
if torch.version.hip:
extra_cflags = ["-DLEGACY_HIPBLAS_DIRECT=ON"]
extra_cuda_cflags = ["-DLEGACY_HIPBLAS_DIRECT=ON"]
extra_compile_args = {
"cxx": extra_cflags,
"nvcc": extra_cuda_cflags,
}
setup(
name="exllama_kernels",
ext_modules=[
CUDAExtension(
name="exllama_kernels",
sources=[
"exllama_kernels/exllama_ext.cpp",
"exllama_kernels/cuda_buffers.cu",
"exllama_kernels/cuda_func/column_remap.cu",
"exllama_kernels/cuda_func/q4_matmul.cu",
"exllama_kernels/cuda_func/q4_matrix.cu",
],
extra_compile_args=extra_compile_args,
)
],
cmdclass={"build_ext": BuildExtension},
)
| text-generation-inference/server/exllama_kernels/setup.py/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/setup.py",
"repo_id": "text-generation-inference",
"token_count": 470
} |
#ifndef _qdq_8_cuh
#define _qdq_8_cuh
#include "qdq_util.cuh"
#include "../../config.h"
#if QMODE_8BIT == 1
// Not implemented
#else
__forceinline__ __device__ void shuffle_8bit_4
(
uint32_t* q,
int stride
)
{
}
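// Note on dequant_8bit_8 below: it unpacks eight 8-bit values stored four per
// 32-bit word in q_0 and q_1. Each byte is extracted with exb and recentred
// around zero via dq_ns (the 128 argument presumably being the implicit
// zero-point), then the results are packed pairwise into four half2 values.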
__forceinline__ __device__ void dequant_8bit_8
(
const uint32_t q_0,
const uint32_t q_1,
half2 (&dq)[4],
int stride
)
{
half dqh[8];
for (int i = 0; i < 4; i++) dqh[i ] = dq_ns(exb(q_0, i * 8, 0xff), 128);
for (int i = 0; i < 4; i++) dqh[i + 4] = dq_ns(exb(q_1, i * 8, 0xff), 128);
for (int i = 0; i < 4; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}
#endif
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_8.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_8.cuh",
"repo_id": "text-generation-inference",
"token_count": 337
} |
import pytest
import torch
from copy import copy
from transformers import AutoTokenizer
from text_generation_server.pb import generate_pb2
from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch
@pytest.fixture(scope="session")
def mt0_small_tokenizer():
tokenizer = AutoTokenizer.from_pretrained(
"bigscience/mt0-small", padding_side="left"
)
tokenizer.bos_token_id = 0
return tokenizer
@pytest.fixture(scope="session")
def default_seq2seq_lm():
return Seq2SeqLM.fallback("bigscience/mt0-small")
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
return generate_pb2.Request(
id=0,
inputs="Test",
input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
prefill_logprobs=True,
truncate=100,
parameters=default_pb_parameters,
stopping_parameters=default_pb_stop_parameters,
)
@pytest.fixture
def default_pb_batch(default_pb_request):
return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)
@pytest.fixture
def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer):
return Seq2SeqLMBatch.from_pb(
default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu")
)
@pytest.fixture
def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer):
req_0 = copy(default_pb_request)
req_0.id = 1
req_1 = default_pb_request
req_1.id = 2
req_1.stopping_parameters.max_new_tokens = 5
batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
return Seq2SeqLMBatch.from_pb(
batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu")
)
def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch):
batch = default_seq2seq_lm_batch
sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
assert batch.batch_id == default_pb_batch.id
assert batch.requests == default_pb_batch.requests
assert batch.input_ids.shape == (default_pb_batch.size, sequence_length)
assert batch.input_ids[0][-2] == 4268
assert batch.input_ids[0][-1] == 1
assert torch.all(batch.input_ids[0][:-2] == 0)
assert torch.all(batch.attention_mask[0][-2:] == 1)
assert torch.all(batch.attention_mask[0][:-2] == 0)
assert len(batch.decoder_input_ids) == default_pb_batch.size
assert batch.decoder_attention_mask is None
assert batch.encoder_last_hidden_state is None
assert batch.past_key_values is None
assert batch.input_lengths == [2]
assert batch.decoder_input_lengths == [1]
assert len(batch) == default_pb_batch.size
assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)
assert batch.max_input_length == batch.input_lengths[0]
assert batch.max_decoder_input_length == batch.decoder_input_lengths[0]
def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch):
with pytest.raises(ValueError):
Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch])
def test_seq2seq_lm_batch_type(default_seq2seq_lm):
assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch
def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch):
sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
generations, next_batch, _ = default_seq2seq_lm.generate_token(
default_seq2seq_lm_batch
)
assert len(generations) == len(next_batch)
assert isinstance(next_batch, Seq2SeqLMBatch)
assert next_batch.input_ids is None
assert torch.equal(
next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask
)
assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths
assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length
assert (
next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers
)
assert next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias
assert len(next_batch.decoder_input_ids) == len(next_batch)
assert next_batch.all_decoder_input_ids[0][0] == 0
assert next_batch.all_decoder_input_ids[0][1] == 259
assert next_batch.decoder_attention_mask is None
assert next_batch.encoder_last_hidden_state.shape == (1, sequence_length, 512)
assert next_batch.decoder_input_lengths == [2]
assert next_batch.max_decoder_input_length == 2
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
)
assert all(
[
p[2].shape == (len(next_batch), 6, sequence_length, 64)
for p in next_batch.past_key_values
]
)
assert all(
[
p[3].shape == (len(next_batch), 6, sequence_length, 64)
for p in next_batch.past_key_values
]
)
assert all([generation.generated_text is None for generation in generations])
assert all([len(generation.prefill_tokens) == 1 for generation in generations])
assert all(
[
token_id.item() == 259
for generation in generations
for token_id in generation.tokens.token_ids
]
)
assert all(
[
token_text == " "
for generation in generations
for token_text in generation.tokens.texts
]
)
assert generations[0].request_id == 0
def test_seq2seq_lm_generate_token_completion(
default_seq2seq_lm, default_seq2seq_lm_batch
):
next_batch = default_seq2seq_lm_batch
for _ in range(6):
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == "a few weeks"
assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
assert generations[0].generated_text.generated_tokens == 7
def test_seq2seq_lm_generate_token_completion_multi(
default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch
):
next_batch = default_multi_requests_seq2seq_lm_batch
for i in range(4):
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[1].generated_text.text == "a few "
assert (
generations[1].request_id
== default_multi_requests_seq2seq_lm_batch.requests[1].id
)
assert generations[1].generated_text.generated_tokens == 5
next_batch = next_batch.filter([next_batch.requests[0].id])
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == "a few weeks"
assert (
generations[0].request_id
== default_multi_requests_seq2seq_lm_batch.requests[0].id
)
assert generations[0].generated_text.generated_tokens == 7
def test_batch_concatenate(
default_seq2seq_lm,
default_seq2seq_lm_batch,
default_multi_requests_seq2seq_lm_batch,
):
next_batch_0 = default_seq2seq_lm_batch
_, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
_, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
next_batch_1 = default_multi_requests_seq2seq_lm_batch
_, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1)
    # Copy the encoder hidden states because they are removed from the concatenated batches
next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state
next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state
# Clone past_key_values before concatenating to compare after,
# because they are removed from the concatenated batches
next_batch_0_past_key_values = [
[t.clone() for t in layer] for layer in next_batch_0.past_key_values
]
next_batch_1_past_key_values = [
[t.clone() for t in layer] for layer in next_batch_1.past_key_values
]
next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1])
assert next_batch.batch_id == 0
assert torch.equal(
next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0]
)
assert next_batch.all_decoder_input_ids[1][0] == 0
assert next_batch.all_decoder_input_ids[2][0] == 0
assert torch.equal(
next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids
)
assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1)
assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0)
assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0)
assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1)
assert torch.equal(
next_batch.encoder_last_hidden_state[0],
next_batch_0_encoder_last_hidden_state[0, -2:],
)
assert torch.equal(
next_batch.encoder_last_hidden_state[1:],
next_batch_1_encoder_last_hidden_state[:, -2:],
)
assert next_batch.input_lengths == [2, 2, 2]
assert next_batch.decoder_input_lengths == [3, 2, 2]
assert next_batch.max_input_length == 2
assert next_batch.max_decoder_input_length == 3
assert next_batch.requests[0] == next_batch_0.requests[0]
assert next_batch.requests[1:] == list(next_batch_1.requests)
assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers
assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
assert all(
[p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
assert all(
[p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
)
for i, past in enumerate(next_batch.past_key_values):
assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0])
assert torch.equal(
next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :]
)
assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0])
assert torch.equal(
next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :]
)
assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0])
assert torch.equal(
next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:]
)
assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0])
assert torch.equal(
next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:]
)
for _ in range(3):
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 3
assert generations[2].generated_text.text == "a few "
assert (
generations[2].request_id
== default_multi_requests_seq2seq_lm_batch.requests[1].id
)
assert generations[2].generated_text.generated_tokens == 5
next_batch = next_batch.filter(
[next_batch.requests[0].id, next_batch.requests[1].id]
)
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[0].generated_text.text == "a few weeks"
assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
assert generations[0].generated_text.generated_tokens == 7
next_batch = next_batch.filter([next_batch.requests[1].id])
generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == "a few weeks"
assert (
generations[0].request_id
== default_multi_requests_seq2seq_lm_batch.requests[0].id
)
assert generations[0].generated_text.generated_tokens == 7
| text-generation-inference/server/tests/models/test_seq2seq_lm.py/0 | {
"file_path": "text-generation-inference/server/tests/models/test_seq2seq_lm.py",
"repo_id": "text-generation-inference",
"token_count": 5528
} |
from text_generation_server.layers.tensor_parallel import (
TensorParallelColumnLinear,
TensorParallelRowLinear,
TensorParallelEmbedding,
)
from text_generation_server.layers.linear import (
get_linear,
FastLinear,
)
from text_generation_server.layers.speculative import SpeculativeHead
# Just to add the `load` methods.
from text_generation_server.layers.layernorm import load_layer_norm
from text_generation_server.layers.conv import load_conv2d
from text_generation_server.layers.lora import (
LoraLinear,
TensorParallelMultiAdapterLinear,
TensorParallelAdapterRowLinear,
)
__all__ = [
"get_linear",
"FastLinear",
"TensorParallelColumnLinear",
"TensorParallelRowLinear",
"TensorParallelEmbedding",
"SpeculativeHead",
"LoraLinear",
"TensorParallelMultiAdapterLinear",
"TensorParallelAdapterRowLinear",
"load_layer_norm",
"load_conv2d",
]
| text-generation-inference/server/text_generation_server/layers/__init__.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 346
} |
from typing import List, Optional, Union, TypeVar
from dataclasses import dataclass
from loguru import logger
import torch
from compressed_tensors.quantization import QuantizationArgs, QuantizationType
from text_generation_server.layers.fp8 import _load_scalar_or_matrix_scale
from text_generation_server.utils.log import log_once
from text_generation_server.utils.weights import Weight, Weights, WeightsLoader
try:
import marlin_kernels
except ImportError:
marlin_kernels = None
class W8A8IntLoader(WeightsLoader):
"""
Loader for w8a8 integer compressed-tensors parameters.
"""
def __init__(
self,
*,
input_args: Optional[QuantizationArgs],
weight_args: QuantizationArgs,
):
        if weight_args.type != QuantizationType.INT or weight_args.num_bits != 8:
raise ValueError(
f"{type(self).__name__} only supports w8a8 int checkpoints"
)
if not weight_args.symmetric:
raise ValueError("Checkpoints with asymmetric weights are not supported")
self.load_weight_scale = not weight_args.dynamic
if input_args is not None:
self.input_symmetric = input_args.symmetric
if not input_args.dynamic:
log_once(
logger.warning,
"Forcing dynamic input quantization for compressed_tensors w8a8 int checkpoint (for better accuracy).",
)
else:
self.input_symmetric = True
def __str__(self) -> str:
def scale_to_str(scale):
return "static" if scale else "dynamic"
def symmetric_to_str(symmetric):
return "symmetric" if symmetric else "asymmetric"
return f"{self.__class__.__name__} (w8a8 int, input: dynamic/{symmetric_to_str(self.input_symmetric)}, weight: {scale_to_str(self.load_weight_scale)}/symmetric))"
def get_weights(self, weights: "Weights", prefix: str):
w = weights.get_tensor(f"{prefix}.weight", to_dtype=False)
weight_scale = None
if self.load_weight_scale:
weight_scale = weights.get_tensor(
f"{prefix}.weight_scale", to_dtype=False
).reshape(-1)
return Int8Weight(
input_symmetric=self.input_symmetric,
weight=w,
weight_scale=weight_scale,
)
def get_weights_col_packed(
self,
weights: Weights,
prefix: str,
block_sizes: Union[int, List[int]],
):
w = weights.get_packed_sharded(
f"{prefix}.weight", dim=0, block_sizes=block_sizes, to_dtype=False
)
weight_scale = None
if self.load_weight_scale:
weight_scale = weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False)
if weight_scale.numel() > 1:
weight_scale = weights.get_packed_sharded(
f"{prefix}.weight_scale",
dim=0,
block_sizes=block_sizes,
to_dtype=False,
)
weight_scale = weight_scale.reshape(-1)
return Int8Weight(
input_symmetric=self.input_symmetric,
weight=w,
weight_scale=weight_scale,
)
def get_multi_weights_col(self, weights: "Weights", prefixes: List[str], dim: int):
w = [
weights.get_sharded(f"{p}.weight", dim=0, to_dtype=False) for p in prefixes
]
shapes = [x.shape for x in w]
w = torch.cat(w, dim=dim)
weight_scale = None
if self.load_weight_scale:
weight_scale = [
_load_scalar_or_matrix_scale(weights, f"{p}.weight_scale", shape)
for p, shape in zip(prefixes, shapes)
]
weight_scale = torch.cat(weight_scale, dim=0).reshape(-1, 1)
return Int8Weight(
input_symmetric=self.input_symmetric,
weight=w,
weight_scale=weight_scale,
)
def get_weights_row(self, weights: "Weights", prefix: str):
w = weights.get_sharded(f"{prefix}.weight", dim=1, to_dtype=False)
weight_scale = None
if self.load_weight_scale:
weight_scale = weights.get_tensor(
f"{prefix}.weight_scale", to_dtype=False
).reshape(-1)
return Int8Weight(
input_symmetric=self.input_symmetric,
weight=w,
weight_scale=weight_scale,
)
OtherT = TypeVar("OtherT")
def _get_tensor_or_else(
weights: Weights, prefix: str, other: OtherT
) -> Union[torch.Tensor, OtherT]:
# Even if a checkpoint uses e.g. zero-points, they can be elided:
# https://github.com/neuralmagic/compressed-tensors/blob/db6ccb25b265e8370813ecab5e95714a6728b5a6/src/compressed_tensors/compressors/quantized_compressors/base.py#L105
if weights.has_tensor(prefix):
return weights.get_tensor(prefix, to_dtype=False)
else:
return other
@dataclass
class Int8Weight(Weight):
input_symmetric: bool
weight: torch.Tensor
weight_scale: Optional[torch.Tensor]
def get_linear(self, bias: torch.Tensor):
if self.weight_scale is None:
assert marlin_kernels is not None
qweight, weight_scale, _ = marlin_kernels.scaled_int8_quant(self.weight)
return W8A8IntLinear(
bias=bias,
input_symmetric=self.input_symmetric,
weight=qweight,
weight_scale=weight_scale,
)
else:
return W8A8IntLinear(
bias=bias,
input_symmetric=self.input_symmetric,
weight=self.weight,
weight_scale=self.weight_scale,
)
class W8A8IntLinear(torch.nn.Module):
def __init__(
self,
*,
bias: Optional[torch.Tensor],
input_symmetric: bool,
weight: torch.Tensor,
weight_scale: torch.Tensor,
):
super().__init__()
weight_scale = weight_scale.to(torch.float32)
self.bias = bias
self.input_symmetric = input_symmetric
# cutlass kernels require transposed weights.
self.weight = weight.t()
self.weight_scale = weight_scale
if input_symmetric:
self.zero_point_adj = None
else:
# https://github.com/vllm-project/vllm/blob/8d59dbb00044a588cab96bcdc028006ed922eb06/csrc/quantization/cutlass_w8a8/Epilogues.md#scaledepilogueazp
self.zero_point_adj = self.weight.sum(
dim=0, keepdim=True, dtype=torch.int32
)
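            # Rationale (see the linked epilogue notes): with an asymmetric input,
            # x_q = x / s_x + zp, so x_q @ W carries a spurious zp * sum_k W[k, :]
            # term per output column. This precomputed per-column sum lets the
            # cutlass azp epilogue subtract that term and recover the unbiased
            # result.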
def forward(self, input: torch.Tensor) -> torch.Tensor:
assert marlin_kernels is not None
qinput, input_scale, input_zero_point = marlin_kernels.scaled_int8_quant(
input=input,
scale=None,
azp=None,
symmetric=self.input_symmetric,
)
if self.input_symmetric:
return marlin_kernels.cutlass_scaled_mm(
a=qinput,
b=self.weight,
scale_a=input_scale,
scale_b=self.weight_scale,
out_dtype=input.dtype,
bias=self.bias,
)
else:
assert (
self.zero_point_adj is not None
and input_scale is not None
and (self.input_symmetric or input_zero_point is not None)
)
return marlin_kernels.cutlass_scaled_mm_azp(
a=qinput,
b=self.weight,
scale_a=input_scale,
scale_b=self.weight_scale,
out_dtype=input.dtype,
azp_adj=self.zero_point_adj,
azp=input_zero_point,
bias=self.bias,
)
| text-generation-inference/server/text_generation_server/layers/compressed_tensors/w8a8_int.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/compressed_tensors/w8a8_int.py",
"repo_id": "text-generation-inference",
"token_count": 3942
} |
import torch
from torch import nn
from accelerate import init_empty_weights
from text_generation_server.utils.import_utils import (
SYSTEM,
)
# Monkey patching
@classmethod
def load_layer_norm(cls, prefix, weights, eps):
weight = weights.get_tensor(f"{prefix}.weight")
bias = weights.get_tensor(f"{prefix}.bias")
with init_empty_weights():
ln = cls(weight.shape, eps=eps)
ln.weight = torch.nn.Parameter(weight)
ln.bias = torch.nn.Parameter(bias)
return ln
@classmethod
def load_layer_norm_no_bias(cls, prefix, weights, eps):
weight = weights.get_tensor(f"{prefix}.weight")
with init_empty_weights():
ln = cls(weight.shape, eps=eps)
ln.weight = torch.nn.Parameter(weight)
ln.bias = None
return ln
torch.nn.LayerNorm.load = load_layer_norm
torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias
if SYSTEM == "cuda":
import dropout_layer_norm
class FastLayerNorm(nn.LayerNorm):
def forward(self, hidden_states, residual=None):
if hidden_states.shape[-1] > 8192:
if residual is not None:
hidden_states += residual
residual = hidden_states
return super(FastLayerNorm, self).forward(hidden_states), residual
else:
(
normed_hidden_states,
residual,
*rest,
) = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
residual,
self.weight,
self.bias,
None,
None,
None,
None,
0.0,
self.eps,
1.0,
0,
None,
False,
False,
)
if residual is None:
residual = hidden_states
return normed_hidden_states, residual
elif SYSTEM == "rocm":
import vllm._custom_ops as ops
class FastLayerNorm(nn.LayerNorm):
def forward(self, hidden_states, residual=None):
if residual is not None:
hidden_states += residual
residual = hidden_states
return super().forward(hidden_states), residual
elif SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
class FastLayerNorm(nn.LayerNorm):
def forward(self, hidden_states, residual=None):
out = ipex.llm.functional.add_layer_norm(
residual,
hidden_states,
self.weight,
self.bias,
self.eps,
residual is not None,
)
return out, residual if residual is not None else hidden_states
class FastRMSNorm(nn.Module):
def __init__(self, weight: torch.Tensor, eps: float):
super().__init__()
self.weight = nn.Parameter(weight)
self.variance_epsilon = eps
@classmethod
def load(cls, prefix, weights, eps=1e-6):
weight = weights.get_tensor(f"{prefix}.weight")
return cls(weight, eps)
def forward(self, hidden_states, residual=None):
if SYSTEM == "ipex":
out = ipex.llm.functional.add_rms_norm(
residual,
hidden_states,
self.weight,
None,
self.variance_epsilon,
residual is not None,
)
return out, residual if residual is not None else hidden_states
elif SYSTEM == "rocm":
# We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not.
if residual is not None:
ops.fused_add_rms_norm(
hidden_states,
residual,
self.weight.data,
self.variance_epsilon,
)
return hidden_states, residual
residual = hidden_states
out = torch.empty_like(hidden_states)
ops.rms_norm(
out,
hidden_states,
self.weight.data,
self.variance_epsilon,
)
return out, residual
elif hidden_states.shape[-1] > 8192:
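            # Plain PyTorch fallback: RMSNorm(x) = weight * x / sqrt(mean(x^2) + eps),
            # computed in float32 for stability and cast back to half precision
            # when the weights are fp16/bf16.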
if residual is not None:
hidden_states += residual
residual = hidden_states
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(
variance + self.variance_epsilon
)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states, residual
elif SYSTEM == "cuda":
# faster post attention rms norm
(
normed_hidden_states,
res,
*rest,
) = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
residual,
self.weight,
None,
None,
None,
None,
None,
0.0,
self.variance_epsilon,
1.0,
0,
None,
False,
True, # Activate RMSNorm
)
if res is None:
res = hidden_states
return normed_hidden_states, res
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
| text-generation-inference/server/text_generation_server/layers/layernorm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/layernorm.py",
"repo_id": "text-generation-inference",
"token_count": 3189
} |
import torch
import json
from typing import Tuple, Optional
from text_generation_server.layers.tensor_parallel import TensorParallelHead
from text_generation_server.layers.medusa import MedusaHeadV1, MedusaHeadV2
from text_generation_server.layers.mlp import MLPSpeculatorHead
class SpeculativeHead(torch.nn.Module):
def __init__(self, lm_head, speculator):
super().__init__()
self.head = lm_head
self.speculator = speculator
@staticmethod
def load(config, prefix: str, weights):
speculator = config.speculator
if speculator:
speculator_path = config.speculator["path"]
speculator_config = str(speculator_path / "config.json")
with open(speculator_config, "r") as f:
speculator_config = json.load(f)
config.speculator_config = speculator_config
try:
architecture = speculator_config["architectures"][0]
if architecture == "MLPSpeculatorPreTrainedModel":
speculator = MLPSpeculatorHead.load(config, prefix, weights)
else:
speculator = None
except KeyError:
try:
speculator = MedusaHeadV1.load(config, prefix, weights)
except Exception:
speculator = MedusaHeadV2(config, prefix, weights)
lm_head = None
else:
lm_head = TensorParallelHead.load(config, prefix, weights)
speculator = None
return SpeculativeHead(lm_head, speculator)
def forward(
self, input: torch.Tensor
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
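        # Returns (logits, speculative_logits): without a speculator, the regular
        # lm_head logits and None; with one, the speculator call is expected to
        # return both tensors itself.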
if self.speculator is not None:
return self.speculator(input)
assert self.head is not None
logits = self.head(input)
return logits, None
| text-generation-inference/server/text_generation_server/layers/speculative.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/speculative.py",
"repo_id": "text-generation-inference",
"token_count": 851
} |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from typing import List, Optional, Tuple, Type
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from text_generation_server.layers.attention import (
KVCache,
get_kv_scales,
)
from text_generation_server.layers.moe import DenseMoELayer, MoELayer, SparseMoELayer
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
TensorParallelMultiAdapterLinear,
TensorParallelAdapterRowLinear,
)
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.layers.layernorm import (
FastRMSNorm,
FastLayerNorm,
)
from text_generation_server.layers import (
FastLinear,
)
from text_generation_server.utils.weights import (
Weights,
)
from text_generation_server.layers.fp8 import HybridFP8UnquantLoader
if SYSTEM != "ipex":
pass
if SYSTEM == "rocm":
try:
import vllm._custom_ops as ops
except Exception as e:
raise ImportError(f"Could not load `vllm._custom_ops`. Full error: {e}")
def load_attention(config, prefix: str, weights, layer_id):
# Only defined in granite.
bias = getattr(config, "attention_bias", False)
head_size = config.hidden_size // config.num_attention_heads
sizes = None
prefixes = None
if config.model_type == "phi3":
base_layer = TensorParallelColumnLinear.load_qkv(
config,
prefix=f"{prefix}.qkv_proj",
weights=weights,
bias=bias,
num_heads=config.num_attention_heads,
num_key_value_heads=config.num_key_value_heads,
)
prefixes = ["qkv_proj"]
elif config.model_type == "baichuan":
prefix = f"{prefix}.W_pack"
base_layer = TensorParallelColumnLinear.load_qkv(
config,
prefix=prefix,
weights=weights,
bias=bias,
num_heads=config.num_attention_heads,
num_key_value_heads=config.num_key_value_heads,
)
prefixes = [prefix]
else:
prefixes = ["q_proj", "k_proj", "v_proj"]
sizes = [
head_size * config.num_attention_heads,
head_size * config.num_key_value_heads,
head_size * config.num_key_value_heads,
]
base_layer = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=bias,
)
return TensorParallelMultiAdapterLinear.load(
base_layer=base_layer,
layer_id=layer_id,
layer_names=prefixes,
sizes=sizes,
process_group=weights.process_group,
)
@contextmanager
def no_fp8(weights: Weights):
"""De-activate fp8 auto conversion for the duration of this context manager"""
weights_loader = weights.weights_loader
if isinstance(weights_loader, HybridFP8UnquantLoader) and weights_loader.to_fp8:
weights_loader = HybridFP8UnquantLoader(
weights_loader.activation_scale_ub, to_fp8=False
)
with weights.use_loader(weights_loader):
yield
class FlashLlamaAttention(torch.nn.Module):
def __init__(
self,
index: int,
prefix: str,
config,
weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
# Setting defaults for baichuan custom config which doesn't apply them.
config.rope_theta = getattr(config, "rope_theta", 10000)
config.num_key_value_heads = getattr(
config, "num_key_value_heads", config.num_attention_heads
)
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
# `config.attention_multiplier` is used in Granite
self.softmax_scale = getattr(
config, "attention_multiplier", self.head_size**-0.5
)
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
if config.num_key_value_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_key_value_heads` must be divisible by `num_shards` (got `num_key_value_heads`: {config.num_key_value_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights, index)
self.index = index
self.kv_scales = get_kv_scales(weights, f"{prefix}")
o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=getattr(config, "attention_bias", False),
)
self.o_proj = TensorParallelAdapterRowLinear.load(
o_proj,
index,
"o_proj",
process_group=weights.process_group,
)
self.num_groups = self.num_heads // self.num_key_value_heads
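        # Grouped-query attention: map every query head to the KV head it shares,
        # e.g. 8 query heads over 2 KV heads -> [0, 0, 0, 0, 1, 1, 1, 1].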
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache: KVCache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
):
qkv = self.query_key_value(hidden_states, adapter_data)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
kv_cache.store(
key=kv[:, 0],
value=kv[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv[:, 0],
value=kv[:, 1],
kv_scales=self.kv_scales,
kv_cache=kv_cache,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.o_proj(
attn_output.view(-1, self.num_heads * self.head_size), adapter_data
)
class Phi3MoE(nn.Module):
def __init__(
self, prefix: str, config, moe_layer_cls: Type[MoELayer], weights: Weights
):
super().__init__()
# gating
self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False)
self.moe = moe_layer_cls(
prefix=f"{prefix}.experts",
n_experts=config.num_local_experts,
n_expert_group=None,
renormalize=True,
topk=config.num_experts_per_tok,
topk_group=None,
weights=weights,
gate_proj_name="w1",
up_proj_name="w3",
down_proj_name="w2",
)
self.process_group = weights.process_group
def forward(self, x, adapter_data) -> torch.Tensor:
# router_logits: (num_tokens, n_experts)
router_logits = self.gate(x)
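        # The MoE layer routes each token to its top-k experts
        # (k = num_experts_per_tok) and renormalizes the selected gate weights
        # before combining the expert outputs.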
out = self.moe(x, gating_output=router_logits)
# Reduce sum
if self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out.view(*x.shape)
class LlamaMLP(nn.Module):
def __init__(self, prefix, config, weights, index):
super().__init__()
self.hidden_act = config.hidden_act
self.act = (
ACT2FN[self.hidden_act]
if "gelu" not in self.hidden_act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh"
if self.hidden_act in ["gelu_fast", "gelu_pytorch_tanh"]
else "none"
),
)
)
prefixes = None
sizes = None
# Fuse gate and up proj
bias = getattr(config, "mlp_bias", False)
if config.model_type == "phi3":
gate_up_proj = TensorParallelColumnLinear.load_gate_up(
config,
prefix=f"{prefix}.gate_up_proj",
weights=weights,
bias=bias,
)
else:
prefixes = ["gate_proj", "up_proj"]
sizes = [
config.intermediate_size,
config.intermediate_size,
]
gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=bias,
)
self.gate_up_proj = TensorParallelMultiAdapterLinear.load(
gate_up_proj,
index,
layer_names=prefixes,
sizes=sizes,
process_group=weights.process_group,
)
down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=bias,
)
self.down_proj = TensorParallelAdapterRowLinear.load(
down_proj,
index,
"down_proj",
process_group=weights.process_group,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
# TODO: This is a hotfix to be removed & properly refactored.
self.quantize = config.quantize
self.hidden_size = config.hidden_size
def forward(self, hidden_states, adapter_data):
if (
SYSTEM == "rocm"
and self.hidden_act == "silu"
and hidden_states.dtype == torch.float16
and hidden_states.shape[0] == 1
and not self.quantize
and self.hidden_size
!= 16384 # TODO: Temporary workaround for `LLMM_Silu` kernel not working with LLama3.1 405B; needs refactoring once fixed.
):
out = torch.empty(
hidden_states.shape[0],
self.intermediate_size,
dtype=hidden_states.dtype,
device="cuda",
)
ops.LLMM_Silu(
self.gate_up_proj.base_layer.linear.weight, hidden_states, out, 8
)
return self.down_proj(out, adapter_data)
else:
gate_up_states = self.gate_up_proj(hidden_states, adapter_data)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data
)
class FlashLlamaLayer(nn.Module):
def __init__(self, index, prefix, config, weights):
super().__init__()
with no_fp8(weights):
self.self_attn = FlashLlamaAttention(
index=index,
prefix=f"{prefix}.self_attn",
config=config,
weights=weights,
)
if config.model_type == "phimoe":
moe_layer_cls = (
SparseMoELayer
if SparseMoELayer.is_supported(weights)
else DenseMoELayer
)
self.mlp = Phi3MoE(
f"{prefix}.block_sparse_moe", config, moe_layer_cls, weights
)
            # With MoE, the layernorms are not RMSNorms and they have a bias
self.input_layernorm = FastLayerNorm.load(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.post_attention_layernorm = FastLayerNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
else:
self.mlp = LlamaMLP(
prefix=f"{prefix}.mlp", config=config, weights=weights, index=index
)
self.input_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.post_attention_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
# Used in Granite
# This could eventually be baked into the weights like we do for the embeddings/lm_head
# but this would mean modifying the lora code
self.residual_multiplier = getattr(config, "residual_multiplier", None)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
cross_attention_states,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
)
if self.residual_multiplier is not None:
attn_output *= self.residual_multiplier
normed_attn_res_output, attn_res = self.post_attention_layernorm(
attn_output, res
)
mlp_output = self.mlp(normed_attn_res_output, adapter_data)
if self.residual_multiplier is not None:
mlp_output *= self.residual_multiplier
return mlp_output, attn_res
class FlashLlamaModel(torch.nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
# Skip fp8 quant for first and last layers
self.layers = nn.ModuleList()
self.cross_attention_layers = getattr(config, "cross_attention_layers", [])
with no_fp8(weights):
self.layers.append(
FlashLlamaLayer(
index=0,
prefix=f"{prefix}.layers.0",
config=config,
weights=weights,
)
)
# Skip first and last layers
for layer_id in range(1, config.num_hidden_layers - 1):
if layer_id in self.cross_attention_layers:
from text_generation_server.models.custom_modeling.mllama import (
FlashLlamaCrossLayer,
)
self.layers.append(
FlashLlamaCrossLayer(
index=layer_id,
prefix=(f"{prefix}.layers.{layer_id}"),
config=config,
weights=weights,
)
)
else:
self.layers.append(
FlashLlamaLayer(
index=layer_id,
prefix=(f"{prefix}.layers.{layer_id}"),
config=config,
weights=weights,
)
)
with no_fp8(weights):
last_layer_id = config.num_hidden_layers - 1
self.layers.append(
FlashLlamaLayer(
index=last_layer_id,
prefix=(f"{prefix}.layers.{last_layer_id}"),
config=config,
weights=weights,
)
)
self.norm = FastRMSNorm.load(
prefix=f"{prefix}.norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
inputs_embeds: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
true_max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
adapter_data,
cross_attention_states=None,
) -> torch.Tensor:
hidden_states = inputs_embeds
# Get rotary cos and sin for this forward
        # Avoid indexing into the rotary embedding in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
adapter_data,
cross_attention_states,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashLlamaForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights, name=None):
if name is None:
name = "model"
super().__init__()
with no_fp8(weights):
self.embed_tokens = TensorParallelEmbedding(
prefix=(
f"{name}.embed_tokens"
if not prefix
else f"{prefix}.{name}.embed_tokens"
),
weights=weights,
)
self.model = FlashLlamaModel(
prefix=name if not prefix else f"{prefix}.{name}",
config=config,
weights=weights,
)
if config.tie_word_embeddings:
suffix = "model.embed_tokens"
else:
suffix = "lm_head"
# Used in Granite
embedding_multiplier = getattr(config, "embedding_multiplier", None)
if embedding_multiplier is not None:
self.embed_tokens.weight.data *= embedding_multiplier
prefix = suffix if not prefix or name != "model" else f"{prefix}.{suffix}"
with no_fp8(weights):
self.lm_head = SpeculativeHead.load(
config,
prefix,
weights,
)
# Used in Granite
        self.logits_scaling = getattr(config, "logits_scaling", None)
        self.logits_scaled = False
        if self.logits_scaling is not None and self.lm_head.head is not None:
try:
# Scale the weights directly
self.lm_head.head.linear.weight.data /= self.logits_scaling
self.logits_scaled = True
except Exception:
self.logits_scaled = False
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor] = None,
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
cross_attention_states=None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = self.model(
inputs_embeds,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
true_max_s=max_s,
prefill_cache_indices=prefill_cache_indices,
adapter_data=adapter_data,
cross_attention_states=cross_attention_states,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
# Used in Granite
if self.logits_scaling is not None and not self.logits_scaled:
logits /= self.logits_scaling
if speculative_logits is not None:
speculative_logits /= self.logits_scaling
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 11907
} |
# This code was adapted from https://github.com/lucidrains/flamingo-pytorch licensed under the MIT License.
#
# MIT License
#
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Generic interface to various configurations of the Perceiver Resampler, that simply takes in a series of (potentially
time-indexed) contextual embeddings, and "resamples" (compresses) them down to a pre-specified number of latents! Note
that the Perceiver in general resamples based solely off the *long-range* context; there's a nice opportunity here to
prime the Perceiver Resampler with say a single layer's worth of language embeddings (the target domain), and use that
to softly "retrieve & compress" what we need --> this would be a novel contribution we should explore.
References:
- DeepMind's Flamingo: https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model
- Code borrowed w/ love from: https://github.com/lucidrains/flamingo-pytorch
"""
from typing import Optional, Tuple
import torch
import torch.nn as nn
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelRowLinear,
)
EPS = 1e-5
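# Shape sketch (illustrative; argument names follow the constructor below): a context of shape
# [bsz, seq, embed_dim] is cross-attended against `n_latents` learned latents for `depth` blocks,
# yielding a tensor of shape [bsz, n_latents, embed_dim]:
#
#   resampler = IdeficsPerceiverResampler(prefix, config, embed_dim, depth, n_heads, head_dim, n_latents, weights)
#   compressed = resampler(image_embeddings)  # [bsz, n_latents, embed_dim]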
class IdeficsPerceiverResampler(nn.Module):
def __init__(
self,
prefix,
config,
embed_dim: int,
depth: int,
n_heads: int,
head_dim: int,
n_latents: int,
weights,
) -> None:
"""
        Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet, ViT, or
        MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed set of `n_latents` latent
        embeddings, then returns a Tensor of shape [bsz, n_latents, embed_dim]. `embed_dim` is both the
        dimensionality of the embeddings fed to the Perceiver Resampler and of the latent embeddings it returns
        (it could be e.g. the ViT embed_dim, the ResNet pool dim, and so on).
Args:
config (`IdeficsConfig`): config object
embed_dim (`int`): The size of each embedding vector
depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention).
head_dim (`int`): Dimensionality of each head projection in the Transformer block.
n_latents (`int`):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
"""
super().__init__()
self.embed_dim, self.n_heads, self.head_dim, self.n_latents = (
embed_dim,
n_heads,
head_dim,
n_latents,
)
self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver
# Create Latents for Perceiver
self.latents = nn.Parameter(weights.get_tensor(f"{prefix}.latents"))
self.intermediate_dim = (
self.embed_dim * 4
if not hasattr(config.vision_config, "embed_dim")
else config.vision_config.embed_dim * 4
)
# Create Transformer Blocks
self.blocks = nn.ModuleList(
[
nn.ModuleList(
[
IdeficsPerceiverAttention(
prefix=f"{prefix}.blocks.{layer_id}.0",
config=config,
embed_dim=self.embed_dim,
n_heads=self.n_heads,
head_dim=self.head_dim,
qk_layer_norms=self.qk_layer_norms,
weights=weights,
),
IdeficsMLP(
prefix=f"{prefix}.blocks.{layer_id}.1",
intermediate_size=self.intermediate_dim,
config=config,
weights=weights,
),
]
)
for layer_id in range(depth)
]
)
self.layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.layer_norm", weights=weights, eps=EPS
)
def forward(self, context: torch.Tensor) -> torch.Tensor:
"""Resample arbitrary length context & *compress* down to self.n_latents latent embeddings"""
# einsum.repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0])
latents = self.latents.repeat(context.shape[0], 1, 1)
# Feed through Perceiver Attention blocks...
for attn, ff in self.blocks:
latents = attn(context, latents) + latents
latents = ff(latents) + latents
return self.layer_norm(latents)
class IdeficsPerceiverAttention(nn.Module):
def __init__(
self,
prefix,
config,
embed_dim: int,
n_heads: int,
head_dim: int,
qk_layer_norms: bool,
weights,
) -> None:
"""Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
super().__init__()
self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim
self.qk_layer_norms = qk_layer_norms
# Normalization & Scaling
self.context_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.context_layer_norm", weights=weights, eps=EPS
)
self.latents_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.latents_layer_norm", weights=weights, eps=EPS
)
if self.qk_layer_norms:
self.q_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.q_layer_norm", weights=weights, eps=EPS
)
self.k_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.k_layer_norm", weights=weights, eps=EPS
)
self.qk_scale = self.head_dim**-0.5
if n_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {n_heads} "
                f"and `num_shards`: {weights.process_group.size()})"
            )
self.n_heads //= weights.process_group.size()
# Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers).
self.q_proj = TensorParallelColumnLinear.load(
config=config, prefix=f"{prefix}.q_proj", weights=weights, bias=False
)
self.k_proj = TensorParallelColumnLinear.load(
config=config, prefix=f"{prefix}.k_proj", weights=weights, bias=False
)
self.v_proj = TensorParallelColumnLinear.load(
config=config, prefix=f"{prefix}.v_proj", weights=weights, bias=False
)
self.output_proj = TensorParallelRowLinear.load(
config=config, prefix=f"{prefix}.output_proj", weights=weights, bias=False
)
def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
"""
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
Args:
context (`torch.Tensor`):
Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.
latents (`torch.Tensor`):
Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.
Returns:
`torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross
from context.
"""
context = self.context_layer_norm(context)
latents = self.latents_layer_norm(latents)
batch_size, seq_length, embed_dim = context.shape[:3]
# Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
# Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
q = self.q_proj(latents)
k = self.k_proj(torch.cat([context, latents], dim=-2))
v = self.v_proj(torch.cat([context, latents], dim=-2))
# Multiheaded Self-Attention w/ stable softmax (subtract per-row max -- `amax` -- before softmax call)
# =>> `attn` should be a 2D matrix of shape [n_latents x (context + n_latents)]
# einsum.rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads)
q, k, v = [
x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(
1, 2
)
for x in (q, k, v)
]
if self.qk_layer_norms:
q = self.q_layer_norm(q)
k = self.k_layer_norm(k)
scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k)
stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach())
attn = stabilized_scores.softmax(dim=-1)
# Attend & project back to output...
resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v)
# einsum.rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads)
return self.output_proj(resampled.transpose(1, 2).flatten(-2))
class IdeficsMLP(nn.Module):
def __init__(
self,
prefix,
intermediate_size,
config,
weights,
):
"""Simple MLP block with intermediate_size and embedding size"""
super().__init__()
self.embed_dim = config.vision_config.embed_dim
self.ln = nn.LayerNorm.load(prefix=f"{prefix}.ln", weights=weights, eps=EPS)
self.fc = TensorParallelColumnLinear.load(
config=config,
prefix=f"{prefix}.fc",
weights=weights,
bias=False,
)
self.act = nn.ReLU()
self.c_proj = TensorParallelRowLinear.load(
config=config,
prefix=f"{prefix}.c_proj",
weights=weights,
bias=False,
)
def forward(
self, hidden_states: Optional[Tuple[torch.FloatTensor]]
) -> torch.FloatTensor:
hidden_states = self.ln(hidden_states)
hidden_states = self.fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
return hidden_states
| text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_perceiver.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_perceiver.py",
"repo_id": "text-generation-inference",
"token_count": 5152
} |
import torch
import os
from loguru import logger
from typing import Dict, Optional
from text_generation_server.utils.log import log_master
REQUEST_LOGPROBS = os.getenv("REQUEST_LOGPROBS", "0").lower() in {"1", "true"}
ATTENTION = os.environ["ATTENTION"]
# default_prefix_caching = "1" if ATTENTION in {"flashinfer", "flashdecoding"} else "0"
PREFIX_CACHING = os.environ["PREFIX_CACHING"].lower() in {
"1",
"true",
}
PREFILL_CHUNKING = os.getenv("PREFILL_CHUNKING", "1").lower() in {"1", "true"}
log_master(logger.info, f"Using prefix caching = {PREFIX_CACHING}")
_expected = {"paged", "flashdecoding", "flashdecoding-ipex", "flashinfer"}
assert (
ATTENTION in _expected
), f"Invalid attention backend {ATTENTION}, expected one of {_expected}"
log_master(logger.info, f"Using Attention = {ATTENTION}")
if PREFIX_CACHING and ATTENTION not in {
"flashinfer",
"flashdecoding",
"flashdecoding-ipex",
}:
    raise RuntimeError(
        "Prefix caching is only supported with the flashinfer, flashdecoding and flashdecoding-ipex attention backends"
    )
MEM_POOL = torch.cuda.graph_pool_handle() if torch.cuda.is_available() else None
TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.93"))
assert TGI_WIGGLE_ROOM > 0
assert TGI_WIGGLE_ROOM < 1
# This is overridden by the cli
BLOCK_SIZE: int
if ATTENTION == "flashdecoding":
BLOCK_SIZE = 256
elif ATTENTION == "flashinfer":
BLOCK_SIZE = 1
elif ATTENTION == "flashdecoding-ipex":
BLOCK_SIZE = 64
else:
BLOCK_SIZE = 16
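# CUDA_GRAPHS is read as a comma-separated list of batch sizes to capture graphs for,
# e.g. CUDA_GRAPHS="1,2,4,8" (illustrative value, not a default).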
cuda_graphs = os.getenv("CUDA_GRAPHS")
if cuda_graphs is not None:
try:
cuda_graphs = [int(item) for item in cuda_graphs.split(",")]
except Exception as e:
        raise RuntimeError(
            f"Could not parse CUDA_GRAPHS ({cuda_graphs}), expected a comma-separated list of batch sizes: {e}"
        )
else:
cuda_graphs = None
# sorting the cuda graphs in descending order helps reduce the
# memory impact and results in less memory usage
if cuda_graphs is not None:
cuda_graphs.sort(reverse=True)
CUDA_GRAPHS = cuda_graphs
# NOTE: eventually we should move this into the router and pass back the
# index in all cases.
ADAPTER_TO_INDEX: Optional[Dict[str, int]] = None
def set_adapter_to_index(adapter_to_index: Dict[str, int]):
global ADAPTER_TO_INDEX
ADAPTER_TO_INDEX = adapter_to_index
def get_adapter_to_index():
global ADAPTER_TO_INDEX
return ADAPTER_TO_INDEX
| text-generation-inference/server/text_generation_server/models/globals.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/globals.py",
"repo_id": "text-generation-inference",
"token_count": 900
} |
from typing import Iterable
from loguru import logger
from text_generation_server.pb import generate_pb2
def concat_text_chunks(chunks: Iterable[generate_pb2.InputChunk]) -> str:
"""
Concatenate text in text chunks. Non-text chunks are dropped.
"""
text = None
for chunk in chunks:
chunk_type = chunk.WhichOneof("chunk")
if chunk_type == "text":
if text is None:
text = chunk.text
else:
raise NotImplementedError("Request contained more than one text chunk")
else:
# We cannot reject this, e.g. warmup sends an image chunk.
logger.debug(f"Encountered non-text chunk type {chunk_type}")
if text is None:
raise NotImplementedError("Request without a text chunk")
return text
| text-generation-inference/server/text_generation_server/utils/chunks.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/chunks.py",
"repo_id": "text-generation-inference",
"token_count": 332
} |
# coding=utf-8
# Copyright 2023 Authors of "A Watermark for Large Language Models"
# available at https://arxiv.org/abs/2301.10226
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from transformers import LogitsProcessor
from typing import List, Union
GAMMA = float(os.getenv("WATERMARK_GAMMA", 0.5))
DELTA = float(os.getenv("WATERMARK_DELTA", 2.0))
class WatermarkLogitsProcessor(LogitsProcessor):
def __init__(
self,
gamma: float = GAMMA,
delta: float = DELTA,
hash_key: int = 15485863, # just a large prime number to create a rng seed with sufficient bit width
device: str = "cpu",
):
# watermarking parameters
self.gamma = gamma
self.delta = delta
self.rng = torch.Generator(device=device)
self.hash_key = hash_key
def _seed_rng(self, input_ids: Union[List[int], torch.LongTensor]):
if isinstance(input_ids, list):
assert (
len(input_ids) >= 1
), "requires at least a 1 token prefix sequence to seed rng"
prev_token = input_ids[-1]
else:
assert len(input_ids) == 1
input_ids = input_ids[0]
assert (
input_ids.shape[-1] >= 1
), "requires at least a 1 token prefix sequence to seed rng"
prev_token = input_ids[-1].item()
self.rng.manual_seed(self.hash_key * prev_token)
def _get_greenlist_ids(
self,
input_ids: Union[List[int], torch.LongTensor],
max_value: int,
device: torch.device,
) -> List[int]:
# seed the rng using the previous tokens/prefix
self._seed_rng(input_ids)
greenlist_size = int(max_value * self.gamma)
vocab_permutation = torch.randperm(max_value, device=device, generator=self.rng)
greenlist_ids = vocab_permutation[:greenlist_size]
return greenlist_ids
@staticmethod
def _calc_greenlist_mask(
scores: torch.FloatTensor, greenlist_token_ids
) -> torch.BoolTensor:
green_tokens_mask = torch.zeros_like(scores)
green_tokens_mask[-1, greenlist_token_ids] = 1
final_mask = green_tokens_mask.bool()
return final_mask
@staticmethod
def _bias_greenlist_logits(
scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float
) -> torch.Tensor:
scores[greenlist_mask] = scores[greenlist_mask] + greenlist_bias
return scores
def __call__(
self, input_ids: Union[List[int], torch.LongTensor], scores: torch.FloatTensor
) -> torch.FloatTensor:
greenlist_ids = self._get_greenlist_ids(
input_ids, scores.shape[-1], scores.device
)
green_tokens_mask = self._calc_greenlist_mask(
scores=scores, greenlist_token_ids=greenlist_ids
)
scores = self._bias_greenlist_logits(
scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta
)
return scores
| text-generation-inference/server/text_generation_server/utils/watermark.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/watermark.py",
"repo_id": "text-generation-inference",
"token_count": 1489
} |
import {
PaddingDirection,
WordPiece,
punctuationPreTokenizer,
sequencePreTokenizer,
whitespacePreTokenizer,
Encoding,
EncodeOptions,
Tokenizer,
} from '../../'
import { InputSequence } from '../../types'
const MOCKS_DIR = __dirname + '/__mocks__'
describe('Can modify pretokenizers on the fly', () => {
let encoding: Encoding
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null,
) => Promise<Encoding>
let tokenizer: Tokenizer
beforeAll(async () => {
const model = await WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, {
continuingSubwordPrefix: '##',
})
tokenizer = new Tokenizer(model)
encode = tokenizer.encode.bind(tokenizer)
})
it('Can change pre tokenizer', async () => {
const input = 'my name is john.!?'
tokenizer.setPreTokenizer(sequencePreTokenizer([whitespacePreTokenizer()]))
encoding = await encode(input, null)
expect(encoding.getIds()).toEqual([0, 1, 2, 3, 4, 8])
// Change pre tokenizer
tokenizer.setPreTokenizer(sequencePreTokenizer([whitespacePreTokenizer(), punctuationPreTokenizer()]))
encoding = await encode(input, null)
expect(encoding.getIds()).toEqual([0, 1, 2, 3, 4, 8, 8, 8])
})
})
describe('Encoding', () => {
const originalString = 'my name is john'
const originalPairString = 'what is yours?'
let encoding: Encoding
let encodingDual: Encoding
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null,
) => Promise<Encoding>
beforeAll(async () => {
const model = await WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, {
continuingSubwordPrefix: '##',
})
const tokenizer = new Tokenizer(model)
tokenizer.setPreTokenizer(whitespacePreTokenizer())
encode = tokenizer.encode.bind(tokenizer)
})
beforeEach(async () => {
encoding = await encode(originalString, null)
encodingDual = await encode(originalString, originalPairString)
})
it('has a list of defined methods', () => {
expect(typeof encoding.wordToTokens).toBe('function')
expect(typeof encoding.wordToChars).toBe('function')
expect(typeof encoding.tokenToChars).toBe('function')
expect(typeof encoding.tokenToWord).toBe('function')
expect(typeof encoding.charToToken).toBe('function')
expect(typeof encoding.charToWord).toBe('function')
expect(typeof encoding.getAttentionMask).toBe('function')
expect(typeof encoding.getIds).toBe('function')
expect(typeof encoding.getLength).toBe('function')
expect(typeof encoding.getOffsets).toBe('function')
expect(typeof encoding.getOverflowing).toBe('function')
expect(typeof encoding.getSpecialTokensMask).toBe('function')
expect(typeof encoding.getTokens).toBe('function')
expect(typeof encoding.getTypeIds).toBe('function')
expect(typeof encoding.getWordIds).toBe('function')
expect(typeof encoding.getSequenceIds).toBe('function')
expect(typeof encoding.pad).toBe('function')
expect(typeof encoding.truncate).toBe('function')
})
describe('truncate', () => {
it('accepts `undefined` as second parameter', () => {
expect(encoding.truncate(10, undefined)).toBeUndefined()
})
it('should throw an Error on invalid direction', () => {
const t = () => encoding.truncate(10, 3, 'not_valid')
expect(t).toThrow(`not_valid is not a valid truncation direction`)
})
})
describe('getWordIds', () => {
it('returns the correct list of indexes', () => {
const indexes = encoding.getWordIds()
expect(indexes).toEqual([0, 1, 2, 3, 3])
})
})
describe('getSequenceIds', () => {
it('returns the correct list of indexes', () => {
expect(encoding.getSequenceIds()).toEqual([0, 0, 0, 0, 0])
expect(encodingDual.getSequenceIds()).toEqual([0, 0, 0, 0, 0, 1, 1, 1, 1])
})
})
describe('wordToTokens', () => {
it('returns the correct indexes', () => {
const indexes = encoding.wordToTokens(3)
expect(indexes).toEqual([3, 5])
})
    it('returns the correct indexes with pair sequences', () => {
expect(encodingDual.wordToTokens(3, 0)).toEqual([3, 5])
expect(encodingDual.wordToTokens(3, 1)).toEqual([8, 9])
})
    it('returns null when the word is out of range', () => {
const index = encoding.wordToTokens(100)
expect(index).toBeNull()
})
})
describe('wordToChars', () => {
it('returns the correct offsets', () => {
const offsets = encoding.wordToChars(3)
expect(offsets).toEqual([11, 15])
})
it('returns the correct offsets with pair sequences', () => {
expect(encodingDual.wordToChars(3, 0)).toEqual([11, 15])
expect(encodingDual.wordToChars(3, 1)).toEqual([13, 14])
})
    it('returns null when the word is out of range', () => {
const offsets = encoding.wordToChars(100)
expect(offsets).toBeNull()
})
})
describe('tokenToSequence', () => {
it('returns the correct value', () => {
expect(encodingDual.tokenToSequence(4)).toEqual(0)
expect(encodingDual.tokenToSequence(6)).toEqual(1)
})
})
describe('tokenToChars', () => {
it('returns the correct offsets', () => {
const offsets = encoding.tokenToChars(3)
expect(offsets).toEqual([11, 13])
})
it('returns the correct offsets with pair sequences', () => {
expect(encodingDual.tokenToChars(3)).toEqual([11, 13])
expect(encodingDual.tokenToChars(7)).toEqual([8, 13])
})
    it('returns null when the token is out of range', () => {
const offsets = encoding.tokenToChars(100)
expect(offsets).toBeNull()
})
})
describe('tokenToWord', () => {
it('returns the correct index', () => {
const index = encoding.tokenToWord(3)
expect(index).toEqual(3)
})
it('returns the correct index with pair sequences', () => {
expect(encodingDual.tokenToWord(3)).toEqual(3)
expect(encodingDual.tokenToWord(7)).toEqual(2)
})
    it('returns null when the token is out of range', () => {
const index = encoding.tokenToWord(100)
expect(index).toBeNull()
})
})
describe('charToToken', () => {
it('returns the correct index', () => {
const index = encoding.charToToken(3)
expect(index).toEqual(1)
})
it('returns the correct index with pair sequences', () => {
expect(encodingDual.charToToken(3, 0)).toEqual(1)
expect(encodingDual.charToToken(3, 1)).toEqual(5)
})
    it('returns null when the char is out of range', () => {
const index = encoding.charToToken(100)
expect(index).toBeNull()
})
})
describe('charToWord', () => {
it('returns the correct index', () => {
const index = encoding.charToWord(3)
expect(index).toEqual(1)
})
it('returns the correct index with pair sequences', () => {
expect(encodingDual.charToWord(3, 0)).toEqual(1)
expect(encodingDual.charToWord(3, 1)).toEqual(0)
})
    it('returns null when the char is out of range', () => {
const index = encoding.charToWord(100)
expect(index).toBeNull()
})
})
describe('pad', () => {
it('works correctly with only one parameter', () => {
encoding.pad(10)
expect(encoding.getTokens()).toHaveLength(10)
})
it('accepts `undefined` as second parameter', () => {
encoding.pad(10, undefined)
expect(encoding.getTokens()).toHaveLength(10)
})
it('accepts options as second parameter', () => {
encoding.pad(10, {
direction: PaddingDirection.Left,
padToken: '[PA]',
padTypeId: 10,
padId: 400,
})
const tokens = encoding.getTokens()
expect(tokens).toHaveLength(10)
expect(tokens[0]).toBe('[PA]')
expect(encoding.getTypeIds()[0]).toBe(10)
expect(encoding.getIds()[0]).toBe(400)
})
})
})
| tokenizers/bindings/node/lib/bindings/encoding.test.ts/0 | {
"file_path": "tokenizers/bindings/node/lib/bindings/encoding.test.ts",
"repo_id": "tokenizers",
"token_count": 3021
} |
{
"name": "tokenizers-freebsd-x64",
"version": "0.13.4-rc1",
"os": [
"freebsd"
],
"cpu": [
"x64"
],
"main": "tokenizers.freebsd-x64.node",
"files": [
"tokenizers.freebsd-x64.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/freebsd-x64/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/freebsd-x64/package.json",
"repo_id": "tokenizers",
"token_count": 272
} |
{
"name": "tokenizers-win32-x64-msvc",
"version": "0.13.4-rc1",
"os": [
"win32"
],
"cpu": [
"x64"
],
"main": "tokenizers.win32-x64-msvc.node",
"files": [
"tokenizers.win32-x64-msvc.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/win32-x64-msvc/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/win32-x64-msvc/package.json",
"repo_id": "tokenizers",
"token_count": 277
} |
use napi::bindgen_prelude::*;
use napi_derive::napi;
use tokenizers as tk;
use tokenizers::Encoding;
use crate::encoding::JsEncoding;
#[napi]
pub fn slice(s: String, begin_index: Option<i32>, end_index: Option<i32>) -> Result<String> {
let len = s.chars().count();
let get_index = |x: i32| -> usize {
if x >= 0 {
x as usize
} else {
(len as i32 + x) as usize
}
};
let begin_index = get_index(begin_index.unwrap_or(0));
let end_index = get_index(end_index.unwrap_or(len as i32));
if let Some(slice) = tk::tokenizer::normalizer::get_range_of(&s, begin_index..end_index) {
Ok(slice.to_string())
} else {
Err(Error::new(
Status::GenericFailure,
"Error in offsets".to_string(),
))
}
}
#[napi]
pub fn merge_encodings(
encodings: Vec<&JsEncoding>,
growing_offsets: Option<bool>,
) -> Result<JsEncoding> {
let growing_offsets = growing_offsets.unwrap_or(false);
let encodings: Vec<_> = encodings
.into_iter()
.map(|enc| enc.encoding.to_owned().unwrap())
.collect();
let new_encoding = Encoding::merge(encodings, growing_offsets);
let js_encoding = JsEncoding {
encoding: Some(new_encoding),
};
Ok(js_encoding)
}
| tokenizers/bindings/node/src/utils.rs/0 | {
"file_path": "tokenizers/bindings/node/src/utils.rs",
"repo_id": "tokenizers",
"token_count": 503
} |
import argparse
import glob
from os.path import join
from tokenizers import ByteLevelBPETokenizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
default=None,
metavar="path",
type=str,
required=True,
    help="The files to use for training; accepts '**/*.txt'-style glob patterns \
    if enclosed in quotes",
)
parser.add_argument(
"--out",
default="./",
type=str,
help="Path to the output directory, where the files will be saved",
)
parser.add_argument("--name", default="bpe-bytelevel", type=str, help="The name of the output vocab files")
args = parser.parse_args()
files = glob.glob(args.files)
if not files:
print(f"File does not exist: {args.files}")
exit(1)
# Initialize an empty tokenizer
tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
# And then train
tokenizer.train(
files,
vocab_size=10000,
min_frequency=2,
show_progress=True,
special_tokens=["<s>", "<pad>", "</s>"],
)
# Save the files
tokenizer.save_model(args.out, args.name)
# Restoring model from learned vocab/merges
tokenizer = ByteLevelBPETokenizer(
join(args.out, "{}-vocab.json".format(args.name)),
join(args.out, "{}-merges.txt".format(args.name)),
add_prefix_space=True,
)
# Test encoding
print(tokenizer.encode("Training ByteLevel BPE is very easy").tokens)
| tokenizers/bindings/python/examples/train_bytelevel_bpe.py/0 | {
"file_path": "tokenizers/bindings/python/examples/train_bytelevel_bpe.py",
"repo_id": "tokenizers",
"token_count": 521
} |
from .. import normalizers
Normalizer = normalizers.Normalizer
BertNormalizer = normalizers.BertNormalizer
NFD = normalizers.NFD
NFKD = normalizers.NFKD
NFC = normalizers.NFC
NFKC = normalizers.NFKC
Sequence = normalizers.Sequence
Lowercase = normalizers.Lowercase
Prepend = normalizers.Prepend
Strip = normalizers.Strip
StripAccents = normalizers.StripAccents
Nmt = normalizers.Nmt
Precompiled = normalizers.Precompiled
Replace = normalizers.Replace
ByteLevel = normalizers.ByteLevel
NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}
def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
if normalizer not in NORMALIZERS:
raise ValueError(
"{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
)
return NORMALIZERS[normalizer]()
| tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py",
"repo_id": "tokenizers",
"token_count": 304
} |
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = transformers
known_third_party =
absl
conllu
datasets
elasticsearch
fairseq
faiss-cpu
fastprogress
fire
fugashi
git
h5py
matplotlib
nltk
numpy
packaging
pandas
PIL
psutil
pytest
pytorch_lightning
rouge_score
sacrebleu
seqeval
sklearn
streamlit
tensorboardX
tensorflow
tensorflow_datasets
timeout_decorator
torch
torchaudio
torchtext
torchvision
torch_xla
tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119
[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
| tokenizers/bindings/python/setup.cfg/0 | {
"file_path": "tokenizers/bindings/python/setup.cfg",
"repo_id": "tokenizers",
"token_count": 386
} |
use pyo3::exceptions;
use pyo3::prelude::*;
use tk::utils::SysRegex;
/// Instantiate a new Regex with the given pattern
#[pyclass(module = "tokenizers", name = "Regex")]
pub struct PyRegex {
pub inner: SysRegex,
pub pattern: String,
}
#[pymethods]
impl PyRegex {
#[new]
#[pyo3(text_signature = "(self, pattern)")]
fn new(s: &str) -> PyResult<Self> {
Ok(Self {
inner: SysRegex::new(s)
.map_err(|e| exceptions::PyException::new_err(e.to_string().to_owned()))?,
pattern: s.to_owned(),
})
}
}
| tokenizers/bindings/python/src/utils/regex.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/regex.rs",
"repo_id": "tokenizers",
"token_count": 273
} |
from tokenizers import Tokenizer
from ..utils import data_dir, doc_wiki_tokenizer
disable_printing = True
original_print = print
def print(*args, **kwargs):
if not disable_printing:
original_print(*args, **kwargs)
class TestQuicktour:
# This method contains everything we don't want to run
@staticmethod
def slow_train():
tokenizer, trainer = TestQuicktour.get_tokenizer_trainer()
# START train
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
# END train
# START save
tokenizer.save("data/tokenizer-wiki.json")
# END save
@staticmethod
def get_tokenizer_trainer():
# START init_tokenizer
from tokenizers import Tokenizer
from tokenizers.models import BPE
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
# END init_tokenizer
# START init_trainer
from tokenizers.trainers import BpeTrainer
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
# END init_trainer
# START init_pretok
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
# END init_pretok
return tokenizer, trainer
def test_quicktour(self, doc_wiki_tokenizer):
def print(*args, **kwargs):
pass
try:
# START reload_tokenizer
tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
# END reload_tokenizer
except Exception:
tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)
# START encode
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
# END encode
# START print_tokens
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
# END print_tokens
assert output.tokens == [
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
]
# START print_ids
print(output.ids)
# [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# END print_ids
assert output.ids == [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# START print_offsets
print(output.offsets[9])
# (26, 27)
# END print_offsets
assert output.offsets[9] == (26, 27)
# START use_offsets
sentence = "Hello, y'all! How are you 😁 ?"
sentence[26:27]
# "😁"
# END use_offsets
assert sentence[26:27] == "😁"
# START check_sep
tokenizer.token_to_id("[SEP]")
# 2
# END check_sep
assert tokenizer.token_to_id("[SEP]") == 2
# START init_template_processing
from tokenizers.processors import TemplateProcessing
tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[
("[CLS]", tokenizer.token_to_id("[CLS]")),
("[SEP]", tokenizer.token_to_id("[SEP]")),
],
)
# END init_template_processing
# START print_special_tokens
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_special_tokens_pair
output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens_pair
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"[SEP]",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_type_ids
print(output.type_ids)
# [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# END print_type_ids
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# START encode_batch
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
# END encode_batch
# START encode_batch_pair
output = tokenizer.encode_batch(
[["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
)
# END encode_batch_pair
# START enable_padding
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
# END enable_padding
# START print_batch_tokens
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print(output[1].tokens)
# ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# END print_batch_tokens
assert output[1].tokens == ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# START print_attention_mask
print(output[1].attention_mask)
# [1, 1, 1, 1, 1, 1, 1, 0]
# END print_attention_mask
assert output[1].attention_mask == [1, 1, 1, 1, 1, 1, 1, 0]
if __name__ == "__main__":
import os
from urllib import request
from zipfile import ZipFile
disable_printing = False
if not os.path.isdir("data/wikitext-103-raw"):
print("Downloading wikitext-103...")
wiki_text, _ = request.urlretrieve(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
)
with ZipFile(wiki_text, "r") as z:
print("Unzipping in data...")
z.extractall("data")
print("Now training...")
TestQuicktour.slow_train()
| tokenizers/bindings/python/tests/documentation/test_quicktour.py/0 | {
"file_path": "tokenizers/bindings/python/tests/documentation/test_quicktour.py",
"repo_id": "tokenizers",
"token_count": 3290
} |
# Encoding
<tokenizerslangcontent>
<python>
## Encoding
[[autodoc]] tokenizers.Encoding
- all
- attention_mask
- ids
- n_sequences
- offsets
- overflowing
- sequence_ids
- special_tokens_mask
- tokens
- type_ids
- word_ids
- words
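A minimal usage sketch (the tokenizer file path below is illustrative):
```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("tokenizer.json")
encoding = tokenizer.encode("Hello, y'all!")

print(encoding.tokens)          # the string tokens
print(encoding.ids)             # the corresponding token ids
print(encoding.offsets)         # (start, end) character offsets into the input
print(encoding.attention_mask)  # 1 for sequence tokens, 0 for padding
```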
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/encoding.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/encoding.mdx",
"repo_id": "tokenizers",
"token_count": 190
} |
from docutils import nodes
import sphinx
from sphinx.locale import _
from conf import rust_version
logger = sphinx.util.logging.getLogger(__name__)
class RustRef:
def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]):
doctype = name.split("_")[1]
parts = text.split("::")
if text.startswith("~"):
title = parts[-1]
parts[0] = parts[0][1:]
        else:
            title = text
link = self.base_link()
if doctype == "struct":
l, title = self.make_struct_link(parts, title)
if doctype == "func":
l, title = self.make_func_link(parts, title)
if doctype == "meth":
l, title = self.make_meth_link(parts, title)
if doctype == "trait":
l, title = self.make_trait_link(parts, title)
link += l
node = nodes.reference(internal=False, refuri=link, text=title)
wrapper = nodes.literal(classes=["xref"])
wrapper += node
return [wrapper], []
def base_link(self):
return f"https://docs.rs/tokenizers/{rust_version}"
def make_struct_link(self, parts, title):
link = ""
struct_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/struct.{struct_name}.html"
return link, title
def make_func_link(self, parts, title):
link = ""
fn_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/fn.{fn_name}.html"
return link, title
def make_meth_link(self, parts, title):
meth_name = parts[-1]
if meth_name.endswith("()"):
meth_name = meth_name[:-2]
link, title = self.make_struct_link(parts[:-1], title)
link += f"#method.{meth_name}"
if not title.endswith(")"):
title += "()"
return link, title
def make_trait_link(self, parts, title):
link = ""
trait_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/trait.{trait_name}.html"
return link, title
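# Example role usage in the docs (illustrative):
#   :rust_struct:`~tokenizers::tokenizer::Tokenizer`
# renders a docs.rs link of the form
#   https://docs.rs/tokenizers/<rust_version>/tokenizers/tokenizer/struct.Tokenizer.html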
def setup(app):
app.add_role("rust_struct", RustRef())
app.add_role("rust_func", RustRef())
app.add_role("rust_meth", RustRef())
app.add_role("rust_trait", RustRef())
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| tokenizers/docs/source/_ext/rust_doc.py/0 | {
"file_path": "tokenizers/docs/source/_ext/rust_doc.py",
"repo_id": "tokenizers",
"token_count": 1221
} |
Tokenizers
====================================================================================================
Fast State-of-the-art tokenizers, optimized for both research and production
`🤗 Tokenizers`_ provides an implementation of today's most used tokenizers, with
a focus on performance and versatility. These tokenizers are also used in
`🤗 Transformers`_.
.. _🤗 Tokenizers: https://github.com/huggingface/tokenizers
.. _🤗 Transformers: https://github.com/huggingface/transformers
Main features:
----------------------------------------------------------------------------------------------------
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get
the part of the original sentence that corresponds to any token.
- Does all the pre-processing: Truncation, Padding, add the special tokens your model needs.
.. toctree::
:maxdepth: 2
:caption: Getting Started
quicktour
installation/main
pipeline
components
.. toctree-tags::
:maxdepth: 3
:caption: Using 🤗 Tokenizers
:glob:
:python:tutorials/python/*
.. toctree::
:maxdepth: 3
:caption: API Reference
api/reference
.. include:: entities.inc
| tokenizers/docs/source/index.rst/0 | {
"file_path": "tokenizers/docs/source/index.rst",
"repo_id": "tokenizers",
"token_count": 404
} |
use std::time::{Duration, Instant};
use criterion::black_box;
use tokenizers::{
Decoder, EncodeInput, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerImpl, Trainer,
};
pub fn iter_bench_encode<M, N, PT, PP, D>(
iters: u64,
tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
lines: &[EncodeInput],
) -> Duration
where
M: Model,
N: Normalizer,
PT: PreTokenizer,
PP: PostProcessor,
D: Decoder,
{
let mut duration = Duration::new(0, 0);
let mut line_index: usize = 0;
for _i in 0..iters {
if line_index >= lines.len() {
line_index = 0;
}
let input = lines[line_index].clone();
let start = Instant::now();
let _ = black_box(tokenizer.encode(input, false));
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
}
pub fn iter_bench_encode_batch<M, N, PT, PP, D>(
iters: u64,
tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
batches: &[Vec<EncodeInput>],
) -> Duration
where
M: Model + Send + Sync,
N: Normalizer + Send + Sync,
PT: PreTokenizer + Send + Sync,
PP: PostProcessor + Send + Sync,
D: Decoder + Send + Sync,
{
let mut duration = Duration::new(0, 0);
let mut batch_index: usize = 0;
for _i in 0..iters {
if batch_index >= batches.len() {
batch_index = 0;
}
let batch = batches[batch_index].clone();
let start = Instant::now();
let _ = black_box(tokenizer.encode_batch(batch, false));
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
}
pub fn iter_bench_train<T, M, N, PT, PP, D>(
iters: u64,
tokenizer: &mut TokenizerImpl<M, N, PT, PP, D>,
trainer: &mut T,
files: Vec<String>,
) -> Duration
where
T: Trainer<Model = M> + Sync,
M: Model + Send + Sync,
N: Normalizer + Send + Sync,
PT: PreTokenizer + Send + Sync,
PP: PostProcessor + Send + Sync,
D: Decoder + Send + Sync,
{
let mut duration = Duration::new(0, 0);
for _i in 0..iters {
let start = Instant::now();
tokenizer.train_from_files(trainer, files.clone()).unwrap();
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
}
| tokenizers/tokenizers/benches/common/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/common/mod.rs",
"repo_id": "tokenizers",
"token_count": 964
} |
use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize)]
/// The WordPiece decoder takes care of decoding a list of wordpiece tokens
/// back into a readable string.
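///
/// A minimal illustrative example, using the [`Decoder`] trait's provided `decode` helper:
///
/// ```
/// use tokenizers::decoders::wordpiece::WordPiece;
/// use tokenizers::Decoder;
///
/// let decoder = WordPiece::default();
/// let decoded = decoder
///     .decode(vec!["Hell".to_string(), "##o".to_string(), "world".to_string()])
///     .unwrap();
/// assert_eq!(decoded, "Hello world");
/// ```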
#[serde(tag = "type")]
#[non_exhaustive]
pub struct WordPiece {
/// The prefix to be used for continuing subwords
pub prefix: String,
/// Whether to cleanup some tokenization artifacts (spaces before punctuation, ...)
pub cleanup: bool,
}
impl WordPiece {
pub fn new(prefix: String, cleanup: bool) -> Self {
Self { prefix, cleanup }
}
}
impl Default for WordPiece {
fn default() -> Self {
Self {
prefix: "##".to_owned(),
cleanup: true,
}
}
}
pub fn cleanup(dirty_input: &str) -> String {
dirty_input
.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" do not", " don't")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
}
impl Decoder for WordPiece {
fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> {
tokens
.iter_mut()
.enumerate()
.map(|(i, token)| {
if i != 0 {
if token.starts_with(&self.prefix) {
*token = token.replacen(&self.prefix, "", 1);
} else {
*token = format!(" {token}");
}
}
if self.cleanup {
*token = cleanup(token);
}
Ok(token.to_string())
})
.collect::<Result<_>>()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn wordpiece_decoder() {
let decoder = WordPiece::new("##".to_string(), false);
assert_eq!(
decoder
.decode(vec![
"##uelo".to_string(),
"Ara".to_string(),
"##új".to_string(),
"##o".to_string(),
"No".to_string(),
"##guera".to_string()
])
.unwrap(),
"##uelo Araújo Noguera"
);
}
}
| tokenizers/tokenizers/src/decoders/wordpiece.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/wordpiece.rs",
"repo_id": "tokenizers",
"token_count": 1275
} |
use super::WordLevel;
use crate::utils::parallelism::*;
use crate::{AddedToken, Result, Trainer};
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::HashMap;
#[non_exhaustive]
#[derive(Debug, Clone, Builder, Serialize, Deserialize)]
pub struct WordLevelTrainer {
/// The minimum frequency a word must have to be part of the vocabulary
#[builder(default = "0")]
pub min_frequency: u64,
/// The target vocabulary size
#[builder(default = "30_000")]
pub vocab_size: usize,
/// Whether to show progress while training
#[builder(default = "true")]
pub show_progress: bool,
/// A list of special tokens that the model should know of
#[builder(default)]
pub special_tokens: Vec<AddedToken>,
#[builder(default, private)]
words: HashMap<String, u64>,
}
impl Default for WordLevelTrainer {
fn default() -> Self {
Self::builder().build().unwrap()
}
}
impl WordLevelTrainer {
pub fn builder() -> WordLevelTrainerBuilder {
WordLevelTrainerBuilder::default()
}
fn do_train(
&self,
word_counts: &HashMap<String, u64>,
model: &mut WordLevel,
) -> Result<Vec<AddedToken>> {
let mut ordered_counts = word_counts.iter().collect::<Vec<_>>();
        // Sort the word counts first by inverse count and then by word, in order
        // to keep the sorting deterministic in case of equal counts
let cmp = |l: &(&String, &u64), r: &(&String, &u64)| -> Ordering {
let count_comp: Ordering = l.1.cmp(r.1);
if count_comp != Ordering::Equal {
return count_comp.reverse();
}
l.0.cmp(r.0)
};
ordered_counts.sort_by(cmp);
let word_level = WordLevel::builder()
.vocab(
self.special_tokens
.iter()
.map(|token| token.content.clone())
.chain(
ordered_counts
.into_iter()
.filter(|(_, n)| **n >= self.min_frequency)
.map(|(w, _)| w.to_owned()),
)
.take(self.vocab_size)
.enumerate()
.map(|(i, w)| (w, i as u32))
.collect(),
)
.build()?;
// Transfer the vocab
model.vocab = word_level.vocab;
model.vocab_r = word_level.vocab_r;
Ok(self.special_tokens.clone())
}
}
impl Trainer for WordLevelTrainer {
type Model = WordLevel;
/// Train a WordLevel model
fn train(&self, model: &mut WordLevel) -> Result<Vec<AddedToken>> {
self.do_train(&self.words, model)
}
/// Whether we should show progress
fn should_show_progress(&self) -> bool {
self.show_progress
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
let words: Result<HashMap<String, u64>> = iterator
.maybe_par_bridge()
.map(|sequence| {
let words = process(sequence.as_ref())?;
let mut map = HashMap::new();
for word in words {
map.entry(word).and_modify(|c| *c += 1).or_insert(1);
}
Ok(map)
})
.reduce(
|| Ok(HashMap::new()),
|acc, ws| {
let mut acc = acc?;
for (k, v) in ws? {
acc.entry(k).and_modify(|c| *c += v).or_insert(v);
}
Ok(acc)
},
);
self.words = words?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_train() {
let word_counts: HashMap<String, u64> = [
("the".into(), 25),
("roses".into(), 22),
("are".into(), 24),
("red".into(), 12),
("voilets".into(), 10),
("blue".into(), 16),
]
.iter()
.cloned()
.collect();
let mut trainer = WordLevelTrainer {
vocab_size: 5,
..Default::default()
};
let mut model = WordLevel::default();
trainer.do_train(&word_counts, &mut model).unwrap();
let expected_vocab: HashMap<String, u32> = [
("the".into(), 0),
("are".into(), 1),
("roses".into(), 2),
("blue".into(), 3),
("red".into(), 4),
]
.iter()
.cloned()
.collect();
assert_eq!(model.vocab, expected_vocab);
// If we specify a min_frequency
trainer.min_frequency = 15;
let mut model = WordLevel::default();
trainer.do_train(&word_counts, &mut model).unwrap();
let expected_vocab: HashMap<String, u32> = [
("the".into(), 0),
("are".into(), 1),
("roses".into(), 2),
("blue".into(), 3),
]
.iter()
.cloned()
.collect();
assert_eq!(model.vocab, expected_vocab);
}
}
| tokenizers/tokenizers/src/models/wordlevel/trainer.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordlevel/trainer.rs",
"repo_id": "tokenizers",
"token_count": 2735
} |
use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
/// Pre-tokenizes numbers into single tokens. If `individual_digits` is set
/// to true, each digit is split into an individual token.
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Digits {
pub individual_digits: bool,
}
impl Digits {
pub fn new(individual_digits: bool) -> Self {
Self { individual_digits }
}
}
impl Default for Digits {
fn default() -> Self {
Self::new(false)
}
}
impl PreTokenizer for Digits {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
if self.individual_digits {
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Isolated)
})
} else {
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Contiguous)
})
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn numbers() {
let pretok = Digits::new(false);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
}
#[test]
fn individual_digits() {
let pretok = Digits::new(true);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/digits.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/digits.rs",
"repo_id": "tokenizers",
"token_count": 1667
} |
use crate::parallelism::*;
use crate::tokenizer::{Offsets, Token};
use crate::utils::padding::PaddingDirection;
use crate::utils::truncation::TruncationDirection;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::ops::Range;
/// Represents the output of a `Tokenizer`.
#[derive(Default, PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Encoding {
/// IDs produced by the `Tokenizer`
ids: Vec<u32>,
/// Type of the IDs
type_ids: Vec<u32>,
/// Tokens associated to each ID
tokens: Vec<String>,
/// Index of the word associated to each token/ID
words: Vec<Option<u32>>,
/// Offsets of the token/ID from the NormalizedString
offsets: Vec<Offsets>,
/// Mask identifying special tokens
special_tokens_mask: Vec<u32>,
/// Mask identifying padding tokens for the attention mechanism
attention_mask: Vec<u32>,
/// A list of overflowing Encodings generated when the input was truncated
overflowing: Vec<Encoding>,
/// Ranges of tokens covered by each sequence. If this is empty we consider
/// there is only one sequence in this Encoding, and that it covers the entire range.
sequence_ranges: HashMap<usize, Range<usize>>,
}
impl Encoding {
#[allow(clippy::too_many_arguments)]
pub fn new(
ids: Vec<u32>,
type_ids: Vec<u32>,
tokens: Vec<String>,
words: Vec<Option<u32>>,
offsets: Vec<Offsets>,
special_tokens_mask: Vec<u32>,
attention_mask: Vec<u32>,
overflowing: Vec<Self>,
sequence_ranges: HashMap<usize, Range<usize>>,
) -> Self {
Self {
ids,
type_ids,
tokens,
words,
offsets,
special_tokens_mask,
attention_mask,
overflowing,
sequence_ranges,
}
}
pub fn with_capacity(len: usize) -> Self {
Self {
ids: Vec::with_capacity(len),
type_ids: Vec::with_capacity(len),
tokens: Vec::with_capacity(len),
words: Vec::with_capacity(len),
offsets: Vec::with_capacity(len),
special_tokens_mask: Vec::with_capacity(len),
attention_mask: Vec::with_capacity(len),
overflowing: vec![],
sequence_ranges: HashMap::new(),
}
}
pub fn from_tokens(tokens: Vec<Token>, type_id: u32) -> Self {
let length = tokens.len();
let (ids, tokens, offsets) = tokens.into_iter().fold(
(
Vec::with_capacity(length),
Vec::with_capacity(length),
Vec::with_capacity(length),
),
|(mut ids, mut tokens, mut offsets), t| {
ids.push(t.id);
tokens.push(t.value);
offsets.push(t.offsets);
(ids, tokens, offsets)
},
);
Self {
ids,
tokens,
offsets,
words: vec![None; length],
type_ids: vec![type_id; length],
attention_mask: vec![1; length],
special_tokens_mask: vec![0; length],
overflowing: vec![],
sequence_ranges: HashMap::new(),
}
}
/// Whether this Encoding is empty
pub fn is_empty(&self) -> bool {
self.ids.is_empty()
}
/// Return the total length of this Encoding
pub fn len(&self) -> usize {
self.ids.len()
}
/// Return the number of sequences combined in this Encoding
pub fn n_sequences(&self) -> usize {
if self.sequence_ranges.is_empty() {
1
} else {
self.sequence_ranges.len()
}
}
/// Set the given sequence id for the whole range of tokens contained in this Encoding
pub fn set_sequence_id(&mut self, sequence_id: usize) {
self.sequence_ranges.insert(sequence_id, 0..self.len());
}
pub fn get_tokens(&self) -> &[String] {
&self.tokens[..]
}
pub fn get_word_ids(&self) -> &[Option<u32>] {
&self.words
}
pub fn get_word_ids_mut(&mut self) -> &mut [Option<u32>] {
&mut self.words
}
pub fn get_sequence_ids(&self) -> Vec<Option<usize>> {
let mut sequences = vec![None; self.len()];
for seq_id in 0..self.n_sequences() {
let range = self.sequence_range(seq_id);
let seq_len = range.len();
sequences.splice(range, std::iter::repeat(Some(seq_id)).take(seq_len));
}
sequences
}
pub fn get_ids(&self) -> &[u32] {
&self.ids
}
pub fn get_type_ids(&self) -> &[u32] {
&self.type_ids
}
pub fn set_type_ids(&mut self, type_ids: Vec<u32>) {
self.type_ids = type_ids;
}
pub fn get_offsets(&self) -> &[Offsets] {
&self.offsets
}
pub fn get_offsets_mut(&mut self) -> &mut [Offsets] {
&mut self.offsets
}
pub fn get_special_tokens_mask(&self) -> &[u32] {
&self.special_tokens_mask
}
pub fn get_attention_mask(&self) -> &[u32] {
&self.attention_mask
}
pub fn get_overflowing(&self) -> &Vec<Encoding> {
&self.overflowing
}
pub fn set_overflowing(&mut self, overflowing: Vec<Encoding>) {
self.overflowing = overflowing;
}
pub fn get_overflowing_mut(&mut self) -> &mut Vec<Encoding> {
&mut self.overflowing
}
pub fn take_overflowing(&mut self) -> Vec<Encoding> {
std::mem::take(&mut self.overflowing)
}
pub(crate) fn process_tokens_with_offsets_mut<F>(&mut self, func: F)
where
F: FnMut((usize, (&String, &mut Offsets))),
{
self.tokens
.iter()
.zip(self.offsets.iter_mut())
.enumerate()
.for_each(func)
}
/// Returns the range of token indices covering the given sequence id, used to retrieve
/// per-token information (word_id, offsets, ...) for that sequence
fn sequence_range(&self, sequence_id: usize) -> Range<usize> {
self.sequence_ranges
.get(&sequence_id)
.cloned()
.unwrap_or(0..self.len())
}
/// Returns the index of the sequence containing the given token
pub fn token_to_sequence(&self, token: usize) -> Option<usize> {
if token > self.len() {
None
} else if self.sequence_ranges.is_empty() {
Some(0)
} else {
self.sequence_ranges.iter().find_map(|(seq_id, range)| {
if range.contains(&token) {
Some(*seq_id)
} else {
None
}
})
}
}
/// Get the encoded tokens corresponding to the word at the given index in the input sequence,
/// with the form (start_token, end_token + 1)
pub fn word_to_tokens(&self, word: u32, sequence_id: usize) -> Option<(usize, usize)> {
let (mut start, mut end) = (None, None);
let sequence_range = self.sequence_range(sequence_id);
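// The scan below relies on word ids appearing in non-decreasing order within a
// sequence: `take_while` stops once we pass the requested word (`None` sorts before any `Some`).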
self.words
.get(sequence_range.clone())?
.iter()
.enumerate()
.take_while(|(_, w)| **w <= Some(word))
.filter(|(_, w)| **w == Some(word))
.for_each(|(i, _)| {
if start.is_none() || Some(i) < start {
start = Some(i);
}
if end.is_none() || Some(i) >= end {
end = Some(i + 1);
}
});
if let (Some(start), Some(end)) = (start, end) {
Some((sequence_range.start + start, sequence_range.start + end))
} else {
None
}
}
/// Get the offsets of the word at the given index in the input sequence.
pub fn word_to_chars(&self, word: u32, sequence_id: usize) -> Option<Offsets> {
self.word_to_tokens(word, sequence_id)
.and_then(|(start, end)| {
if end == 0 {
None
} else {
Some((self.offsets[start].0, self.offsets[end - 1].1))
}
})
}
/// Get the offsets of the token at the given index.
pub fn token_to_chars(&self, token: usize) -> Option<(usize, Offsets)> {
Some((
self.token_to_sequence(token)?,
self.offsets.get(token).copied()?,
))
}
/// Get the word that contains the token at the given index.
pub fn token_to_word(&self, token: usize) -> Option<(usize, u32)> {
Some((
self.token_to_sequence(token)?,
self.words.get(token).copied().flatten()?,
))
}
/// Get the token that contains the given char.
pub fn char_to_token(&self, pos: usize, sequence_id: usize) -> Option<usize> {
let sequence_range = self.sequence_range(sequence_id);
self.offsets
.get(sequence_range.clone())?
.iter()
.position(|(start, end)| pos >= *start && pos < *end)
.map(|pos| sequence_range.start + pos)
}
/// Get the word that contains the given char.
pub fn char_to_word(&self, pos: usize, sequence_id: usize) -> Option<u32> {
Some(
self.char_to_token(pos, sequence_id)
.and_then(|token| self.token_to_word(token))?
.1,
)
}
/// Truncate the current `Encoding`.
///
/// Panics if `stride >= max_len`
pub fn truncate(&mut self, max_len: usize, stride: usize, direction: TruncationDirection) {
let encoding_len = self.ids.len();
if max_len >= encoding_len {
return;
}
if max_len == 0 {
let o = std::mem::replace(self, Encoding::with_capacity(0));
self.overflowing.push(o);
return;
}
assert!(stride < max_len, "`stride` must be strictly less than `max_len={}` (note that `max_len` may be shorter than the max length of the original model, as it subtracts the number of special characters)", max_len);
// When truncating, we lose the `sequence_ranges` information.
self.sequence_ranges.clear();
let offset = max_len - stride;
let mut end = false;
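// Windows of `max_len` tokens start every `offset` tokens, so consecutive
// overflowing parts share `stride` tokens of overlapping context.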
let parts_ranges: Vec<(usize, usize)> = match direction {
TruncationDirection::Right => (0..encoding_len)
.step_by(offset)
.filter_map(|start| {
if !end {
let stop = std::cmp::min(start + max_len, encoding_len);
end = stop == encoding_len;
Some((start, stop))
} else {
None
}
})
.collect(),
TruncationDirection::Left => (0..encoding_len)
.rev()
.step_by(offset)
.filter_map(|stop| {
let stop = stop + 1;
let start = stop.saturating_sub(max_len);
if start < stop && !end {
end = start == 0;
Some((start, stop))
} else {
None
}
})
.collect(),
};
let mut i = 0;
let (start, stop) = parts_ranges[i];
let mut new_encoding = Encoding {
ids: self.ids[start..stop].to_vec(),
type_ids: self.type_ids[start..stop].to_vec(),
tokens: self.tokens[start..stop].to_vec(),
words: self.words[start..stop].to_vec(),
offsets: self.offsets[start..stop].to_vec(),
special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(),
attention_mask: self.attention_mask[start..stop].to_vec(),
overflowing: vec![],
sequence_ranges: HashMap::new(),
};
loop {
if i == parts_ranges.len() - 1 {
break;
}
i += 1;
let (start, stop) = parts_ranges[i];
new_encoding.overflowing.push(Encoding {
ids: self.ids[start..stop].to_vec(),
type_ids: self.type_ids[start..stop].to_vec(),
tokens: self.tokens[start..stop].to_vec(),
words: self.words[start..stop].to_vec(),
offsets: self.offsets[start..stop].to_vec(),
special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(),
attention_mask: self.attention_mask[start..stop].to_vec(),
overflowing: vec![],
sequence_ranges: HashMap::new(),
});
}
*self = new_encoding;
}
/// Merge all Encodings together
pub fn merge<I: IntoIterator<Item = Encoding>>(encodings: I, growing_offsets: bool) -> Self {
let mut encoding = Encoding::default();
// TODO this is suboptimal as we're doing this iteratively instead of preallocating
// all the encodings sizes all at once and only copying into this preallocated vector
// https://github.com/huggingface/tokenizers/pull/1049
// In order to fix, we just need to preallocate all vectors, then copy everything
// into it (and deal with overflowings correctly)
for sub in encodings {
encoding.merge_with(sub, growing_offsets);
}
encoding
}
/// Merge `self` with the given `Encoding`, in place.
pub fn merge_with(&mut self, pair: Encoding, growing_offsets: bool) {
// Handle merging the overflowing parts too: Combine them all
// In most cases, we expect `pair.overflowing.len() == 0`
let mut overflowings = vec![];
// 1. All our overflowings with all the others
for self_o in &self.overflowing {
// 1. The pair itself
let mut n_encoding = self_o.clone();
n_encoding.merge_with(pair.clone(), growing_offsets);
overflowings.push(n_encoding);
// 2. Its overflowings (this should rarely happen...)
for other_o in &pair.overflowing {
let mut n_encoding = self_o.clone();
n_encoding.merge_with(other_o.clone(), growing_offsets);
overflowings.push(n_encoding);
}
}
// 2. Ourself with all the other overflowings (this should rarely happen too...)
for other_o in &pair.overflowing {
let mut n_encoding = self.clone();
n_encoding.merge_with(other_o.clone(), growing_offsets);
overflowings.push(n_encoding);
}
// Finish by merging ourself with the other encoding
let original_self_len = self.len(); // Must be before any modification to self.ids
self.sequence_ranges
.extend(pair.sequence_ranges.into_iter().map(|(seq_id, range)| {
(
seq_id,
original_self_len + range.start..original_self_len + range.end,
)
}));
self.ids.extend(pair.ids);
self.type_ids.extend(pair.type_ids);
self.tokens.extend(pair.tokens);
self.words.extend(pair.words);
let starting_offset = if growing_offsets {
self.offsets.last().map_or(0, |o| o.1)
} else {
0
};
self.offsets.extend(
pair.offsets
.into_iter()
.map(|(start, end)| (start + starting_offset, end + starting_offset))
.collect::<Vec<_>>(),
);
self.special_tokens_mask.extend(pair.special_tokens_mask);
self.attention_mask.extend(pair.attention_mask);
self.overflowing = overflowings;
}
pub fn pad(
&mut self,
target_length: usize,
pad_id: u32,
pad_type_id: u32,
pad_token: &str,
direction: PaddingDirection,
) {
// Dispatch call to all the overflowings first
self.overflowing.maybe_par_iter_mut().for_each(|encoding| {
encoding.pad(target_length, pad_id, pad_type_id, pad_token, direction)
});
// Then check if we should pad ourself
if self.ids.len() >= target_length {
// Do nothing if the requested target length does not exceed our current length
return;
}
let pad_length = target_length - self.ids.len();
match direction {
PaddingDirection::Left => {
self.ids = (0..pad_length)
.map(|_| pad_id)
.chain(self.ids.drain(..))
.collect();
self.type_ids = (0..pad_length)
.map(|_| pad_type_id)
.chain(self.type_ids.drain(..))
.collect();
self.tokens = (0..pad_length)
.map(|_| pad_token.to_owned())
.chain(self.tokens.drain(..))
.collect();
self.words = (0..pad_length)
.map(|_| None)
.chain(self.words.drain(..))
.collect();
self.attention_mask = (0..pad_length)
.map(|_| 0)
.chain(self.attention_mask.drain(..))
.collect();
self.special_tokens_mask = (0..pad_length)
.map(|_| 1)
.chain(self.special_tokens_mask.drain(..))
.collect();
self.offsets = (0..pad_length)
.map(|_| (0, 0))
.chain(self.offsets.drain(..))
.collect();
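// Shift the recorded sequence ranges to account for the tokens prepended on the left.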
self.sequence_ranges
.iter_mut()
.for_each(|(_seq_id, range)| {
*range = (range.start + pad_length)..(range.end + pad_length)
});
}
PaddingDirection::Right => {
self.ids.extend((0..pad_length).map(|_| pad_id));
self.type_ids.extend((0..pad_length).map(|_| pad_type_id));
self.tokens
.extend((0..pad_length).map(|_| pad_token.to_owned()));
self.words.extend((0..pad_length).map(|_| None));
self.attention_mask.extend((0..pad_length).map(|_| 0));
self.special_tokens_mask.extend((0..pad_length).map(|_| 1));
self.offsets.extend((0..pad_length).map(|_| (0, 0)));
}
}
}
}
impl std::iter::FromIterator<Encoding> for Encoding {
fn from_iter<I: IntoIterator<Item = Encoding>>(iter: I) -> Self {
Self::merge(iter, false)
}
}
impl std::iter::FromIterator<(u32, String, (usize, usize), Option<u32>, u32)> for Encoding {
fn from_iter<I: IntoIterator<Item = (u32, String, (usize, usize), Option<u32>, u32)>>(
iter: I,
) -> Self {
let items = iter.into_iter();
let (lower, upper) = items.size_hint();
let length = upper.unwrap_or(lower);
let mut encoding = Self::with_capacity(length);
for (id, token, offsets, word, type_id) in items {
encoding.ids.push(id);
encoding.tokens.push(token);
encoding.offsets.push(offsets);
encoding.type_ids.push(type_id);
encoding.words.push(word);
encoding.special_tokens_mask.push(0);
encoding.attention_mask.push(1);
}
encoding
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::iter::FromIterator;
#[test]
fn merge_encodings() {
let mut a = Encoding {
ids: vec![1],
type_ids: vec![0],
tokens: vec![String::from("Hello ")],
words: vec![Some(0)],
offsets: vec![(0, 6)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
};
let b = Encoding {
ids: vec![2],
type_ids: vec![1],
tokens: vec![String::from("World!")],
words: vec![Some(0)],
offsets: vec![(0, 6)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
};
a.merge_with(b, true);
assert_eq!(
a,
Encoding {
ids: vec![1, 2],
type_ids: vec![0, 1],
tokens: vec![String::from("Hello "), String::from("World!")],
words: vec![Some(0), Some(0)],
offsets: vec![(0, 6), (6, 12)],
special_tokens_mask: vec![0, 0],
attention_mask: vec![1, 1],
..Default::default()
}
);
}
#[test]
fn truncate() {
let mut a = Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
..Default::default()
};
a.truncate(2, 0, TruncationDirection::Right);
assert_eq!(
a,
Encoding {
ids: vec![1, 2],
type_ids: vec![0, 0],
tokens: vec![String::from("Hello"), String::from("World")],
words: vec![Some(0), Some(1)],
offsets: vec![(0, 5), (6, 11)],
special_tokens_mask: vec![0, 0],
attention_mask: vec![1, 1],
overflowing: vec![Encoding {
ids: vec![3],
type_ids: vec![0],
tokens: vec![String::from("!")],
words: vec![Some(2)],
offsets: vec![(11, 12)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn truncate_to_empty() {
let mut a = Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
..Default::default()
};
a.truncate(0, 0, TruncationDirection::Right);
assert_eq!(
a,
Encoding {
overflowing: vec![Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
overflowing: vec![],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn truncate_overflow_with_stride() {
let mut enc = Encoding {
ids: vec![1, 2, 3, 4, 5],
type_ids: vec![0, 0, 0, 0, 0],
tokens: vec![
String::from("42"),
String::from("is"),
String::from("the"),
String::from("answer"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2), Some(3), Some(4)],
offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13), (13, 14)],
special_tokens_mask: vec![0, 0, 0, 0, 0],
attention_mask: vec![1, 1, 1, 1, 1],
overflowing: vec![],
..Default::default()
};
enc.truncate(4, 2, TruncationDirection::Right);
assert_eq!(
enc,
Encoding {
ids: vec![1, 2, 3, 4],
type_ids: vec![0, 0, 0, 0],
tokens: vec![
String::from("42"),
String::from("is"),
String::from("the"),
String::from("answer"),
],
words: vec![Some(0), Some(1), Some(2), Some(3)],
offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13)],
special_tokens_mask: vec![0, 0, 0, 0],
attention_mask: vec![1, 1, 1, 1],
overflowing: vec![Encoding {
ids: vec![3, 4, 5],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("the"),
String::from("answer"),
String::from("!"),
],
words: vec![Some(2), Some(3), Some(4)],
offsets: vec![(4, 7), (7, 13), (13, 14)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
overflowing: vec![],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn truncate_left() {
let mut a = Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
..Default::default()
};
a.truncate(2, 0, TruncationDirection::Left);
assert_eq!(
a,
Encoding {
ids: vec![2, 3],
type_ids: vec![0, 0],
tokens: vec![String::from("World"), String::from("!")],
words: vec![Some(1), Some(2)],
offsets: vec![(6, 11), (11, 12)],
special_tokens_mask: vec![0, 0],
attention_mask: vec![1, 1],
overflowing: vec![Encoding {
ids: vec![1],
type_ids: vec![0],
tokens: vec![String::from("Hello")],
words: vec![Some(0)],
offsets: vec![(0, 5)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn mappings() {
let encoding = Encoding {
ids: vec![0; 11], // Needed for Encoding::len
tokens: vec![
// First sequence:
"He".into(),
"llo".into(),
"won".into(),
"der".into(),
"ful".into(),
"friend".into(),
"!".into(),
// Second sequence:
"How".into(),
"are".into(),
"you".into(),
"?".into(),
],
offsets: vec![
// First sequence:
(0, 2),
(2, 5),
(7, 10),
(10, 13),
(13, 16),
(17, 23),
(23, 24),
// Second sequence:
(0, 3),
(4, 7),
(8, 11),
(11, 12),
],
words: vec![
// First sequence:
Some(0),
Some(0),
Some(1),
Some(1),
Some(1),
Some(2),
Some(3),
// Second sequence:
Some(0),
Some(1),
Some(2),
Some(3),
],
sequence_ranges: HashMap::from_iter(vec![(0, 0..7), (1, 7..11)]),
..Default::default()
};
assert_eq!(encoding.word_to_tokens(0, 0), Some((0, 2)));
assert_eq!(encoding.word_to_tokens(1, 0), Some((2, 5)));
assert_eq!(encoding.word_to_tokens(2, 0), Some((5, 6)));
assert_eq!(encoding.word_to_tokens(3, 0), Some((6, 7)));
assert_eq!(encoding.word_to_tokens(0, 1), Some((7, 8)));
assert_eq!(encoding.word_to_tokens(1, 1), Some((8, 9)));
assert_eq!(encoding.word_to_tokens(2, 1), Some((9, 10)));
assert_eq!(encoding.word_to_tokens(3, 1), Some((10, 11)));
assert_eq!(encoding.word_to_chars(0, 0), Some((0, 5)));
assert_eq!(encoding.word_to_chars(1, 0), Some((7, 16)));
assert_eq!(encoding.word_to_chars(0, 1), Some((0, 3)));
assert_eq!(encoding.word_to_chars(1, 1), Some((4, 7)));
assert_eq!(encoding.token_to_chars(0), Some((0, (0, 2))));
assert_eq!(encoding.token_to_chars(1), Some((0, (2, 5))));
assert_eq!(encoding.token_to_chars(7), Some((1, (0, 3))));
assert_eq!(encoding.token_to_chars(9), Some((1, (8, 11))));
assert_eq!(encoding.token_to_word(1), Some((0, 0)));
assert_eq!(encoding.token_to_word(2), Some((0, 1)));
assert_eq!(encoding.token_to_word(7), Some((1, 0)));
assert_eq!(encoding.token_to_word(9), Some((1, 2)));
assert_eq!(encoding.token_to_word(11), None);
assert_eq!(encoding.char_to_token(3, 0), Some(1));
assert_eq!(encoding.char_to_token(8, 0), Some(2));
assert_eq!(encoding.char_to_token(16, 0), None);
assert_eq!(encoding.char_to_token(23, 0), Some(6));
assert_eq!(encoding.char_to_token(2, 1), Some(7));
assert_eq!(encoding.char_to_token(9, 1), Some(9));
assert_eq!(encoding.char_to_word(3, 0), Some(0));
assert_eq!(encoding.char_to_word(8, 0), Some(1));
assert_eq!(encoding.char_to_word(16, 0), None);
assert_eq!(encoding.char_to_word(23, 0), Some(3));
assert_eq!(encoding.char_to_word(2, 1), Some(0));
assert_eq!(encoding.char_to_word(9, 1), Some(2));
}
#[test]
fn padding() {
let mut a = Encoding {
ids: vec![1],
type_ids: vec![0],
tokens: vec![String::from("Hello ")],
words: vec![Some(0)],
offsets: vec![(0, 6)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
sequence_ranges: HashMap::from([(0, 0..1)]),
..Default::default()
};
let target_length = 2;
let pad_id = 99;
let pad_type_id = 0;
let pad_token = "[PAD]";
a.pad(
target_length,
pad_id,
pad_type_id,
pad_token,
PaddingDirection::Left,
);
assert_eq!(a.sequence_ranges, HashMap::from([(0, 1..2)]));
}
}
| tokenizers/tokenizers/src/tokenizer/encoding.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/encoding.rs",
"repo_id": "tokenizers",
"token_count": 17192
} |
mod common;
use common::*;
use tokenizers::tokenizer::AddedToken;
#[test]
fn add_tokens() {
let mut tokenizer = get_empty();
assert_eq!(
tokenizer.add_special_tokens(&[
AddedToken::from("<cls>", true),
AddedToken::from("<sep>", true)
]),
2
);
assert_eq!(tokenizer.token_to_id("<cls>"), Some(0));
assert_eq!(tokenizer.token_to_id("<sep>"), Some(1));
assert_eq!(
tokenizer.add_tokens(&[
AddedToken::from("hello", false),
AddedToken::from("world", false)
]),
2
);
assert_eq!(tokenizer.token_to_id("hello"), Some(2));
assert_eq!(tokenizer.token_to_id("world"), Some(3));
}
#[test]
fn lstrip_tokens() {
let mut tokenizer = get_byte_level(true, false);
tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).lstrip(true)]);
let input = "I saw a <mask> 😺";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(
output.get_tokens(),
&["ĠI", "Ġsaw", "Ġa", " <mask>", "ĠðŁĺ", "º"]
);
assert_eq!(
output.get_offsets(),
&[(0, 1), (1, 5), (5, 7), (7, 14), (14, 19), (15, 19)]
);
}
#[test]
fn rstrip_tokens() {
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]);
let input = "I saw a <mask> 😺";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(
output.get_tokens(),
&["I", "Ġsaw", "Ġa", "Ġ", "<mask> ", "ðŁĺ", "º"]
);
// When `add_prefix_space = true` rstrip cannot work as a prefix space is added
// to the next token
let mut tokenizer = get_byte_level(true, false);
tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]);
let input = "I saw a <mask> 😺";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(
output.get_tokens(),
&["ĠI", "Ġsaw", "Ġa", "Ġ", "<mask> ", "ĠðŁĺ", "º"]
);
}
#[test]
fn single_word_tokens() {
// If `single_word = true` it shouldn't split `dancing`
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(true)]);
let input = "I like dancing";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġdancing"]);
// If `single_word = false` it should split `dancing`
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(false)]);
let input = "I like dancing";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġd", "anc", "ing"]);
}
#[test]
fn overlapping_tokens() {
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]);
let input = "I like dancing";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġ", "danc", "ing"]);
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("ike", true)]);
let output = tokenizer.encode(input, false).unwrap();
// Breaking change but following `transformers` breaking change.
// This behavior is deemed not used in practice:
// https://github.com/huggingface/transformers/pull/13220
// Order does NOT matter. (We could make it work again but the trie
// would need to keep insertion order too)
//
// assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġda", "nci", "ng"]);
assert_eq!(output.get_tokens(), &["I", "Ġl", "ike", "Ġ", "danc", "ing"]);
}
| tokenizers/tokenizers/tests/added_tokens.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/added_tokens.rs",
"repo_id": "tokenizers",
"token_count": 1770
} |
# Building a React application
In this tutorial, we'll be building a simple React application that performs multilingual translation using Transformers.js! The final product will look something like this:

Useful links:
- [Demo site](https://huggingface.co/spaces/Xenova/react-translator)
- [Source code](https://github.com/huggingface/transformers.js-examples/tree/main/react-translator)
## Prerequisites
- [Node.js](https://nodejs.org/en/) version 18+
- [npm](https://www.npmjs.com/) version 9+
## Step 1: Initialise the project
For this tutorial, we will use [Vite](https://vitejs.dev/) to initialise our project. Vite is a build tool that allows us to quickly set up a React application with minimal configuration. Run the following command in your terminal:
```bash
npm create vite@latest react-translator -- --template react
```
If prompted to install `create-vite`, type <kbd>y</kbd> and press <kbd>Enter</kbd>.
Next, enter the project directory and install the necessary development dependencies:
```bash
cd react-translator
npm install
```
To test that our application is working, we can run the following command:
```bash
npm run dev
```
Visiting the URL shown in the terminal (e.g., [http://localhost:5173/](http://localhost:5173/)) should show the default "React + Vite" landing page.
You can stop the development server by pressing <kbd>Ctrl</kbd> + <kbd>C</kbd> in the terminal.
## Step 2: Install and configure Transformers.js
Now we get to the fun part: adding machine learning to our application! First, install Transformers.js from [NPM](https://www.npmjs.com/package/@huggingface/transformers) with the following command:
```bash
npm install @huggingface/transformers
```
For this application, we will use the [Xenova/nllb-200-distilled-600M](https://huggingface.co/Xenova/nllb-200-distilled-600M) model, which can perform multilingual translation among 200 languages. Before we start, there are 2 things we need to take note of:
1. ML inference can be quite computationally intensive, so it's better to load and run the models in a separate thread from the main (UI) thread.
2. Since the model is quite large (>1 GB), we don't want to download it until the user clicks the "Translate" button.
We can achieve both of these goals by using a [Web Worker](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers) and some [React hooks](https://react.dev/reference/react).
1. Create a file called `worker.js` in the `src` directory. This script will do all the heavy lifting for us, including loading and running the translation pipeline. To ensure the model is only loaded once, we will create the `MyTranslationPipeline` class, which uses the [singleton pattern](https://en.wikipedia.org/wiki/Singleton_pattern) to lazily create a single instance of the pipeline when `getInstance` is first called and reuse it for all subsequent calls:
```javascript
import { pipeline, TextStreamer } from '@huggingface/transformers';
class MyTranslationPipeline {
static task = 'translation';
static model = 'Xenova/nllb-200-distilled-600M';
static instance = null;
static async getInstance(progress_callback = null) {
this.instance ??= pipeline(this.task, this.model, { progress_callback });
return this.instance;
}
}
```
2. Modify `App.jsx` in the `src` directory. This file is automatically created when initializing our React project, and will contain some boilerplate code. Inside the `App` function, let's create the web worker and store a reference to it using the `useRef` hook:
```jsx
// Remember to import the relevant hooks
import { useEffect, useRef, useState } from 'react'
import './App.css'
function App() {
// Create a reference to the worker object.
const worker = useRef(null);
// We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
useEffect(() => {
// Create the worker if it does not yet exist.
worker.current ??= new Worker(new URL('./worker.js', import.meta.url), {
type: 'module'
});
// Create a callback function for messages from the worker thread.
const onMessageReceived = (e) => {
// TODO: Will fill in later
};
// Attach the callback function as an event listener.
worker.current.addEventListener('message', onMessageReceived);
// Define a cleanup function for when the component is unmounted.
return () => worker.current.removeEventListener('message', onMessageReceived);
});
return (
// TODO: Rest of our app goes here...
)
}
export default App
```
## Step 3: Design the user interface
<Tip>
We recommend starting the development server again with `npm run dev`
(if not already running) so that you can see your changes in real-time.
</Tip>
First, let's define our components. Create a folder called `components` in the `src` directory, and create the following files:
1. `LanguageSelector.jsx`: This component will allow the user to select the input and output languages. Check out the full list of languages [here](https://github.com/huggingface/transformers.js-examples/tree/main/react-translator/src/components/LanguageSelector.jsx).
```jsx
const LANGUAGES = {
"Acehnese (Arabic script)": "ace_Arab",
"Acehnese (Latin script)": "ace_Latn",
"Afrikaans": "afr_Latn",
...
"Zulu": "zul_Latn",
}
export default function LanguageSelector({ type, onChange, defaultLanguage }) {
return (
<div className='language-selector'>
<label>{type}: </label>
<select onChange={onChange} defaultValue={defaultLanguage}>
{Object.entries(LANGUAGES).map(([key, value]) => {
return <option key={key} value={value}>{key}</option>
})}
</select>
</div>
)
}
```
2. `Progress.jsx`: This component will display the progress for downloading each model file.
```jsx
export default function Progress({ text, percentage }) {
percentage = percentage ?? 0;
return (
<div className="progress-container">
<div className='progress-bar' style={{ 'width': `${percentage}%` }}>
{text} ({`${percentage.toFixed(2)}%`})
</div>
</div>
);
}
```
We can now use these components in `App.jsx` by adding these imports to the top of the file:
```jsx
import LanguageSelector from './components/LanguageSelector';
import Progress from './components/Progress';
```
Let's also add some state variables to keep track of a few things in our application, like model loading, languages, input text, and output text. Add the following code to the beginning of the `App` function in `src/App.jsx`:
```jsx
function App() {
// Model loading
const [ready, setReady] = useState(null);
const [disabled, setDisabled] = useState(false);
const [progressItems, setProgressItems] = useState([]);
// Inputs and outputs
const [input, setInput] = useState('I love walking my dog.');
const [sourceLanguage, setSourceLanguage] = useState('eng_Latn');
const [targetLanguage, setTargetLanguage] = useState('fra_Latn');
const [output, setOutput] = useState('');
// rest of the code...
}
```
Next, we can add our custom components to the main `App` component. We will also add two `textarea` elements for input and output text, and a `button` to trigger the translation. Modify the `return` statement to look like this:
```jsx
return (
<>
<h1>Transformers.js</h1>
<h2>ML-powered multilingual translation in React!</h2>
<div className='container'>
<div className='language-container'>
<LanguageSelector type={"Source"} defaultLanguage={"eng_Latn"} onChange={x => setSourceLanguage(x.target.value)} />
<LanguageSelector type={"Target"} defaultLanguage={"fra_Latn"} onChange={x => setTargetLanguage(x.target.value)} />
</div>
<div className='textbox-container'>
<textarea value={input} rows={3} onChange={e => setInput(e.target.value)}></textarea>
<textarea value={output} rows={3} readOnly></textarea>
</div>
</div>
<button disabled={disabled} onClick={translate}>Translate</button>
<div className='progress-bars-container'>
{ready === false && (
<label>Loading models... (only run once)</label>
)}
{progressItems.map(data => (
<div key={data.file}>
<Progress text={data.file} percentage={data.progress} />
</div>
))}
</div>
</>
)
```
Don't worry about the `translate` function for now. We will define it in the next section.
Finally, we can add some CSS to make our app look a little nicer. Modify the following files in the `src` directory:
1. `index.css`:
<details>
<summary>View code</summary>
```css
:root {
font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif;
line-height: 1.5;
font-weight: 400;
color: #213547;
background-color: #ffffff;
font-synthesis: none;
text-rendering: optimizeLegibility;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
-webkit-text-size-adjust: 100%;
}
body {
margin: 0;
display: flex;
place-items: center;
min-width: 320px;
min-height: 100vh;
}
h1 {
font-size: 3.2em;
line-height: 1;
}
h1,
h2 {
margin: 8px;
}
select {
padding: 0.3em;
cursor: pointer;
}
textarea {
padding: 0.6em;
}
button {
padding: 0.6em 1.2em;
cursor: pointer;
font-weight: 500;
}
button[disabled] {
cursor: not-allowed;
}
select,
textarea,
button {
border-radius: 8px;
border: 1px solid transparent;
font-size: 1em;
font-family: inherit;
background-color: #f9f9f9;
transition: border-color 0.25s;
}
select:hover,
textarea:hover,
button:not([disabled]):hover {
border-color: #646cff;
}
select:focus,
select:focus-visible,
textarea:focus,
textarea:focus-visible,
button:focus,
button:focus-visible {
outline: 4px auto -webkit-focus-ring-color;
}
```
</details>
1. `App.css`
<details>
<summary>View code</summary>
```css
#root {
max-width: 1280px;
margin: 0 auto;
padding: 2rem;
text-align: center;
}
.language-container {
display: flex;
gap: 20px;
}
.textbox-container {
display: flex;
justify-content: center;
gap: 20px;
width: 800px;
}
.textbox-container>textarea, .language-selector {
width: 50%;
}
.language-selector>select {
width: 150px;
}
.progress-container {
position: relative;
font-size: 14px;
color: white;
background-color: #e9ecef;
border: solid 1px;
border-radius: 8px;
text-align: left;
overflow: hidden;
}
.progress-bar {
padding: 0 4px;
z-index: 0;
top: 0;
width: 1%;
overflow: hidden;
background-color: #007bff;
white-space: nowrap;
}
.progress-text {
z-index: 2;
}
.selector-container {
display: flex;
gap: 20px;
}
.progress-bars-container {
padding: 8px;
height: 140px;
}
.container {
margin: 25px;
display: flex;
flex-direction: column;
gap: 10px;
}
```
</details>
## Step 4: Connecting everything together
Now that we have a basic user interface set up, we can finally connect everything together.
First, let's define the `translate` function, which will be called when the user clicks the `Translate` button. This sends a message (containing the input text, source language, and target language) to the worker thread for processing. We will also disable the button so the user doesn't click it multiple times. Add the following code just before the `return` statement in the `App` function:
```jsx
const translate = () => {
setDisabled(true);
setOutput('');
worker.current.postMessage({
text: input,
src_lang: sourceLanguage,
tgt_lang: targetLanguage,
});
}
```
Now, let's add an event listener in `src/worker.js` to listen for messages from the main thread. We will send back messages (e.g., for model loading progress and text streaming) to the main thread with `self.postMessage`.
```javascript
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Retrieve the translation pipeline. When called for the first time,
// this will load the pipeline and save it for future use.
const translator = await MyTranslationPipeline.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
// Capture partial output as it streams from the pipeline
const streamer = new TextStreamer(translator.tokenizer, {
skip_prompt: true,
skip_special_tokens: true,
callback_function: function (text) {
self.postMessage({
status: 'update',
output: text
});
}
});
// Actually perform the translation
const output = await translator(event.data.text, {
tgt_lang: event.data.tgt_lang,
src_lang: event.data.src_lang,
// Allows for partial output to be captured
streamer,
});
// Send the output back to the main thread
self.postMessage({
status: 'complete',
output,
});
});
```
Finally, let's fill in our `onMessageReceived` function in `src/App.jsx`, which will update the application state in response to messages from the worker thread. Add the following code inside the `useEffect` hook we defined earlier:
```jsx
const onMessageReceived = (e) => {
switch (e.data.status) {
case 'initiate':
// A model file has started loading: add a new progress item to the list.
setReady(false);
setProgressItems(prev => [...prev, e.data]);
break;
case 'progress':
// Model file progress: update one of the progress items.
setProgressItems(
prev => prev.map(item => {
if (item.file === e.data.file) {
return { ...item, progress: e.data.progress }
}
return item;
})
);
break;
case 'done':
// Model file loaded: remove the progress item from the list.
setProgressItems(
prev => prev.filter(item => item.file !== e.data.file)
);
break;
case 'ready':
// Pipeline ready: the worker is ready to accept messages.
setReady(true);
break;
case 'update':
// Generation update: update the output text.
setOutput(o => o + e.data.output);
break;
case 'complete':
// Generation complete: re-enable the "Translate" button
setDisabled(false);
break;
}
};
```
You can now run the application with `npm run dev` and perform multilingual translation directly in your browser!
## (Optional) Step 5: Build and deploy
To build your application, simply run `npm run build`. This will bundle your application and output the static files to the `dist` folder.
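If you want to check the production build locally before deploying, the Vite template also includes a preview script (assuming you kept the default `package.json` scripts generated by `create-vite`):
```bash
npm run build
npm run preview
```
This serves the contents of the `dist` folder on a local port so you can verify that the bundled app behaves the same as in development.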
For this demo, we will deploy our application as a static [Hugging Face Space](https://huggingface.co/docs/hub/spaces), but you can deploy it anywhere you like! If you haven't already, you can create a free Hugging Face account [here](https://huggingface.co/join).
1. Visit [https://huggingface.co/new-space](https://huggingface.co/new-space) and fill in the form. Remember to select "Static" as the space type.
2. Go to "Files" → "Add file" → "Upload files". Drag the `index.html` file and `public/` folder from the `dist` folder into the upload box and click "Upload". After they have uploaded, scroll down to the button and click "Commit changes to main".
**That's it!** Your application should now be live at `https://huggingface.co/spaces/<your-username>/<your-space-name>`!
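If you prefer the command line over the web upload in step 2 above, the `huggingface_hub` CLI can push the build output for you. The command below is only a sketch: it assumes you have installed `huggingface_hub`, logged in with `huggingface-cli login`, and already created the Space (check `huggingface-cli upload --help` for the exact syntax in your version).
```bash
# Upload the contents of `dist` to the root of your static Space.
# Replace <your-username>/<your-space-name> with your Space id.
huggingface-cli upload <your-username>/<your-space-name> ./dist . --repo-type=space
```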
| transformers.js/docs/source/tutorials/react.md/0 | {
"file_path": "transformers.js/docs/source/tutorials/react.md",
"repo_id": "transformers.js",
"token_count": 5891
} |
@font-face {
font-family: "bootstrap-icons";
src: url("./bootstrap-icons.woff2") format("woff2"),
url("./bootstrap-icons.woff") format("woff");
}
.bi::before,
[class^="bi-"]::before,
[class*=" bi-"]::before {
display: inline-block;
font-family: bootstrap-icons !important;
font-style: normal;
font-weight: normal !important;
font-variant: normal;
text-transform: none;
line-height: 1;
vertical-align: -.125em;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
.bi-123::before { content: "\f67f"; }
.bi-alarm-fill::before { content: "\f101"; }
.bi-alarm::before { content: "\f102"; }
.bi-align-bottom::before { content: "\f103"; }
.bi-align-center::before { content: "\f104"; }
.bi-align-end::before { content: "\f105"; }
.bi-align-middle::before { content: "\f106"; }
.bi-align-start::before { content: "\f107"; }
.bi-align-top::before { content: "\f108"; }
.bi-alt::before { content: "\f109"; }
.bi-app-indicator::before { content: "\f10a"; }
.bi-app::before { content: "\f10b"; }
.bi-archive-fill::before { content: "\f10c"; }
.bi-archive::before { content: "\f10d"; }
.bi-arrow-90deg-down::before { content: "\f10e"; }
.bi-arrow-90deg-left::before { content: "\f10f"; }
.bi-arrow-90deg-right::before { content: "\f110"; }
.bi-arrow-90deg-up::before { content: "\f111"; }
.bi-arrow-bar-down::before { content: "\f112"; }
.bi-arrow-bar-left::before { content: "\f113"; }
.bi-arrow-bar-right::before { content: "\f114"; }
.bi-arrow-bar-up::before { content: "\f115"; }
.bi-arrow-clockwise::before { content: "\f116"; }
.bi-arrow-counterclockwise::before { content: "\f117"; }
.bi-arrow-down-circle-fill::before { content: "\f118"; }
.bi-arrow-down-circle::before { content: "\f119"; }
.bi-arrow-down-left-circle-fill::before { content: "\f11a"; }
.bi-arrow-down-left-circle::before { content: "\f11b"; }
.bi-arrow-down-left-square-fill::before { content: "\f11c"; }
.bi-arrow-down-left-square::before { content: "\f11d"; }
.bi-arrow-down-left::before { content: "\f11e"; }
.bi-arrow-down-right-circle-fill::before { content: "\f11f"; }
.bi-arrow-down-right-circle::before { content: "\f120"; }
.bi-arrow-down-right-square-fill::before { content: "\f121"; }
.bi-arrow-down-right-square::before { content: "\f122"; }
.bi-arrow-down-right::before { content: "\f123"; }
.bi-arrow-down-short::before { content: "\f124"; }
.bi-arrow-down-square-fill::before { content: "\f125"; }
.bi-arrow-down-square::before { content: "\f126"; }
.bi-arrow-down-up::before { content: "\f127"; }
.bi-arrow-down::before { content: "\f128"; }
.bi-arrow-left-circle-fill::before { content: "\f129"; }
.bi-arrow-left-circle::before { content: "\f12a"; }
.bi-arrow-left-right::before { content: "\f12b"; }
.bi-arrow-left-short::before { content: "\f12c"; }
.bi-arrow-left-square-fill::before { content: "\f12d"; }
.bi-arrow-left-square::before { content: "\f12e"; }
.bi-arrow-left::before { content: "\f12f"; }
.bi-arrow-repeat::before { content: "\f130"; }
.bi-arrow-return-left::before { content: "\f131"; }
.bi-arrow-return-right::before { content: "\f132"; }
.bi-arrow-right-circle-fill::before { content: "\f133"; }
.bi-arrow-right-circle::before { content: "\f134"; }
.bi-arrow-right-short::before { content: "\f135"; }
.bi-arrow-right-square-fill::before { content: "\f136"; }
.bi-arrow-right-square::before { content: "\f137"; }
.bi-arrow-right::before { content: "\f138"; }
.bi-arrow-up-circle-fill::before { content: "\f139"; }
.bi-arrow-up-circle::before { content: "\f13a"; }
.bi-arrow-up-left-circle-fill::before { content: "\f13b"; }
.bi-arrow-up-left-circle::before { content: "\f13c"; }
.bi-arrow-up-left-square-fill::before { content: "\f13d"; }
.bi-arrow-up-left-square::before { content: "\f13e"; }
.bi-arrow-up-left::before { content: "\f13f"; }
.bi-arrow-up-right-circle-fill::before { content: "\f140"; }
.bi-arrow-up-right-circle::before { content: "\f141"; }
.bi-arrow-up-right-square-fill::before { content: "\f142"; }
.bi-arrow-up-right-square::before { content: "\f143"; }
.bi-arrow-up-right::before { content: "\f144"; }
.bi-arrow-up-short::before { content: "\f145"; }
.bi-arrow-up-square-fill::before { content: "\f146"; }
.bi-arrow-up-square::before { content: "\f147"; }
.bi-arrow-up::before { content: "\f148"; }
.bi-arrows-angle-contract::before { content: "\f149"; }
.bi-arrows-angle-expand::before { content: "\f14a"; }
.bi-arrows-collapse::before { content: "\f14b"; }
.bi-arrows-expand::before { content: "\f14c"; }
.bi-arrows-fullscreen::before { content: "\f14d"; }
.bi-arrows-move::before { content: "\f14e"; }
.bi-aspect-ratio-fill::before { content: "\f14f"; }
.bi-aspect-ratio::before { content: "\f150"; }
.bi-asterisk::before { content: "\f151"; }
.bi-at::before { content: "\f152"; }
.bi-award-fill::before { content: "\f153"; }
.bi-award::before { content: "\f154"; }
.bi-back::before { content: "\f155"; }
.bi-backspace-fill::before { content: "\f156"; }
.bi-backspace-reverse-fill::before { content: "\f157"; }
.bi-backspace-reverse::before { content: "\f158"; }
.bi-backspace::before { content: "\f159"; }
.bi-badge-3d-fill::before { content: "\f15a"; }
.bi-badge-3d::before { content: "\f15b"; }
.bi-badge-4k-fill::before { content: "\f15c"; }
.bi-badge-4k::before { content: "\f15d"; }
.bi-badge-8k-fill::before { content: "\f15e"; }
.bi-badge-8k::before { content: "\f15f"; }
.bi-badge-ad-fill::before { content: "\f160"; }
.bi-badge-ad::before { content: "\f161"; }
.bi-badge-ar-fill::before { content: "\f162"; }
.bi-badge-ar::before { content: "\f163"; }
.bi-badge-cc-fill::before { content: "\f164"; }
.bi-badge-cc::before { content: "\f165"; }
.bi-badge-hd-fill::before { content: "\f166"; }
.bi-badge-hd::before { content: "\f167"; }
.bi-badge-tm-fill::before { content: "\f168"; }
.bi-badge-tm::before { content: "\f169"; }
.bi-badge-vo-fill::before { content: "\f16a"; }
.bi-badge-vo::before { content: "\f16b"; }
.bi-badge-vr-fill::before { content: "\f16c"; }
.bi-badge-vr::before { content: "\f16d"; }
.bi-badge-wc-fill::before { content: "\f16e"; }
.bi-badge-wc::before { content: "\f16f"; }
.bi-bag-check-fill::before { content: "\f170"; }
.bi-bag-check::before { content: "\f171"; }
.bi-bag-dash-fill::before { content: "\f172"; }
.bi-bag-dash::before { content: "\f173"; }
.bi-bag-fill::before { content: "\f174"; }
.bi-bag-plus-fill::before { content: "\f175"; }
.bi-bag-plus::before { content: "\f176"; }
.bi-bag-x-fill::before { content: "\f177"; }
.bi-bag-x::before { content: "\f178"; }
.bi-bag::before { content: "\f179"; }
.bi-bar-chart-fill::before { content: "\f17a"; }
.bi-bar-chart-line-fill::before { content: "\f17b"; }
.bi-bar-chart-line::before { content: "\f17c"; }
.bi-bar-chart-steps::before { content: "\f17d"; }
.bi-bar-chart::before { content: "\f17e"; }
.bi-basket-fill::before { content: "\f17f"; }
.bi-basket::before { content: "\f180"; }
.bi-basket2-fill::before { content: "\f181"; }
.bi-basket2::before { content: "\f182"; }
.bi-basket3-fill::before { content: "\f183"; }
.bi-basket3::before { content: "\f184"; }
.bi-battery-charging::before { content: "\f185"; }
.bi-battery-full::before { content: "\f186"; }
.bi-battery-half::before { content: "\f187"; }
.bi-battery::before { content: "\f188"; }
.bi-bell-fill::before { content: "\f189"; }
.bi-bell::before { content: "\f18a"; }
.bi-bezier::before { content: "\f18b"; }
.bi-bezier2::before { content: "\f18c"; }
.bi-bicycle::before { content: "\f18d"; }
.bi-binoculars-fill::before { content: "\f18e"; }
.bi-binoculars::before { content: "\f18f"; }
.bi-blockquote-left::before { content: "\f190"; }
.bi-blockquote-right::before { content: "\f191"; }
.bi-book-fill::before { content: "\f192"; }
.bi-book-half::before { content: "\f193"; }
.bi-book::before { content: "\f194"; }
.bi-bookmark-check-fill::before { content: "\f195"; }
.bi-bookmark-check::before { content: "\f196"; }
.bi-bookmark-dash-fill::before { content: "\f197"; }
.bi-bookmark-dash::before { content: "\f198"; }
.bi-bookmark-fill::before { content: "\f199"; }
.bi-bookmark-heart-fill::before { content: "\f19a"; }
.bi-bookmark-heart::before { content: "\f19b"; }
.bi-bookmark-plus-fill::before { content: "\f19c"; }
.bi-bookmark-plus::before { content: "\f19d"; }
.bi-bookmark-star-fill::before { content: "\f19e"; }
.bi-bookmark-star::before { content: "\f19f"; }
.bi-bookmark-x-fill::before { content: "\f1a0"; }
.bi-bookmark-x::before { content: "\f1a1"; }
.bi-bookmark::before { content: "\f1a2"; }
.bi-bookmarks-fill::before { content: "\f1a3"; }
.bi-bookmarks::before { content: "\f1a4"; }
.bi-bookshelf::before { content: "\f1a5"; }
.bi-bootstrap-fill::before { content: "\f1a6"; }
.bi-bootstrap-reboot::before { content: "\f1a7"; }
.bi-bootstrap::before { content: "\f1a8"; }
.bi-border-all::before { content: "\f1a9"; }
.bi-border-bottom::before { content: "\f1aa"; }
.bi-border-center::before { content: "\f1ab"; }
.bi-border-inner::before { content: "\f1ac"; }
.bi-border-left::before { content: "\f1ad"; }
.bi-border-middle::before { content: "\f1ae"; }
.bi-border-outer::before { content: "\f1af"; }
.bi-border-right::before { content: "\f1b0"; }
.bi-border-style::before { content: "\f1b1"; }
.bi-border-top::before { content: "\f1b2"; }
.bi-border-width::before { content: "\f1b3"; }
.bi-border::before { content: "\f1b4"; }
.bi-bounding-box-circles::before { content: "\f1b5"; }
.bi-bounding-box::before { content: "\f1b6"; }
.bi-box-arrow-down-left::before { content: "\f1b7"; }
.bi-box-arrow-down-right::before { content: "\f1b8"; }
.bi-box-arrow-down::before { content: "\f1b9"; }
.bi-box-arrow-in-down-left::before { content: "\f1ba"; }
.bi-box-arrow-in-down-right::before { content: "\f1bb"; }
.bi-box-arrow-in-down::before { content: "\f1bc"; }
.bi-box-arrow-in-left::before { content: "\f1bd"; }
.bi-box-arrow-in-right::before { content: "\f1be"; }
.bi-box-arrow-in-up-left::before { content: "\f1bf"; }
.bi-box-arrow-in-up-right::before { content: "\f1c0"; }
.bi-box-arrow-in-up::before { content: "\f1c1"; }
.bi-box-arrow-left::before { content: "\f1c2"; }
.bi-box-arrow-right::before { content: "\f1c3"; }
.bi-box-arrow-up-left::before { content: "\f1c4"; }
.bi-box-arrow-up-right::before { content: "\f1c5"; }
.bi-box-arrow-up::before { content: "\f1c6"; }
.bi-box-seam::before { content: "\f1c7"; }
.bi-box::before { content: "\f1c8"; }
.bi-braces::before { content: "\f1c9"; }
.bi-bricks::before { content: "\f1ca"; }
.bi-briefcase-fill::before { content: "\f1cb"; }
.bi-briefcase::before { content: "\f1cc"; }
.bi-brightness-alt-high-fill::before { content: "\f1cd"; }
.bi-brightness-alt-high::before { content: "\f1ce"; }
.bi-brightness-alt-low-fill::before { content: "\f1cf"; }
.bi-brightness-alt-low::before { content: "\f1d0"; }
.bi-brightness-high-fill::before { content: "\f1d1"; }
.bi-brightness-high::before { content: "\f1d2"; }
.bi-brightness-low-fill::before { content: "\f1d3"; }
.bi-brightness-low::before { content: "\f1d4"; }
.bi-broadcast-pin::before { content: "\f1d5"; }
.bi-broadcast::before { content: "\f1d6"; }
.bi-brush-fill::before { content: "\f1d7"; }
.bi-brush::before { content: "\f1d8"; }
.bi-bucket-fill::before { content: "\f1d9"; }
.bi-bucket::before { content: "\f1da"; }
.bi-bug-fill::before { content: "\f1db"; }
.bi-bug::before { content: "\f1dc"; }
.bi-building::before { content: "\f1dd"; }
.bi-bullseye::before { content: "\f1de"; }
.bi-calculator-fill::before { content: "\f1df"; }
.bi-calculator::before { content: "\f1e0"; }
.bi-calendar-check-fill::before { content: "\f1e1"; }
.bi-calendar-check::before { content: "\f1e2"; }
.bi-calendar-date-fill::before { content: "\f1e3"; }
.bi-calendar-date::before { content: "\f1e4"; }
.bi-calendar-day-fill::before { content: "\f1e5"; }
.bi-calendar-day::before { content: "\f1e6"; }
.bi-calendar-event-fill::before { content: "\f1e7"; }
.bi-calendar-event::before { content: "\f1e8"; }
.bi-calendar-fill::before { content: "\f1e9"; }
.bi-calendar-minus-fill::before { content: "\f1ea"; }
.bi-calendar-minus::before { content: "\f1eb"; }
.bi-calendar-month-fill::before { content: "\f1ec"; }
.bi-calendar-month::before { content: "\f1ed"; }
.bi-calendar-plus-fill::before { content: "\f1ee"; }
.bi-calendar-plus::before { content: "\f1ef"; }
.bi-calendar-range-fill::before { content: "\f1f0"; }
.bi-calendar-range::before { content: "\f1f1"; }
.bi-calendar-week-fill::before { content: "\f1f2"; }
.bi-calendar-week::before { content: "\f1f3"; }
.bi-calendar-x-fill::before { content: "\f1f4"; }
.bi-calendar-x::before { content: "\f1f5"; }
.bi-calendar::before { content: "\f1f6"; }
.bi-calendar2-check-fill::before { content: "\f1f7"; }
.bi-calendar2-check::before { content: "\f1f8"; }
.bi-calendar2-date-fill::before { content: "\f1f9"; }
.bi-calendar2-date::before { content: "\f1fa"; }
.bi-calendar2-day-fill::before { content: "\f1fb"; }
.bi-calendar2-day::before { content: "\f1fc"; }
.bi-calendar2-event-fill::before { content: "\f1fd"; }
.bi-calendar2-event::before { content: "\f1fe"; }
.bi-calendar2-fill::before { content: "\f1ff"; }
.bi-calendar2-minus-fill::before { content: "\f200"; }
.bi-calendar2-minus::before { content: "\f201"; }
.bi-calendar2-month-fill::before { content: "\f202"; }
.bi-calendar2-month::before { content: "\f203"; }
.bi-calendar2-plus-fill::before { content: "\f204"; }
.bi-calendar2-plus::before { content: "\f205"; }
.bi-calendar2-range-fill::before { content: "\f206"; }
.bi-calendar2-range::before { content: "\f207"; }
.bi-calendar2-week-fill::before { content: "\f208"; }
.bi-calendar2-week::before { content: "\f209"; }
.bi-calendar2-x-fill::before { content: "\f20a"; }
.bi-calendar2-x::before { content: "\f20b"; }
.bi-calendar2::before { content: "\f20c"; }
.bi-calendar3-event-fill::before { content: "\f20d"; }
.bi-calendar3-event::before { content: "\f20e"; }
.bi-calendar3-fill::before { content: "\f20f"; }
.bi-calendar3-range-fill::before { content: "\f210"; }
.bi-calendar3-range::before { content: "\f211"; }
.bi-calendar3-week-fill::before { content: "\f212"; }
.bi-calendar3-week::before { content: "\f213"; }
.bi-calendar3::before { content: "\f214"; }
.bi-calendar4-event::before { content: "\f215"; }
.bi-calendar4-range::before { content: "\f216"; }
.bi-calendar4-week::before { content: "\f217"; }
.bi-calendar4::before { content: "\f218"; }
.bi-camera-fill::before { content: "\f219"; }
.bi-camera-reels-fill::before { content: "\f21a"; }
.bi-camera-reels::before { content: "\f21b"; }
.bi-camera-video-fill::before { content: "\f21c"; }
.bi-camera-video-off-fill::before { content: "\f21d"; }
.bi-camera-video-off::before { content: "\f21e"; }
.bi-camera-video::before { content: "\f21f"; }
.bi-camera::before { content: "\f220"; }
.bi-camera2::before { content: "\f221"; }
.bi-capslock-fill::before { content: "\f222"; }
.bi-capslock::before { content: "\f223"; }
.bi-card-checklist::before { content: "\f224"; }
.bi-card-heading::before { content: "\f225"; }
.bi-card-image::before { content: "\f226"; }
.bi-card-list::before { content: "\f227"; }
.bi-card-text::before { content: "\f228"; }
.bi-caret-down-fill::before { content: "\f229"; }
.bi-caret-down-square-fill::before { content: "\f22a"; }
.bi-caret-down-square::before { content: "\f22b"; }
.bi-caret-down::before { content: "\f22c"; }
.bi-caret-left-fill::before { content: "\f22d"; }
.bi-caret-left-square-fill::before { content: "\f22e"; }
.bi-caret-left-square::before { content: "\f22f"; }
.bi-caret-left::before { content: "\f230"; }
.bi-caret-right-fill::before { content: "\f231"; }
.bi-caret-right-square-fill::before { content: "\f232"; }
.bi-caret-right-square::before { content: "\f233"; }
.bi-caret-right::before { content: "\f234"; }
.bi-caret-up-fill::before { content: "\f235"; }
.bi-caret-up-square-fill::before { content: "\f236"; }
.bi-caret-up-square::before { content: "\f237"; }
.bi-caret-up::before { content: "\f238"; }
.bi-cart-check-fill::before { content: "\f239"; }
.bi-cart-check::before { content: "\f23a"; }
.bi-cart-dash-fill::before { content: "\f23b"; }
.bi-cart-dash::before { content: "\f23c"; }
.bi-cart-fill::before { content: "\f23d"; }
.bi-cart-plus-fill::before { content: "\f23e"; }
.bi-cart-plus::before { content: "\f23f"; }
.bi-cart-x-fill::before { content: "\f240"; }
.bi-cart-x::before { content: "\f241"; }
.bi-cart::before { content: "\f242"; }
.bi-cart2::before { content: "\f243"; }
.bi-cart3::before { content: "\f244"; }
.bi-cart4::before { content: "\f245"; }
.bi-cash-stack::before { content: "\f246"; }
.bi-cash::before { content: "\f247"; }
.bi-cast::before { content: "\f248"; }
.bi-chat-dots-fill::before { content: "\f249"; }
.bi-chat-dots::before { content: "\f24a"; }
.bi-chat-fill::before { content: "\f24b"; }
.bi-chat-left-dots-fill::before { content: "\f24c"; }
.bi-chat-left-dots::before { content: "\f24d"; }
.bi-chat-left-fill::before { content: "\f24e"; }
.bi-chat-left-quote-fill::before { content: "\f24f"; }
.bi-chat-left-quote::before { content: "\f250"; }
.bi-chat-left-text-fill::before { content: "\f251"; }
.bi-chat-left-text::before { content: "\f252"; }
.bi-chat-left::before { content: "\f253"; }
.bi-chat-quote-fill::before { content: "\f254"; }
.bi-chat-quote::before { content: "\f255"; }
.bi-chat-right-dots-fill::before { content: "\f256"; }
.bi-chat-right-dots::before { content: "\f257"; }
.bi-chat-right-fill::before { content: "\f258"; }
.bi-chat-right-quote-fill::before { content: "\f259"; }
.bi-chat-right-quote::before { content: "\f25a"; }
.bi-chat-right-text-fill::before { content: "\f25b"; }
.bi-chat-right-text::before { content: "\f25c"; }
.bi-chat-right::before { content: "\f25d"; }
.bi-chat-square-dots-fill::before { content: "\f25e"; }
.bi-chat-square-dots::before { content: "\f25f"; }
.bi-chat-square-fill::before { content: "\f260"; }
.bi-chat-square-quote-fill::before { content: "\f261"; }
.bi-chat-square-quote::before { content: "\f262"; }
.bi-chat-square-text-fill::before { content: "\f263"; }
.bi-chat-square-text::before { content: "\f264"; }
.bi-chat-square::before { content: "\f265"; }
.bi-chat-text-fill::before { content: "\f266"; }
.bi-chat-text::before { content: "\f267"; }
.bi-chat::before { content: "\f268"; }
.bi-check-all::before { content: "\f269"; }
.bi-check-circle-fill::before { content: "\f26a"; }
.bi-check-circle::before { content: "\f26b"; }
.bi-check-square-fill::before { content: "\f26c"; }
.bi-check-square::before { content: "\f26d"; }
.bi-check::before { content: "\f26e"; }
.bi-check2-all::before { content: "\f26f"; }
.bi-check2-circle::before { content: "\f270"; }
.bi-check2-square::before { content: "\f271"; }
.bi-check2::before { content: "\f272"; }
.bi-chevron-bar-contract::before { content: "\f273"; }
.bi-chevron-bar-down::before { content: "\f274"; }
.bi-chevron-bar-expand::before { content: "\f275"; }
.bi-chevron-bar-left::before { content: "\f276"; }
.bi-chevron-bar-right::before { content: "\f277"; }
.bi-chevron-bar-up::before { content: "\f278"; }
.bi-chevron-compact-down::before { content: "\f279"; }
.bi-chevron-compact-left::before { content: "\f27a"; }
.bi-chevron-compact-right::before { content: "\f27b"; }
.bi-chevron-compact-up::before { content: "\f27c"; }
.bi-chevron-contract::before { content: "\f27d"; }
.bi-chevron-double-down::before { content: "\f27e"; }
.bi-chevron-double-left::before { content: "\f27f"; }
.bi-chevron-double-right::before { content: "\f280"; }
.bi-chevron-double-up::before { content: "\f281"; }
.bi-chevron-down::before { content: "\f282"; }
.bi-chevron-expand::before { content: "\f283"; }
.bi-chevron-left::before { content: "\f284"; }
.bi-chevron-right::before { content: "\f285"; }
.bi-chevron-up::before { content: "\f286"; }
.bi-circle-fill::before { content: "\f287"; }
.bi-circle-half::before { content: "\f288"; }
.bi-circle-square::before { content: "\f289"; }
.bi-circle::before { content: "\f28a"; }
.bi-clipboard-check::before { content: "\f28b"; }
.bi-clipboard-data::before { content: "\f28c"; }
.bi-clipboard-minus::before { content: "\f28d"; }
.bi-clipboard-plus::before { content: "\f28e"; }
.bi-clipboard-x::before { content: "\f28f"; }
.bi-clipboard::before { content: "\f290"; }
.bi-clock-fill::before { content: "\f291"; }
.bi-clock-history::before { content: "\f292"; }
.bi-clock::before { content: "\f293"; }
.bi-cloud-arrow-down-fill::before { content: "\f294"; }
.bi-cloud-arrow-down::before { content: "\f295"; }
.bi-cloud-arrow-up-fill::before { content: "\f296"; }
.bi-cloud-arrow-up::before { content: "\f297"; }
.bi-cloud-check-fill::before { content: "\f298"; }
.bi-cloud-check::before { content: "\f299"; }
.bi-cloud-download-fill::before { content: "\f29a"; }
.bi-cloud-download::before { content: "\f29b"; }
.bi-cloud-drizzle-fill::before { content: "\f29c"; }
.bi-cloud-drizzle::before { content: "\f29d"; }
.bi-cloud-fill::before { content: "\f29e"; }
.bi-cloud-fog-fill::before { content: "\f29f"; }
.bi-cloud-fog::before { content: "\f2a0"; }
.bi-cloud-fog2-fill::before { content: "\f2a1"; }
.bi-cloud-fog2::before { content: "\f2a2"; }
.bi-cloud-hail-fill::before { content: "\f2a3"; }
.bi-cloud-hail::before { content: "\f2a4"; }
.bi-cloud-haze-1::before { content: "\f2a5"; }
.bi-cloud-haze-fill::before { content: "\f2a6"; }
.bi-cloud-haze::before { content: "\f2a7"; }
.bi-cloud-haze2-fill::before { content: "\f2a8"; }
.bi-cloud-lightning-fill::before { content: "\f2a9"; }
.bi-cloud-lightning-rain-fill::before { content: "\f2aa"; }
.bi-cloud-lightning-rain::before { content: "\f2ab"; }
.bi-cloud-lightning::before { content: "\f2ac"; }
.bi-cloud-minus-fill::before { content: "\f2ad"; }
.bi-cloud-minus::before { content: "\f2ae"; }
.bi-cloud-moon-fill::before { content: "\f2af"; }
.bi-cloud-moon::before { content: "\f2b0"; }
.bi-cloud-plus-fill::before { content: "\f2b1"; }
.bi-cloud-plus::before { content: "\f2b2"; }
.bi-cloud-rain-fill::before { content: "\f2b3"; }
.bi-cloud-rain-heavy-fill::before { content: "\f2b4"; }
.bi-cloud-rain-heavy::before { content: "\f2b5"; }
.bi-cloud-rain::before { content: "\f2b6"; }
.bi-cloud-slash-fill::before { content: "\f2b7"; }
.bi-cloud-slash::before { content: "\f2b8"; }
.bi-cloud-sleet-fill::before { content: "\f2b9"; }
.bi-cloud-sleet::before { content: "\f2ba"; }
.bi-cloud-snow-fill::before { content: "\f2bb"; }
.bi-cloud-snow::before { content: "\f2bc"; }
.bi-cloud-sun-fill::before { content: "\f2bd"; }
.bi-cloud-sun::before { content: "\f2be"; }
.bi-cloud-upload-fill::before { content: "\f2bf"; }
.bi-cloud-upload::before { content: "\f2c0"; }
.bi-cloud::before { content: "\f2c1"; }
.bi-clouds-fill::before { content: "\f2c2"; }
.bi-clouds::before { content: "\f2c3"; }
.bi-cloudy-fill::before { content: "\f2c4"; }
.bi-cloudy::before { content: "\f2c5"; }
.bi-code-slash::before { content: "\f2c6"; }
.bi-code-square::before { content: "\f2c7"; }
.bi-code::before { content: "\f2c8"; }
.bi-collection-fill::before { content: "\f2c9"; }
.bi-collection-play-fill::before { content: "\f2ca"; }
.bi-collection-play::before { content: "\f2cb"; }
.bi-collection::before { content: "\f2cc"; }
.bi-columns-gap::before { content: "\f2cd"; }
.bi-columns::before { content: "\f2ce"; }
.bi-command::before { content: "\f2cf"; }
.bi-compass-fill::before { content: "\f2d0"; }
.bi-compass::before { content: "\f2d1"; }
.bi-cone-striped::before { content: "\f2d2"; }
.bi-cone::before { content: "\f2d3"; }
.bi-controller::before { content: "\f2d4"; }
.bi-cpu-fill::before { content: "\f2d5"; }
.bi-cpu::before { content: "\f2d6"; }
.bi-credit-card-2-back-fill::before { content: "\f2d7"; }
.bi-credit-card-2-back::before { content: "\f2d8"; }
.bi-credit-card-2-front-fill::before { content: "\f2d9"; }
.bi-credit-card-2-front::before { content: "\f2da"; }
.bi-credit-card-fill::before { content: "\f2db"; }
.bi-credit-card::before { content: "\f2dc"; }
.bi-crop::before { content: "\f2dd"; }
.bi-cup-fill::before { content: "\f2de"; }
.bi-cup-straw::before { content: "\f2df"; }
.bi-cup::before { content: "\f2e0"; }
.bi-cursor-fill::before { content: "\f2e1"; }
.bi-cursor-text::before { content: "\f2e2"; }
.bi-cursor::before { content: "\f2e3"; }
.bi-dash-circle-dotted::before { content: "\f2e4"; }
.bi-dash-circle-fill::before { content: "\f2e5"; }
.bi-dash-circle::before { content: "\f2e6"; }
.bi-dash-square-dotted::before { content: "\f2e7"; }
.bi-dash-square-fill::before { content: "\f2e8"; }
.bi-dash-square::before { content: "\f2e9"; }
.bi-dash::before { content: "\f2ea"; }
.bi-diagram-2-fill::before { content: "\f2eb"; }
.bi-diagram-2::before { content: "\f2ec"; }
.bi-diagram-3-fill::before { content: "\f2ed"; }
.bi-diagram-3::before { content: "\f2ee"; }
.bi-diamond-fill::before { content: "\f2ef"; }
.bi-diamond-half::before { content: "\f2f0"; }
.bi-diamond::before { content: "\f2f1"; }
.bi-dice-1-fill::before { content: "\f2f2"; }
.bi-dice-1::before { content: "\f2f3"; }
.bi-dice-2-fill::before { content: "\f2f4"; }
.bi-dice-2::before { content: "\f2f5"; }
.bi-dice-3-fill::before { content: "\f2f6"; }
.bi-dice-3::before { content: "\f2f7"; }
.bi-dice-4-fill::before { content: "\f2f8"; }
.bi-dice-4::before { content: "\f2f9"; }
.bi-dice-5-fill::before { content: "\f2fa"; }
.bi-dice-5::before { content: "\f2fb"; }
.bi-dice-6-fill::before { content: "\f2fc"; }
.bi-dice-6::before { content: "\f2fd"; }
.bi-disc-fill::before { content: "\f2fe"; }
.bi-disc::before { content: "\f2ff"; }
.bi-discord::before { content: "\f300"; }
.bi-display-fill::before { content: "\f301"; }
.bi-display::before { content: "\f302"; }
.bi-distribute-horizontal::before { content: "\f303"; }
.bi-distribute-vertical::before { content: "\f304"; }
.bi-door-closed-fill::before { content: "\f305"; }
.bi-door-closed::before { content: "\f306"; }
.bi-door-open-fill::before { content: "\f307"; }
.bi-door-open::before { content: "\f308"; }
.bi-dot::before { content: "\f309"; }
.bi-download::before { content: "\f30a"; }
.bi-droplet-fill::before { content: "\f30b"; }
.bi-droplet-half::before { content: "\f30c"; }
.bi-droplet::before { content: "\f30d"; }
.bi-earbuds::before { content: "\f30e"; }
.bi-easel-fill::before { content: "\f30f"; }
.bi-easel::before { content: "\f310"; }
.bi-egg-fill::before { content: "\f311"; }
.bi-egg-fried::before { content: "\f312"; }
.bi-egg::before { content: "\f313"; }
.bi-eject-fill::before { content: "\f314"; }
.bi-eject::before { content: "\f315"; }
.bi-emoji-angry-fill::before { content: "\f316"; }
.bi-emoji-angry::before { content: "\f317"; }
.bi-emoji-dizzy-fill::before { content: "\f318"; }
.bi-emoji-dizzy::before { content: "\f319"; }
.bi-emoji-expressionless-fill::before { content: "\f31a"; }
.bi-emoji-expressionless::before { content: "\f31b"; }
.bi-emoji-frown-fill::before { content: "\f31c"; }
.bi-emoji-frown::before { content: "\f31d"; }
.bi-emoji-heart-eyes-fill::before { content: "\f31e"; }
.bi-emoji-heart-eyes::before { content: "\f31f"; }
.bi-emoji-laughing-fill::before { content: "\f320"; }
.bi-emoji-laughing::before { content: "\f321"; }
.bi-emoji-neutral-fill::before { content: "\f322"; }
.bi-emoji-neutral::before { content: "\f323"; }
.bi-emoji-smile-fill::before { content: "\f324"; }
.bi-emoji-smile-upside-down-fill::before { content: "\f325"; }
.bi-emoji-smile-upside-down::before { content: "\f326"; }
.bi-emoji-smile::before { content: "\f327"; }
.bi-emoji-sunglasses-fill::before { content: "\f328"; }
.bi-emoji-sunglasses::before { content: "\f329"; }
.bi-emoji-wink-fill::before { content: "\f32a"; }
.bi-emoji-wink::before { content: "\f32b"; }
.bi-envelope-fill::before { content: "\f32c"; }
.bi-envelope-open-fill::before { content: "\f32d"; }
.bi-envelope-open::before { content: "\f32e"; }
.bi-envelope::before { content: "\f32f"; }
.bi-eraser-fill::before { content: "\f330"; }
.bi-eraser::before { content: "\f331"; }
.bi-exclamation-circle-fill::before { content: "\f332"; }
.bi-exclamation-circle::before { content: "\f333"; }
.bi-exclamation-diamond-fill::before { content: "\f334"; }
.bi-exclamation-diamond::before { content: "\f335"; }
.bi-exclamation-octagon-fill::before { content: "\f336"; }
.bi-exclamation-octagon::before { content: "\f337"; }
.bi-exclamation-square-fill::before { content: "\f338"; }
.bi-exclamation-square::before { content: "\f339"; }
.bi-exclamation-triangle-fill::before { content: "\f33a"; }
.bi-exclamation-triangle::before { content: "\f33b"; }
.bi-exclamation::before { content: "\f33c"; }
.bi-exclude::before { content: "\f33d"; }
.bi-eye-fill::before { content: "\f33e"; }
.bi-eye-slash-fill::before { content: "\f33f"; }
.bi-eye-slash::before { content: "\f340"; }
.bi-eye::before { content: "\f341"; }
.bi-eyedropper::before { content: "\f342"; }
.bi-eyeglasses::before { content: "\f343"; }
.bi-facebook::before { content: "\f344"; }
.bi-file-arrow-down-fill::before { content: "\f345"; }
.bi-file-arrow-down::before { content: "\f346"; }
.bi-file-arrow-up-fill::before { content: "\f347"; }
.bi-file-arrow-up::before { content: "\f348"; }
.bi-file-bar-graph-fill::before { content: "\f349"; }
.bi-file-bar-graph::before { content: "\f34a"; }
.bi-file-binary-fill::before { content: "\f34b"; }
.bi-file-binary::before { content: "\f34c"; }
.bi-file-break-fill::before { content: "\f34d"; }
.bi-file-break::before { content: "\f34e"; }
.bi-file-check-fill::before { content: "\f34f"; }
.bi-file-check::before { content: "\f350"; }
.bi-file-code-fill::before { content: "\f351"; }
.bi-file-code::before { content: "\f352"; }
.bi-file-diff-fill::before { content: "\f353"; }
.bi-file-diff::before { content: "\f354"; }
.bi-file-earmark-arrow-down-fill::before { content: "\f355"; }
.bi-file-earmark-arrow-down::before { content: "\f356"; }
.bi-file-earmark-arrow-up-fill::before { content: "\f357"; }
.bi-file-earmark-arrow-up::before { content: "\f358"; }
.bi-file-earmark-bar-graph-fill::before { content: "\f359"; }
.bi-file-earmark-bar-graph::before { content: "\f35a"; }
.bi-file-earmark-binary-fill::before { content: "\f35b"; }
.bi-file-earmark-binary::before { content: "\f35c"; }
.bi-file-earmark-break-fill::before { content: "\f35d"; }
.bi-file-earmark-break::before { content: "\f35e"; }
.bi-file-earmark-check-fill::before { content: "\f35f"; }
.bi-file-earmark-check::before { content: "\f360"; }
.bi-file-earmark-code-fill::before { content: "\f361"; }
.bi-file-earmark-code::before { content: "\f362"; }
.bi-file-earmark-diff-fill::before { content: "\f363"; }
.bi-file-earmark-diff::before { content: "\f364"; }
.bi-file-earmark-easel-fill::before { content: "\f365"; }
.bi-file-earmark-easel::before { content: "\f366"; }
.bi-file-earmark-excel-fill::before { content: "\f367"; }
.bi-file-earmark-excel::before { content: "\f368"; }
.bi-file-earmark-fill::before { content: "\f369"; }
.bi-file-earmark-font-fill::before { content: "\f36a"; }
.bi-file-earmark-font::before { content: "\f36b"; }
.bi-file-earmark-image-fill::before { content: "\f36c"; }
.bi-file-earmark-image::before { content: "\f36d"; }
.bi-file-earmark-lock-fill::before { content: "\f36e"; }
.bi-file-earmark-lock::before { content: "\f36f"; }
.bi-file-earmark-lock2-fill::before { content: "\f370"; }
.bi-file-earmark-lock2::before { content: "\f371"; }
.bi-file-earmark-medical-fill::before { content: "\f372"; }
.bi-file-earmark-medical::before { content: "\f373"; }
.bi-file-earmark-minus-fill::before { content: "\f374"; }
.bi-file-earmark-minus::before { content: "\f375"; }
.bi-file-earmark-music-fill::before { content: "\f376"; }
.bi-file-earmark-music::before { content: "\f377"; }
.bi-file-earmark-person-fill::before { content: "\f378"; }
.bi-file-earmark-person::before { content: "\f379"; }
.bi-file-earmark-play-fill::before { content: "\f37a"; }
.bi-file-earmark-play::before { content: "\f37b"; }
.bi-file-earmark-plus-fill::before { content: "\f37c"; }
.bi-file-earmark-plus::before { content: "\f37d"; }
.bi-file-earmark-post-fill::before { content: "\f37e"; }
.bi-file-earmark-post::before { content: "\f37f"; }
.bi-file-earmark-ppt-fill::before { content: "\f380"; }
.bi-file-earmark-ppt::before { content: "\f381"; }
.bi-file-earmark-richtext-fill::before { content: "\f382"; }
.bi-file-earmark-richtext::before { content: "\f383"; }
.bi-file-earmark-ruled-fill::before { content: "\f384"; }
.bi-file-earmark-ruled::before { content: "\f385"; }
.bi-file-earmark-slides-fill::before { content: "\f386"; }
.bi-file-earmark-slides::before { content: "\f387"; }
.bi-file-earmark-spreadsheet-fill::before { content: "\f388"; }
.bi-file-earmark-spreadsheet::before { content: "\f389"; }
.bi-file-earmark-text-fill::before { content: "\f38a"; }
.bi-file-earmark-text::before { content: "\f38b"; }
.bi-file-earmark-word-fill::before { content: "\f38c"; }
.bi-file-earmark-word::before { content: "\f38d"; }
.bi-file-earmark-x-fill::before { content: "\f38e"; }
.bi-file-earmark-x::before { content: "\f38f"; }
.bi-file-earmark-zip-fill::before { content: "\f390"; }
.bi-file-earmark-zip::before { content: "\f391"; }
.bi-file-earmark::before { content: "\f392"; }
.bi-file-easel-fill::before { content: "\f393"; }
.bi-file-easel::before { content: "\f394"; }
.bi-file-excel-fill::before { content: "\f395"; }
.bi-file-excel::before { content: "\f396"; }
.bi-file-fill::before { content: "\f397"; }
.bi-file-font-fill::before { content: "\f398"; }
.bi-file-font::before { content: "\f399"; }
.bi-file-image-fill::before { content: "\f39a"; }
.bi-file-image::before { content: "\f39b"; }
.bi-file-lock-fill::before { content: "\f39c"; }
.bi-file-lock::before { content: "\f39d"; }
.bi-file-lock2-fill::before { content: "\f39e"; }
.bi-file-lock2::before { content: "\f39f"; }
.bi-file-medical-fill::before { content: "\f3a0"; }
.bi-file-medical::before { content: "\f3a1"; }
.bi-file-minus-fill::before { content: "\f3a2"; }
.bi-file-minus::before { content: "\f3a3"; }
.bi-file-music-fill::before { content: "\f3a4"; }
.bi-file-music::before { content: "\f3a5"; }
.bi-file-person-fill::before { content: "\f3a6"; }
.bi-file-person::before { content: "\f3a7"; }
.bi-file-play-fill::before { content: "\f3a8"; }
.bi-file-play::before { content: "\f3a9"; }
.bi-file-plus-fill::before { content: "\f3aa"; }
.bi-file-plus::before { content: "\f3ab"; }
.bi-file-post-fill::before { content: "\f3ac"; }
.bi-file-post::before { content: "\f3ad"; }
.bi-file-ppt-fill::before { content: "\f3ae"; }
.bi-file-ppt::before { content: "\f3af"; }
.bi-file-richtext-fill::before { content: "\f3b0"; }
.bi-file-richtext::before { content: "\f3b1"; }
.bi-file-ruled-fill::before { content: "\f3b2"; }
.bi-file-ruled::before { content: "\f3b3"; }
.bi-file-slides-fill::before { content: "\f3b4"; }
.bi-file-slides::before { content: "\f3b5"; }
.bi-file-spreadsheet-fill::before { content: "\f3b6"; }
.bi-file-spreadsheet::before { content: "\f3b7"; }
.bi-file-text-fill::before { content: "\f3b8"; }
.bi-file-text::before { content: "\f3b9"; }
.bi-file-word-fill::before { content: "\f3ba"; }
.bi-file-word::before { content: "\f3bb"; }
.bi-file-x-fill::before { content: "\f3bc"; }
.bi-file-x::before { content: "\f3bd"; }
.bi-file-zip-fill::before { content: "\f3be"; }
.bi-file-zip::before { content: "\f3bf"; }
.bi-file::before { content: "\f3c0"; }
.bi-files-alt::before { content: "\f3c1"; }
.bi-files::before { content: "\f3c2"; }
.bi-film::before { content: "\f3c3"; }
.bi-filter-circle-fill::before { content: "\f3c4"; }
.bi-filter-circle::before { content: "\f3c5"; }
.bi-filter-left::before { content: "\f3c6"; }
.bi-filter-right::before { content: "\f3c7"; }
.bi-filter-square-fill::before { content: "\f3c8"; }
.bi-filter-square::before { content: "\f3c9"; }
.bi-filter::before { content: "\f3ca"; }
.bi-flag-fill::before { content: "\f3cb"; }
.bi-flag::before { content: "\f3cc"; }
.bi-flower1::before { content: "\f3cd"; }
.bi-flower2::before { content: "\f3ce"; }
.bi-flower3::before { content: "\f3cf"; }
.bi-folder-check::before { content: "\f3d0"; }
.bi-folder-fill::before { content: "\f3d1"; }
.bi-folder-minus::before { content: "\f3d2"; }
.bi-folder-plus::before { content: "\f3d3"; }
.bi-folder-symlink-fill::before { content: "\f3d4"; }
.bi-folder-symlink::before { content: "\f3d5"; }
.bi-folder-x::before { content: "\f3d6"; }
.bi-folder::before { content: "\f3d7"; }
.bi-folder2-open::before { content: "\f3d8"; }
.bi-folder2::before { content: "\f3d9"; }
.bi-fonts::before { content: "\f3da"; }
.bi-forward-fill::before { content: "\f3db"; }
.bi-forward::before { content: "\f3dc"; }
.bi-front::before { content: "\f3dd"; }
.bi-fullscreen-exit::before { content: "\f3de"; }
.bi-fullscreen::before { content: "\f3df"; }
.bi-funnel-fill::before { content: "\f3e0"; }
.bi-funnel::before { content: "\f3e1"; }
.bi-gear-fill::before { content: "\f3e2"; }
.bi-gear-wide-connected::before { content: "\f3e3"; }
.bi-gear-wide::before { content: "\f3e4"; }
.bi-gear::before { content: "\f3e5"; }
.bi-gem::before { content: "\f3e6"; }
.bi-geo-alt-fill::before { content: "\f3e7"; }
.bi-geo-alt::before { content: "\f3e8"; }
.bi-geo-fill::before { content: "\f3e9"; }
.bi-geo::before { content: "\f3ea"; }
.bi-gift-fill::before { content: "\f3eb"; }
.bi-gift::before { content: "\f3ec"; }
.bi-github::before { content: "\f3ed"; }
.bi-globe::before { content: "\f3ee"; }
.bi-globe2::before { content: "\f3ef"; }
.bi-google::before { content: "\f3f0"; }
.bi-graph-down::before { content: "\f3f1"; }
.bi-graph-up::before { content: "\f3f2"; }
.bi-grid-1x2-fill::before { content: "\f3f3"; }
.bi-grid-1x2::before { content: "\f3f4"; }
.bi-grid-3x2-gap-fill::before { content: "\f3f5"; }
.bi-grid-3x2-gap::before { content: "\f3f6"; }
.bi-grid-3x2::before { content: "\f3f7"; }
.bi-grid-3x3-gap-fill::before { content: "\f3f8"; }
.bi-grid-3x3-gap::before { content: "\f3f9"; }
.bi-grid-3x3::before { content: "\f3fa"; }
.bi-grid-fill::before { content: "\f3fb"; }
.bi-grid::before { content: "\f3fc"; }
.bi-grip-horizontal::before { content: "\f3fd"; }
.bi-grip-vertical::before { content: "\f3fe"; }
.bi-hammer::before { content: "\f3ff"; }
.bi-hand-index-fill::before { content: "\f400"; }
.bi-hand-index-thumb-fill::before { content: "\f401"; }
.bi-hand-index-thumb::before { content: "\f402"; }
.bi-hand-index::before { content: "\f403"; }
.bi-hand-thumbs-down-fill::before { content: "\f404"; }
.bi-hand-thumbs-down::before { content: "\f405"; }
.bi-hand-thumbs-up-fill::before { content: "\f406"; }
.bi-hand-thumbs-up::before { content: "\f407"; }
.bi-handbag-fill::before { content: "\f408"; }
.bi-handbag::before { content: "\f409"; }
.bi-hash::before { content: "\f40a"; }
.bi-hdd-fill::before { content: "\f40b"; }
.bi-hdd-network-fill::before { content: "\f40c"; }
.bi-hdd-network::before { content: "\f40d"; }
.bi-hdd-rack-fill::before { content: "\f40e"; }
.bi-hdd-rack::before { content: "\f40f"; }
.bi-hdd-stack-fill::before { content: "\f410"; }
.bi-hdd-stack::before { content: "\f411"; }
.bi-hdd::before { content: "\f412"; }
.bi-headphones::before { content: "\f413"; }
.bi-headset::before { content: "\f414"; }
.bi-heart-fill::before { content: "\f415"; }
.bi-heart-half::before { content: "\f416"; }
.bi-heart::before { content: "\f417"; }
.bi-heptagon-fill::before { content: "\f418"; }
.bi-heptagon-half::before { content: "\f419"; }
.bi-heptagon::before { content: "\f41a"; }
.bi-hexagon-fill::before { content: "\f41b"; }
.bi-hexagon-half::before { content: "\f41c"; }
.bi-hexagon::before { content: "\f41d"; }
.bi-hourglass-bottom::before { content: "\f41e"; }
.bi-hourglass-split::before { content: "\f41f"; }
.bi-hourglass-top::before { content: "\f420"; }
.bi-hourglass::before { content: "\f421"; }
.bi-house-door-fill::before { content: "\f422"; }
.bi-house-door::before { content: "\f423"; }
.bi-house-fill::before { content: "\f424"; }
.bi-house::before { content: "\f425"; }
.bi-hr::before { content: "\f426"; }
.bi-hurricane::before { content: "\f427"; }
.bi-image-alt::before { content: "\f428"; }
.bi-image-fill::before { content: "\f429"; }
.bi-image::before { content: "\f42a"; }
.bi-images::before { content: "\f42b"; }
.bi-inbox-fill::before { content: "\f42c"; }
.bi-inbox::before { content: "\f42d"; }
.bi-inboxes-fill::before { content: "\f42e"; }
.bi-inboxes::before { content: "\f42f"; }
.bi-info-circle-fill::before { content: "\f430"; }
.bi-info-circle::before { content: "\f431"; }
.bi-info-square-fill::before { content: "\f432"; }
.bi-info-square::before { content: "\f433"; }
.bi-info::before { content: "\f434"; }
.bi-input-cursor-text::before { content: "\f435"; }
.bi-input-cursor::before { content: "\f436"; }
.bi-instagram::before { content: "\f437"; }
.bi-intersect::before { content: "\f438"; }
.bi-journal-album::before { content: "\f439"; }
.bi-journal-arrow-down::before { content: "\f43a"; }
.bi-journal-arrow-up::before { content: "\f43b"; }
.bi-journal-bookmark-fill::before { content: "\f43c"; }
.bi-journal-bookmark::before { content: "\f43d"; }
.bi-journal-check::before { content: "\f43e"; }
.bi-journal-code::before { content: "\f43f"; }
.bi-journal-medical::before { content: "\f440"; }
.bi-journal-minus::before { content: "\f441"; }
.bi-journal-plus::before { content: "\f442"; }
.bi-journal-richtext::before { content: "\f443"; }
.bi-journal-text::before { content: "\f444"; }
.bi-journal-x::before { content: "\f445"; }
.bi-journal::before { content: "\f446"; }
.bi-journals::before { content: "\f447"; }
.bi-joystick::before { content: "\f448"; }
.bi-justify-left::before { content: "\f449"; }
.bi-justify-right::before { content: "\f44a"; }
.bi-justify::before { content: "\f44b"; }
.bi-kanban-fill::before { content: "\f44c"; }
.bi-kanban::before { content: "\f44d"; }
.bi-key-fill::before { content: "\f44e"; }
.bi-key::before { content: "\f44f"; }
.bi-keyboard-fill::before { content: "\f450"; }
.bi-keyboard::before { content: "\f451"; }
.bi-ladder::before { content: "\f452"; }
.bi-lamp-fill::before { content: "\f453"; }
.bi-lamp::before { content: "\f454"; }
.bi-laptop-fill::before { content: "\f455"; }
.bi-laptop::before { content: "\f456"; }
.bi-layer-backward::before { content: "\f457"; }
.bi-layer-forward::before { content: "\f458"; }
.bi-layers-fill::before { content: "\f459"; }
.bi-layers-half::before { content: "\f45a"; }
.bi-layers::before { content: "\f45b"; }
.bi-layout-sidebar-inset-reverse::before { content: "\f45c"; }
.bi-layout-sidebar-inset::before { content: "\f45d"; }
.bi-layout-sidebar-reverse::before { content: "\f45e"; }
.bi-layout-sidebar::before { content: "\f45f"; }
.bi-layout-split::before { content: "\f460"; }
.bi-layout-text-sidebar-reverse::before { content: "\f461"; }
.bi-layout-text-sidebar::before { content: "\f462"; }
.bi-layout-text-window-reverse::before { content: "\f463"; }
.bi-layout-text-window::before { content: "\f464"; }
.bi-layout-three-columns::before { content: "\f465"; }
.bi-layout-wtf::before { content: "\f466"; }
.bi-life-preserver::before { content: "\f467"; }
.bi-lightbulb-fill::before { content: "\f468"; }
.bi-lightbulb-off-fill::before { content: "\f469"; }
.bi-lightbulb-off::before { content: "\f46a"; }
.bi-lightbulb::before { content: "\f46b"; }
.bi-lightning-charge-fill::before { content: "\f46c"; }
.bi-lightning-charge::before { content: "\f46d"; }
.bi-lightning-fill::before { content: "\f46e"; }
.bi-lightning::before { content: "\f46f"; }
.bi-link-45deg::before { content: "\f470"; }
.bi-link::before { content: "\f471"; }
.bi-linkedin::before { content: "\f472"; }
.bi-list-check::before { content: "\f473"; }
.bi-list-nested::before { content: "\f474"; }
.bi-list-ol::before { content: "\f475"; }
.bi-list-stars::before { content: "\f476"; }
.bi-list-task::before { content: "\f477"; }
.bi-list-ul::before { content: "\f478"; }
.bi-list::before { content: "\f479"; }
.bi-lock-fill::before { content: "\f47a"; }
.bi-lock::before { content: "\f47b"; }
.bi-mailbox::before { content: "\f47c"; }
.bi-mailbox2::before { content: "\f47d"; }
.bi-map-fill::before { content: "\f47e"; }
.bi-map::before { content: "\f47f"; }
.bi-markdown-fill::before { content: "\f480"; }
.bi-markdown::before { content: "\f481"; }
.bi-mask::before { content: "\f482"; }
.bi-megaphone-fill::before { content: "\f483"; }
.bi-megaphone::before { content: "\f484"; }
.bi-menu-app-fill::before { content: "\f485"; }
.bi-menu-app::before { content: "\f486"; }
.bi-menu-button-fill::before { content: "\f487"; }
.bi-menu-button-wide-fill::before { content: "\f488"; }
.bi-menu-button-wide::before { content: "\f489"; }
.bi-menu-button::before { content: "\f48a"; }
.bi-menu-down::before { content: "\f48b"; }
.bi-menu-up::before { content: "\f48c"; }
.bi-mic-fill::before { content: "\f48d"; }
.bi-mic-mute-fill::before { content: "\f48e"; }
.bi-mic-mute::before { content: "\f48f"; }
.bi-mic::before { content: "\f490"; }
.bi-minecart-loaded::before { content: "\f491"; }
.bi-minecart::before { content: "\f492"; }
.bi-moisture::before { content: "\f493"; }
.bi-moon-fill::before { content: "\f494"; }
.bi-moon-stars-fill::before { content: "\f495"; }
.bi-moon-stars::before { content: "\f496"; }
.bi-moon::before { content: "\f497"; }
.bi-mouse-fill::before { content: "\f498"; }
.bi-mouse::before { content: "\f499"; }
.bi-mouse2-fill::before { content: "\f49a"; }
.bi-mouse2::before { content: "\f49b"; }
.bi-mouse3-fill::before { content: "\f49c"; }
.bi-mouse3::before { content: "\f49d"; }
.bi-music-note-beamed::before { content: "\f49e"; }
.bi-music-note-list::before { content: "\f49f"; }
.bi-music-note::before { content: "\f4a0"; }
.bi-music-player-fill::before { content: "\f4a1"; }
.bi-music-player::before { content: "\f4a2"; }
.bi-newspaper::before { content: "\f4a3"; }
.bi-node-minus-fill::before { content: "\f4a4"; }
.bi-node-minus::before { content: "\f4a5"; }
.bi-node-plus-fill::before { content: "\f4a6"; }
.bi-node-plus::before { content: "\f4a7"; }
.bi-nut-fill::before { content: "\f4a8"; }
.bi-nut::before { content: "\f4a9"; }
.bi-octagon-fill::before { content: "\f4aa"; }
.bi-octagon-half::before { content: "\f4ab"; }
.bi-octagon::before { content: "\f4ac"; }
.bi-option::before { content: "\f4ad"; }
.bi-outlet::before { content: "\f4ae"; }
.bi-paint-bucket::before { content: "\f4af"; }
.bi-palette-fill::before { content: "\f4b0"; }
.bi-palette::before { content: "\f4b1"; }
.bi-palette2::before { content: "\f4b2"; }
.bi-paperclip::before { content: "\f4b3"; }
.bi-paragraph::before { content: "\f4b4"; }
.bi-patch-check-fill::before { content: "\f4b5"; }
.bi-patch-check::before { content: "\f4b6"; }
.bi-patch-exclamation-fill::before { content: "\f4b7"; }
.bi-patch-exclamation::before { content: "\f4b8"; }
.bi-patch-minus-fill::before { content: "\f4b9"; }
.bi-patch-minus::before { content: "\f4ba"; }
.bi-patch-plus-fill::before { content: "\f4bb"; }
.bi-patch-plus::before { content: "\f4bc"; }
.bi-patch-question-fill::before { content: "\f4bd"; }
.bi-patch-question::before { content: "\f4be"; }
.bi-pause-btn-fill::before { content: "\f4bf"; }
.bi-pause-btn::before { content: "\f4c0"; }
.bi-pause-circle-fill::before { content: "\f4c1"; }
.bi-pause-circle::before { content: "\f4c2"; }
.bi-pause-fill::before { content: "\f4c3"; }
.bi-pause::before { content: "\f4c4"; }
.bi-peace-fill::before { content: "\f4c5"; }
.bi-peace::before { content: "\f4c6"; }
.bi-pen-fill::before { content: "\f4c7"; }
.bi-pen::before { content: "\f4c8"; }
.bi-pencil-fill::before { content: "\f4c9"; }
.bi-pencil-square::before { content: "\f4ca"; }
.bi-pencil::before { content: "\f4cb"; }
.bi-pentagon-fill::before { content: "\f4cc"; }
.bi-pentagon-half::before { content: "\f4cd"; }
.bi-pentagon::before { content: "\f4ce"; }
.bi-people-fill::before { content: "\f4cf"; }
.bi-people::before { content: "\f4d0"; }
.bi-percent::before { content: "\f4d1"; }
.bi-person-badge-fill::before { content: "\f4d2"; }
.bi-person-badge::before { content: "\f4d3"; }
.bi-person-bounding-box::before { content: "\f4d4"; }
.bi-person-check-fill::before { content: "\f4d5"; }
.bi-person-check::before { content: "\f4d6"; }
.bi-person-circle::before { content: "\f4d7"; }
.bi-person-dash-fill::before { content: "\f4d8"; }
.bi-person-dash::before { content: "\f4d9"; }
.bi-person-fill::before { content: "\f4da"; }
.bi-person-lines-fill::before { content: "\f4db"; }
.bi-person-plus-fill::before { content: "\f4dc"; }
.bi-person-plus::before { content: "\f4dd"; }
.bi-person-square::before { content: "\f4de"; }
.bi-person-x-fill::before { content: "\f4df"; }
.bi-person-x::before { content: "\f4e0"; }
.bi-person::before { content: "\f4e1"; }
.bi-phone-fill::before { content: "\f4e2"; }
.bi-phone-landscape-fill::before { content: "\f4e3"; }
.bi-phone-landscape::before { content: "\f4e4"; }
.bi-phone-vibrate-fill::before { content: "\f4e5"; }
.bi-phone-vibrate::before { content: "\f4e6"; }
.bi-phone::before { content: "\f4e7"; }
.bi-pie-chart-fill::before { content: "\f4e8"; }
.bi-pie-chart::before { content: "\f4e9"; }
.bi-pin-angle-fill::before { content: "\f4ea"; }
.bi-pin-angle::before { content: "\f4eb"; }
.bi-pin-fill::before { content: "\f4ec"; }
.bi-pin::before { content: "\f4ed"; }
.bi-pip-fill::before { content: "\f4ee"; }
.bi-pip::before { content: "\f4ef"; }
.bi-play-btn-fill::before { content: "\f4f0"; }
.bi-play-btn::before { content: "\f4f1"; }
.bi-play-circle-fill::before { content: "\f4f2"; }
.bi-play-circle::before { content: "\f4f3"; }
.bi-play-fill::before { content: "\f4f4"; }
.bi-play::before { content: "\f4f5"; }
.bi-plug-fill::before { content: "\f4f6"; }
.bi-plug::before { content: "\f4f7"; }
.bi-plus-circle-dotted::before { content: "\f4f8"; }
.bi-plus-circle-fill::before { content: "\f4f9"; }
.bi-plus-circle::before { content: "\f4fa"; }
.bi-plus-square-dotted::before { content: "\f4fb"; }
.bi-plus-square-fill::before { content: "\f4fc"; }
.bi-plus-square::before { content: "\f4fd"; }
.bi-plus::before { content: "\f4fe"; }
.bi-power::before { content: "\f4ff"; }
.bi-printer-fill::before { content: "\f500"; }
.bi-printer::before { content: "\f501"; }
.bi-puzzle-fill::before { content: "\f502"; }
.bi-puzzle::before { content: "\f503"; }
.bi-question-circle-fill::before { content: "\f504"; }
.bi-question-circle::before { content: "\f505"; }
.bi-question-diamond-fill::before { content: "\f506"; }
.bi-question-diamond::before { content: "\f507"; }
.bi-question-octagon-fill::before { content: "\f508"; }
.bi-question-octagon::before { content: "\f509"; }
.bi-question-square-fill::before { content: "\f50a"; }
.bi-question-square::before { content: "\f50b"; }
.bi-question::before { content: "\f50c"; }
.bi-rainbow::before { content: "\f50d"; }
.bi-receipt-cutoff::before { content: "\f50e"; }
.bi-receipt::before { content: "\f50f"; }
.bi-reception-0::before { content: "\f510"; }
.bi-reception-1::before { content: "\f511"; }
.bi-reception-2::before { content: "\f512"; }
.bi-reception-3::before { content: "\f513"; }
.bi-reception-4::before { content: "\f514"; }
.bi-record-btn-fill::before { content: "\f515"; }
.bi-record-btn::before { content: "\f516"; }
.bi-record-circle-fill::before { content: "\f517"; }
.bi-record-circle::before { content: "\f518"; }
.bi-record-fill::before { content: "\f519"; }
.bi-record::before { content: "\f51a"; }
.bi-record2-fill::before { content: "\f51b"; }
.bi-record2::before { content: "\f51c"; }
.bi-reply-all-fill::before { content: "\f51d"; }
.bi-reply-all::before { content: "\f51e"; }
.bi-reply-fill::before { content: "\f51f"; }
.bi-reply::before { content: "\f520"; }
.bi-rss-fill::before { content: "\f521"; }
.bi-rss::before { content: "\f522"; }
.bi-rulers::before { content: "\f523"; }
.bi-save-fill::before { content: "\f524"; }
.bi-save::before { content: "\f525"; }
.bi-save2-fill::before { content: "\f526"; }
.bi-save2::before { content: "\f527"; }
.bi-scissors::before { content: "\f528"; }
.bi-screwdriver::before { content: "\f529"; }
.bi-search::before { content: "\f52a"; }
.bi-segmented-nav::before { content: "\f52b"; }
.bi-server::before { content: "\f52c"; }
.bi-share-fill::before { content: "\f52d"; }
.bi-share::before { content: "\f52e"; }
.bi-shield-check::before { content: "\f52f"; }
.bi-shield-exclamation::before { content: "\f530"; }
.bi-shield-fill-check::before { content: "\f531"; }
.bi-shield-fill-exclamation::before { content: "\f532"; }
.bi-shield-fill-minus::before { content: "\f533"; }
.bi-shield-fill-plus::before { content: "\f534"; }
.bi-shield-fill-x::before { content: "\f535"; }
.bi-shield-fill::before { content: "\f536"; }
.bi-shield-lock-fill::before { content: "\f537"; }
.bi-shield-lock::before { content: "\f538"; }
.bi-shield-minus::before { content: "\f539"; }
.bi-shield-plus::before { content: "\f53a"; }
.bi-shield-shaded::before { content: "\f53b"; }
.bi-shield-slash-fill::before { content: "\f53c"; }
.bi-shield-slash::before { content: "\f53d"; }
.bi-shield-x::before { content: "\f53e"; }
.bi-shield::before { content: "\f53f"; }
.bi-shift-fill::before { content: "\f540"; }
.bi-shift::before { content: "\f541"; }
.bi-shop-window::before { content: "\f542"; }
.bi-shop::before { content: "\f543"; }
.bi-shuffle::before { content: "\f544"; }
.bi-signpost-2-fill::before { content: "\f545"; }
.bi-signpost-2::before { content: "\f546"; }
.bi-signpost-fill::before { content: "\f547"; }
.bi-signpost-split-fill::before { content: "\f548"; }
.bi-signpost-split::before { content: "\f549"; }
.bi-signpost::before { content: "\f54a"; }
.bi-sim-fill::before { content: "\f54b"; }
.bi-sim::before { content: "\f54c"; }
.bi-skip-backward-btn-fill::before { content: "\f54d"; }
.bi-skip-backward-btn::before { content: "\f54e"; }
.bi-skip-backward-circle-fill::before { content: "\f54f"; }
.bi-skip-backward-circle::before { content: "\f550"; }
.bi-skip-backward-fill::before { content: "\f551"; }
.bi-skip-backward::before { content: "\f552"; }
.bi-skip-end-btn-fill::before { content: "\f553"; }
.bi-skip-end-btn::before { content: "\f554"; }
.bi-skip-end-circle-fill::before { content: "\f555"; }
.bi-skip-end-circle::before { content: "\f556"; }
.bi-skip-end-fill::before { content: "\f557"; }
.bi-skip-end::before { content: "\f558"; }
.bi-skip-forward-btn-fill::before { content: "\f559"; }
.bi-skip-forward-btn::before { content: "\f55a"; }
.bi-skip-forward-circle-fill::before { content: "\f55b"; }
.bi-skip-forward-circle::before { content: "\f55c"; }
.bi-skip-forward-fill::before { content: "\f55d"; }
.bi-skip-forward::before { content: "\f55e"; }
.bi-skip-start-btn-fill::before { content: "\f55f"; }
.bi-skip-start-btn::before { content: "\f560"; }
.bi-skip-start-circle-fill::before { content: "\f561"; }
.bi-skip-start-circle::before { content: "\f562"; }
.bi-skip-start-fill::before { content: "\f563"; }
.bi-skip-start::before { content: "\f564"; }
.bi-slack::before { content: "\f565"; }
.bi-slash-circle-fill::before { content: "\f566"; }
.bi-slash-circle::before { content: "\f567"; }
.bi-slash-square-fill::before { content: "\f568"; }
.bi-slash-square::before { content: "\f569"; }
.bi-slash::before { content: "\f56a"; }
.bi-sliders::before { content: "\f56b"; }
.bi-smartwatch::before { content: "\f56c"; }
.bi-snow::before { content: "\f56d"; }
.bi-snow2::before { content: "\f56e"; }
.bi-snow3::before { content: "\f56f"; }
.bi-sort-alpha-down-alt::before { content: "\f570"; }
.bi-sort-alpha-down::before { content: "\f571"; }
.bi-sort-alpha-up-alt::before { content: "\f572"; }
.bi-sort-alpha-up::before { content: "\f573"; }
.bi-sort-down-alt::before { content: "\f574"; }
.bi-sort-down::before { content: "\f575"; }
.bi-sort-numeric-down-alt::before { content: "\f576"; }
.bi-sort-numeric-down::before { content: "\f577"; }
.bi-sort-numeric-up-alt::before { content: "\f578"; }
.bi-sort-numeric-up::before { content: "\f579"; }
.bi-sort-up-alt::before { content: "\f57a"; }
.bi-sort-up::before { content: "\f57b"; }
.bi-soundwave::before { content: "\f57c"; }
.bi-speaker-fill::before { content: "\f57d"; }
.bi-speaker::before { content: "\f57e"; }
.bi-speedometer::before { content: "\f57f"; }
.bi-speedometer2::before { content: "\f580"; }
.bi-spellcheck::before { content: "\f581"; }
.bi-square-fill::before { content: "\f582"; }
.bi-square-half::before { content: "\f583"; }
.bi-square::before { content: "\f584"; }
.bi-stack::before { content: "\f585"; }
.bi-star-fill::before { content: "\f586"; }
.bi-star-half::before { content: "\f587"; }
.bi-star::before { content: "\f588"; }
.bi-stars::before { content: "\f589"; }
.bi-stickies-fill::before { content: "\f58a"; }
.bi-stickies::before { content: "\f58b"; }
.bi-sticky-fill::before { content: "\f58c"; }
.bi-sticky::before { content: "\f58d"; }
.bi-stop-btn-fill::before { content: "\f58e"; }
.bi-stop-btn::before { content: "\f58f"; }
.bi-stop-circle-fill::before { content: "\f590"; }
.bi-stop-circle::before { content: "\f591"; }
.bi-stop-fill::before { content: "\f592"; }
.bi-stop::before { content: "\f593"; }
.bi-stoplights-fill::before { content: "\f594"; }
.bi-stoplights::before { content: "\f595"; }
.bi-stopwatch-fill::before { content: "\f596"; }
.bi-stopwatch::before { content: "\f597"; }
.bi-subtract::before { content: "\f598"; }
.bi-suit-club-fill::before { content: "\f599"; }
.bi-suit-club::before { content: "\f59a"; }
.bi-suit-diamond-fill::before { content: "\f59b"; }
.bi-suit-diamond::before { content: "\f59c"; }
.bi-suit-heart-fill::before { content: "\f59d"; }
.bi-suit-heart::before { content: "\f59e"; }
.bi-suit-spade-fill::before { content: "\f59f"; }
.bi-suit-spade::before { content: "\f5a0"; }
.bi-sun-fill::before { content: "\f5a1"; }
.bi-sun::before { content: "\f5a2"; }
.bi-sunglasses::before { content: "\f5a3"; }
.bi-sunrise-fill::before { content: "\f5a4"; }
.bi-sunrise::before { content: "\f5a5"; }
.bi-sunset-fill::before { content: "\f5a6"; }
.bi-sunset::before { content: "\f5a7"; }
.bi-symmetry-horizontal::before { content: "\f5a8"; }
.bi-symmetry-vertical::before { content: "\f5a9"; }
.bi-table::before { content: "\f5aa"; }
.bi-tablet-fill::before { content: "\f5ab"; }
.bi-tablet-landscape-fill::before { content: "\f5ac"; }
.bi-tablet-landscape::before { content: "\f5ad"; }
.bi-tablet::before { content: "\f5ae"; }
.bi-tag-fill::before { content: "\f5af"; }
.bi-tag::before { content: "\f5b0"; }
.bi-tags-fill::before { content: "\f5b1"; }
.bi-tags::before { content: "\f5b2"; }
.bi-telegram::before { content: "\f5b3"; }
.bi-telephone-fill::before { content: "\f5b4"; }
.bi-telephone-forward-fill::before { content: "\f5b5"; }
.bi-telephone-forward::before { content: "\f5b6"; }
.bi-telephone-inbound-fill::before { content: "\f5b7"; }
.bi-telephone-inbound::before { content: "\f5b8"; }
.bi-telephone-minus-fill::before { content: "\f5b9"; }
.bi-telephone-minus::before { content: "\f5ba"; }
.bi-telephone-outbound-fill::before { content: "\f5bb"; }
.bi-telephone-outbound::before { content: "\f5bc"; }
.bi-telephone-plus-fill::before { content: "\f5bd"; }
.bi-telephone-plus::before { content: "\f5be"; }
.bi-telephone-x-fill::before { content: "\f5bf"; }
.bi-telephone-x::before { content: "\f5c0"; }
.bi-telephone::before { content: "\f5c1"; }
.bi-terminal-fill::before { content: "\f5c2"; }
.bi-terminal::before { content: "\f5c3"; }
.bi-text-center::before { content: "\f5c4"; }
.bi-text-indent-left::before { content: "\f5c5"; }
.bi-text-indent-right::before { content: "\f5c6"; }
.bi-text-left::before { content: "\f5c7"; }
.bi-text-paragraph::before { content: "\f5c8"; }
.bi-text-right::before { content: "\f5c9"; }
.bi-textarea-resize::before { content: "\f5ca"; }
.bi-textarea-t::before { content: "\f5cb"; }
.bi-textarea::before { content: "\f5cc"; }
.bi-thermometer-half::before { content: "\f5cd"; }
.bi-thermometer-high::before { content: "\f5ce"; }
.bi-thermometer-low::before { content: "\f5cf"; }
.bi-thermometer-snow::before { content: "\f5d0"; }
.bi-thermometer-sun::before { content: "\f5d1"; }
.bi-thermometer::before { content: "\f5d2"; }
.bi-three-dots-vertical::before { content: "\f5d3"; }
.bi-three-dots::before { content: "\f5d4"; }
.bi-toggle-off::before { content: "\f5d5"; }
.bi-toggle-on::before { content: "\f5d6"; }
.bi-toggle2-off::before { content: "\f5d7"; }
.bi-toggle2-on::before { content: "\f5d8"; }
.bi-toggles::before { content: "\f5d9"; }
.bi-toggles2::before { content: "\f5da"; }
.bi-tools::before { content: "\f5db"; }
.bi-tornado::before { content: "\f5dc"; }
.bi-trash-fill::before { content: "\f5dd"; }
.bi-trash::before { content: "\f5de"; }
.bi-trash2-fill::before { content: "\f5df"; }
.bi-trash2::before { content: "\f5e0"; }
.bi-tree-fill::before { content: "\f5e1"; }
.bi-tree::before { content: "\f5e2"; }
.bi-triangle-fill::before { content: "\f5e3"; }
.bi-triangle-half::before { content: "\f5e4"; }
.bi-triangle::before { content: "\f5e5"; }
.bi-trophy-fill::before { content: "\f5e6"; }
.bi-trophy::before { content: "\f5e7"; }
.bi-tropical-storm::before { content: "\f5e8"; }
.bi-truck-flatbed::before { content: "\f5e9"; }
.bi-truck::before { content: "\f5ea"; }
.bi-tsunami::before { content: "\f5eb"; }
.bi-tv-fill::before { content: "\f5ec"; }
.bi-tv::before { content: "\f5ed"; }
.bi-twitch::before { content: "\f5ee"; }
.bi-twitter::before { content: "\f5ef"; }
.bi-type-bold::before { content: "\f5f0"; }
.bi-type-h1::before { content: "\f5f1"; }
.bi-type-h2::before { content: "\f5f2"; }
.bi-type-h3::before { content: "\f5f3"; }
.bi-type-italic::before { content: "\f5f4"; }
.bi-type-strikethrough::before { content: "\f5f5"; }
.bi-type-underline::before { content: "\f5f6"; }
.bi-type::before { content: "\f5f7"; }
.bi-ui-checks-grid::before { content: "\f5f8"; }
.bi-ui-checks::before { content: "\f5f9"; }
.bi-ui-radios-grid::before { content: "\f5fa"; }
.bi-ui-radios::before { content: "\f5fb"; }
.bi-umbrella-fill::before { content: "\f5fc"; }
.bi-umbrella::before { content: "\f5fd"; }
.bi-union::before { content: "\f5fe"; }
.bi-unlock-fill::before { content: "\f5ff"; }
.bi-unlock::before { content: "\f600"; }
.bi-upc-scan::before { content: "\f601"; }
.bi-upc::before { content: "\f602"; }
.bi-upload::before { content: "\f603"; }
.bi-vector-pen::before { content: "\f604"; }
.bi-view-list::before { content: "\f605"; }
.bi-view-stacked::before { content: "\f606"; }
.bi-vinyl-fill::before { content: "\f607"; }
.bi-vinyl::before { content: "\f608"; }
.bi-voicemail::before { content: "\f609"; }
.bi-volume-down-fill::before { content: "\f60a"; }
.bi-volume-down::before { content: "\f60b"; }
.bi-volume-mute-fill::before { content: "\f60c"; }
.bi-volume-mute::before { content: "\f60d"; }
.bi-volume-off-fill::before { content: "\f60e"; }
.bi-volume-off::before { content: "\f60f"; }
.bi-volume-up-fill::before { content: "\f610"; }
.bi-volume-up::before { content: "\f611"; }
.bi-vr::before { content: "\f612"; }
.bi-wallet-fill::before { content: "\f613"; }
.bi-wallet::before { content: "\f614"; }
.bi-wallet2::before { content: "\f615"; }
.bi-watch::before { content: "\f616"; }
.bi-water::before { content: "\f617"; }
.bi-whatsapp::before { content: "\f618"; }
.bi-wifi-1::before { content: "\f619"; }
.bi-wifi-2::before { content: "\f61a"; }
.bi-wifi-off::before { content: "\f61b"; }
.bi-wifi::before { content: "\f61c"; }
.bi-wind::before { content: "\f61d"; }
.bi-window-dock::before { content: "\f61e"; }
.bi-window-sidebar::before { content: "\f61f"; }
.bi-window::before { content: "\f620"; }
.bi-wrench::before { content: "\f621"; }
.bi-x-circle-fill::before { content: "\f622"; }
.bi-x-circle::before { content: "\f623"; }
.bi-x-diamond-fill::before { content: "\f624"; }
.bi-x-diamond::before { content: "\f625"; }
.bi-x-octagon-fill::before { content: "\f626"; }
.bi-x-octagon::before { content: "\f627"; }
.bi-x-square-fill::before { content: "\f628"; }
.bi-x-square::before { content: "\f629"; }
.bi-x::before { content: "\f62a"; }
.bi-youtube::before { content: "\f62b"; }
.bi-zoom-in::before { content: "\f62c"; }
.bi-zoom-out::before { content: "\f62d"; }
.bi-bank::before { content: "\f62e"; }
.bi-bank2::before { content: "\f62f"; }
.bi-bell-slash-fill::before { content: "\f630"; }
.bi-bell-slash::before { content: "\f631"; }
.bi-cash-coin::before { content: "\f632"; }
.bi-check-lg::before { content: "\f633"; }
.bi-coin::before { content: "\f634"; }
.bi-currency-bitcoin::before { content: "\f635"; }
.bi-currency-dollar::before { content: "\f636"; }
.bi-currency-euro::before { content: "\f637"; }
.bi-currency-exchange::before { content: "\f638"; }
.bi-currency-pound::before { content: "\f639"; }
.bi-currency-yen::before { content: "\f63a"; }
.bi-dash-lg::before { content: "\f63b"; }
.bi-exclamation-lg::before { content: "\f63c"; }
.bi-file-earmark-pdf-fill::before { content: "\f63d"; }
.bi-file-earmark-pdf::before { content: "\f63e"; }
.bi-file-pdf-fill::before { content: "\f63f"; }
.bi-file-pdf::before { content: "\f640"; }
.bi-gender-ambiguous::before { content: "\f641"; }
.bi-gender-female::before { content: "\f642"; }
.bi-gender-male::before { content: "\f643"; }
.bi-gender-trans::before { content: "\f644"; }
.bi-headset-vr::before { content: "\f645"; }
.bi-info-lg::before { content: "\f646"; }
.bi-mastodon::before { content: "\f647"; }
.bi-messenger::before { content: "\f648"; }
.bi-piggy-bank-fill::before { content: "\f649"; }
.bi-piggy-bank::before { content: "\f64a"; }
.bi-pin-map-fill::before { content: "\f64b"; }
.bi-pin-map::before { content: "\f64c"; }
.bi-plus-lg::before { content: "\f64d"; }
.bi-question-lg::before { content: "\f64e"; }
.bi-recycle::before { content: "\f64f"; }
.bi-reddit::before { content: "\f650"; }
.bi-safe-fill::before { content: "\f651"; }
.bi-safe2-fill::before { content: "\f652"; }
.bi-safe2::before { content: "\f653"; }
.bi-sd-card-fill::before { content: "\f654"; }
.bi-sd-card::before { content: "\f655"; }
.bi-skype::before { content: "\f656"; }
.bi-slash-lg::before { content: "\f657"; }
.bi-translate::before { content: "\f658"; }
.bi-x-lg::before { content: "\f659"; }
.bi-safe::before { content: "\f65a"; }
.bi-apple::before { content: "\f65b"; }
.bi-microsoft::before { content: "\f65d"; }
.bi-windows::before { content: "\f65e"; }
.bi-behance::before { content: "\f65c"; }
.bi-dribbble::before { content: "\f65f"; }
.bi-line::before { content: "\f660"; }
.bi-medium::before { content: "\f661"; }
.bi-paypal::before { content: "\f662"; }
.bi-pinterest::before { content: "\f663"; }
.bi-signal::before { content: "\f664"; }
.bi-snapchat::before { content: "\f665"; }
.bi-spotify::before { content: "\f666"; }
.bi-stack-overflow::before { content: "\f667"; }
.bi-strava::before { content: "\f668"; }
.bi-wordpress::before { content: "\f669"; }
.bi-vimeo::before { content: "\f66a"; }
.bi-activity::before { content: "\f66b"; }
.bi-easel2-fill::before { content: "\f66c"; }
.bi-easel2::before { content: "\f66d"; }
.bi-easel3-fill::before { content: "\f66e"; }
.bi-easel3::before { content: "\f66f"; }
.bi-fan::before { content: "\f670"; }
.bi-fingerprint::before { content: "\f671"; }
.bi-graph-down-arrow::before { content: "\f672"; }
.bi-graph-up-arrow::before { content: "\f673"; }
.bi-hypnotize::before { content: "\f674"; }
.bi-magic::before { content: "\f675"; }
.bi-person-rolodex::before { content: "\f676"; }
.bi-person-video::before { content: "\f677"; }
.bi-person-video2::before { content: "\f678"; }
.bi-person-video3::before { content: "\f679"; }
.bi-person-workspace::before { content: "\f67a"; }
.bi-radioactive::before { content: "\f67b"; }
.bi-webcam-fill::before { content: "\f67c"; }
.bi-webcam::before { content: "\f67d"; }
.bi-yin-yang::before { content: "\f67e"; }
.bi-bandaid-fill::before { content: "\f680"; }
.bi-bandaid::before { content: "\f681"; }
.bi-bluetooth::before { content: "\f682"; }
.bi-body-text::before { content: "\f683"; }
.bi-boombox::before { content: "\f684"; }
.bi-boxes::before { content: "\f685"; }
.bi-dpad-fill::before { content: "\f686"; }
.bi-dpad::before { content: "\f687"; }
.bi-ear-fill::before { content: "\f688"; }
.bi-ear::before { content: "\f689"; }
.bi-envelope-check-1::before { content: "\f68a"; }
.bi-envelope-check-fill::before { content: "\f68b"; }
.bi-envelope-check::before { content: "\f68c"; }
.bi-envelope-dash-1::before { content: "\f68d"; }
.bi-envelope-dash-fill::before { content: "\f68e"; }
.bi-envelope-dash::before { content: "\f68f"; }
.bi-envelope-exclamation-1::before { content: "\f690"; }
.bi-envelope-exclamation-fill::before { content: "\f691"; }
.bi-envelope-exclamation::before { content: "\f692"; }
.bi-envelope-plus-fill::before { content: "\f693"; }
.bi-envelope-plus::before { content: "\f694"; }
.bi-envelope-slash-1::before { content: "\f695"; }
.bi-envelope-slash-fill::before { content: "\f696"; }
.bi-envelope-slash::before { content: "\f697"; }
.bi-envelope-x-1::before { content: "\f698"; }
.bi-envelope-x-fill::before { content: "\f699"; }
.bi-envelope-x::before { content: "\f69a"; }
.bi-explicit-fill::before { content: "\f69b"; }
.bi-explicit::before { content: "\f69c"; }
.bi-git::before { content: "\f69d"; }
.bi-infinity::before { content: "\f69e"; }
.bi-list-columns-reverse::before { content: "\f69f"; }
.bi-list-columns::before { content: "\f6a0"; }
.bi-meta::before { content: "\f6a1"; }
.bi-mortorboard-fill::before { content: "\f6a2"; }
.bi-mortorboard::before { content: "\f6a3"; }
.bi-nintendo-switch::before { content: "\f6a4"; }
.bi-pc-display-horizontal::before { content: "\f6a5"; }
.bi-pc-display::before { content: "\f6a6"; }
.bi-pc-horizontal::before { content: "\f6a7"; }
.bi-pc::before { content: "\f6a8"; }
.bi-playstation::before { content: "\f6a9"; }
.bi-plus-slash-minus::before { content: "\f6aa"; }
.bi-projector-fill::before { content: "\f6ab"; }
.bi-projector::before { content: "\f6ac"; }
.bi-qr-code-scan::before { content: "\f6ad"; }
.bi-qr-code::before { content: "\f6ae"; }
.bi-quora::before { content: "\f6af"; }
.bi-quote::before { content: "\f6b0"; }
.bi-robot::before { content: "\f6b1"; }
.bi-send-check-fill::before { content: "\f6b2"; }
.bi-send-check::before { content: "\f6b3"; }
.bi-send-dash-fill::before { content: "\f6b4"; }
.bi-send-dash::before { content: "\f6b5"; }
.bi-send-exclamation-1::before { content: "\f6b6"; }
.bi-send-exclamation-fill::before { content: "\f6b7"; }
.bi-send-exclamation::before { content: "\f6b8"; }
.bi-send-fill::before { content: "\f6b9"; }
.bi-send-plus-fill::before { content: "\f6ba"; }
.bi-send-plus::before { content: "\f6bb"; }
.bi-send-slash-fill::before { content: "\f6bc"; }
.bi-send-slash::before { content: "\f6bd"; }
.bi-send-x-fill::before { content: "\f6be"; }
.bi-send-x::before { content: "\f6bf"; }
.bi-send::before { content: "\f6c0"; }
.bi-steam::before { content: "\f6c1"; }
.bi-terminal-dash-1::before { content: "\f6c2"; }
.bi-terminal-dash::before { content: "\f6c3"; }
.bi-terminal-plus::before { content: "\f6c4"; }
.bi-terminal-split::before { content: "\f6c5"; }
.bi-ticket-detailed-fill::before { content: "\f6c6"; }
.bi-ticket-detailed::before { content: "\f6c7"; }
.bi-ticket-fill::before { content: "\f6c8"; }
.bi-ticket-perferated-fill::before { content: "\f6c9"; }
.bi-ticket-perferated::before { content: "\f6ca"; }
.bi-ticket::before { content: "\f6cb"; }
.bi-tiktok::before { content: "\f6cc"; }
.bi-window-dash::before { content: "\f6cd"; }
.bi-window-desktop::before { content: "\f6ce"; }
.bi-window-fullscreen::before { content: "\f6cf"; }
.bi-window-plus::before { content: "\f6d0"; }
.bi-window-split::before { content: "\f6d1"; }
.bi-window-stack::before { content: "\f6d2"; }
.bi-window-x::before { content: "\f6d3"; }
.bi-xbox::before { content: "\f6d4"; }
.bi-ethernet::before { content: "\f6d5"; }
.bi-hdmi-fill::before { content: "\f6d6"; }
.bi-hdmi::before { content: "\f6d7"; }
.bi-usb-c-fill::before { content: "\f6d8"; }
.bi-usb-c::before { content: "\f6d9"; }
.bi-usb-fill::before { content: "\f6da"; }
.bi-usb-plug-fill::before { content: "\f6db"; }
.bi-usb-plug::before { content: "\f6dc"; }
.bi-usb-symbol::before { content: "\f6dd"; }
.bi-usb::before { content: "\f6de"; }
.bi-boombox-fill::before { content: "\f6df"; }
.bi-displayport-1::before { content: "\f6e0"; }
.bi-displayport::before { content: "\f6e1"; }
.bi-gpu-card::before { content: "\f6e2"; }
.bi-memory::before { content: "\f6e3"; }
.bi-modem-fill::before { content: "\f6e4"; }
.bi-modem::before { content: "\f6e5"; }
.bi-motherboard-fill::before { content: "\f6e6"; }
.bi-motherboard::before { content: "\f6e7"; }
.bi-optical-audio-fill::before { content: "\f6e8"; }
.bi-optical-audio::before { content: "\f6e9"; }
.bi-pci-card::before { content: "\f6ea"; }
.bi-router-fill::before { content: "\f6eb"; }
.bi-router::before { content: "\f6ec"; }
.bi-ssd-fill::before { content: "\f6ed"; }
.bi-ssd::before { content: "\f6ee"; }
.bi-thunderbolt-fill::before { content: "\f6ef"; }
.bi-thunderbolt::before { content: "\f6f0"; }
.bi-usb-drive-fill::before { content: "\f6f1"; }
.bi-usb-drive::before { content: "\f6f2"; }
.bi-usb-micro-fill::before { content: "\f6f3"; }
.bi-usb-micro::before { content: "\f6f4"; }
.bi-usb-mini-fill::before { content: "\f6f5"; }
.bi-usb-mini::before { content: "\f6f6"; }
.bi-cloud-haze2::before { content: "\f6f7"; }
.bi-device-hdd-fill::before { content: "\f6f8"; }
.bi-device-hdd::before { content: "\f6f9"; }
.bi-device-ssd-fill::before { content: "\f6fa"; }
.bi-device-ssd::before { content: "\f6fb"; }
.bi-displayport-fill::before { content: "\f6fc"; }
.bi-mortarboard-fill::before { content: "\f6fd"; }
.bi-mortarboard::before { content: "\f6fe"; }
| transformers.js/examples/demo-site/public/css/bootstrap-icons.css/0 | {
"file_path": "transformers.js/examples/demo-site/public/css/bootstrap-icons.css",
"repo_id": "transformers.js",
"token_count": 29385
} |
/////////////////////////////////////////////////////////////////
// Worker.js file for doing all transformer-based computations //
// Needed to ensure the UI thread is not blocked when running //
/////////////////////////////////////////////////////////////////
import { pipeline, env } from "@xenova/transformers";
env.allowLocalModels = false;
// Define task function mapping
const TASK_FUNCTION_MAPPING = {
'translation': translate,
'text-generation': text_generation,
'code-completion': code_completion,
'masked-language-modelling': masked_lm,
'sequence-classification': sequence_classification,
'token-classification': token_classification,
'zero-shot-classification': zero_shot_classification,
'question-answering': question_answering,
'summarization': summarize,
'automatic-speech-recognition': speech_to_text,
'image-to-text': image_to_text,
'image-classification': image_classification,
'zero-shot-image-classification': zero_shot_image_classification,
'object-detection': object_detection,
}
// Listen for messages from UI
self.addEventListener('message', async (event) => {
const data = event.data;
let fn = TASK_FUNCTION_MAPPING[data.task];
if (!fn) return;
let result = await fn(data);
self.postMessage({
task: data.task,
type: 'result',
data: result
});
});
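// Illustrative sketch (not part of the original file): a UI thread could drive this worker
// roughly as follows. The message fields mirror what the task functions below read
// (task, text, elementIdToUpdate, generation, ...), but the real demo-site main.js may differ.
//
//   const worker = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' });
//   worker.postMessage({
//       task: 'translation',
//       text: 'Hello, world!',
//       languageFrom: 'en',
//       languageTo: 'fr',
//       elementIdToUpdate: 'translation-output',
//       generation: { max_new_tokens: 50 },
//   });
//   worker.addEventListener('message', (e) => {
//       if (e.data.type === 'result') console.log(e.data.data);
//   });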
// Define model factories
// Ensures only one model is created of each type
class PipelineFactory {
static task = null;
static model = null;
// NOTE: instance stores a promise that resolves to the pipeline
static instance = null;
constructor(tokenizer, model) {
this.tokenizer = tokenizer;
this.model = model;
}
/**
* Get pipeline instance
* @param {*} progressCallback
* @returns {Promise}
*/
static getInstance(progressCallback = null) {
if (this.task === null || this.model === null) {
throw Error("Must set task and model")
}
if (this.instance === null) {
this.instance = pipeline(this.task, this.model, {
progress_callback: progressCallback
});
}
return this.instance;
}
}
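// Illustrative note: `getInstance` caches the pipeline *promise* rather than the resolved
// pipeline, so concurrent callers share a single download/initialisation. For example
// (hypothetical usage):
//
//   const [p1, p2] = await Promise.all([
//       TranslationPipelineFactory.getInstance(),
//       TranslationPipelineFactory.getInstance(),
//   ]);
//   // p1 === p2: only one 'Xenova/t5-small' pipeline is ever created for this factory.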
class TranslationPipelineFactory extends PipelineFactory {
static task = 'translation';
static model = 'Xenova/t5-small';
}
class TextGenerationPipelineFactory extends PipelineFactory {
static task = 'text-generation';
static model = 'Xenova/distilgpt2';
}
class CodeCompletionPipelineFactory extends PipelineFactory {
static task = 'text-generation';
static model = 'Xenova/codegen-350M-mono';
}
class MaskedLMPipelineFactory extends PipelineFactory {
static task = 'fill-mask';
static model = 'Xenova/bert-base-cased';
}
class SequenceClassificationPipelineFactory extends PipelineFactory {
static task = 'text-classification';
static model = 'Xenova/bert-base-multilingual-uncased-sentiment';
}
class TokenClassificationPipelineFactory extends PipelineFactory {
static task = 'token-classification';
static model = 'Xenova/bert-base-multilingual-cased-ner-hrl';
}
class ZeroShotClassificationPipelineFactory extends PipelineFactory {
static task = 'zero-shot-classification';
static model = 'Xenova/distilbert-base-uncased-mnli';
}
class QuestionAnsweringPipelineFactory extends PipelineFactory {
static task = 'question-answering';
static model = 'Xenova/distilbert-base-cased-distilled-squad';
}
class SummarizationPipelineFactory extends PipelineFactory {
static task = 'summarization';
static model = 'Xenova/distilbart-cnn-6-6';
}
class AutomaticSpeechRecognitionPipelineFactory extends PipelineFactory {
static task = 'automatic-speech-recognition';
static model = 'Xenova/whisper-tiny.en';
}
class ImageToTextPipelineFactory extends PipelineFactory {
static task = 'image-to-text';
static model = 'Xenova/vit-gpt2-image-captioning';
}
class ImageClassificationPipelineFactory extends PipelineFactory {
static task = 'image-classification';
static model = 'Xenova/vit-base-patch16-224';
}
class ZeroShotImageClassificationPipelineFactory extends PipelineFactory {
static task = 'zero-shot-image-classification';
static model = 'Xenova/clip-vit-base-patch16';
}
class ObjectDetectionPipelineFactory extends PipelineFactory {
static task = 'object-detection';
static model = 'Xenova/detr-resnet-50';
}
async function translate(data) {
let pipeline = await TranslationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'translation',
data: data
});
})
// Update task based on source and target languages
// Doing it this way prevents the same model from being loaded multiple times
pipeline.task = `translation_${data.languageFrom}_to_${data.languageTo}`;
return await pipeline(data.text, {
...data.generation,
callback_function: function (beams) {
const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
skip_special_tokens: true,
})
self.postMessage({
type: 'update',
target: data.elementIdToUpdate,
data: decodedText
});
}
})
}
async function text_generation(data) {
let pipeline = await TextGenerationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'text-generation',
data: data
});
})
let text = data.text.trim();
return await pipeline(text, {
...data.generation,
callback_function: function (beams) {
const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
skip_special_tokens: true,
})
self.postMessage({
type: 'update',
target: data.elementIdToUpdate,
data: decodedText
});
}
})
}
async function code_completion(data) {
let pipeline = await CodeCompletionPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'code-completion',
data: data,
});
})
let text = data.text;
return await pipeline(text, {
...data.generation,
callback_function: function (beams) {
const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
skip_special_tokens: true,
})
self.postMessage({
type: 'update',
target: data.elementIdToUpdate,
targetType: data.targetType,
data: decodedText
});
}
})
}
async function masked_lm(data) {
let pipeline = await MaskedLMPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'masked-language-modelling',
data: data
});
})
let output = await pipeline(data.text, data.generation)
self.postMessage({
type: 'update',
target: data.elementIdToUpdate,
data: output.map(x => x.sequence).join('\n')
});
return output;
}
async function sequence_classification(data) {
let pipeline = await SequenceClassificationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'sequence-classification',
data: data
});
});
let outputs = await pipeline(data.text, {
topk: 5 // return all
})
self.postMessage({
type: 'complete',
target: data.elementIdToUpdate,
targetType: data.targetType,
data: outputs
});
}
async function token_classification(data) {
let pipeline = await TokenClassificationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'token-classification',
data: data
});
});
let outputs = await pipeline(data.text, {
ignore_labels: [] // Return all labels
});
let chunks = [];
let currentChunk = { type: '', text: [] };
for (let i = 0; i < outputs.length; i++) {
let word = pipeline.tokenizer.model.tokens_to_ids.get(outputs[i].word);
let entity = outputs[i].entity;
if (entity.startsWith('B-')) { // beginning of a new chunk
if (currentChunk.text.length > 0) { // push the current chunk if it exists
chunks.push(currentChunk);
currentChunk = { type: '', text: [] };
}
currentChunk.type = entity.slice(2); // get the type of the chunk
currentChunk.text = [word];
} else if (entity.startsWith('I-')) { // continuation of a chunk
currentChunk.text.push(word);
} else { // not part of a chunk (O tag)
if (currentChunk.text.length > 0) { // push the current chunk if it exists
if (currentChunk.type === 'O') {
currentChunk.text.push(word);
} else {
chunks.push(currentChunk);
currentChunk = { type: 'O', text: [word] };
}
} else {
currentChunk = { type: 'O', text: [word] };
}
}
}
// push the last chunk if it exists
if (currentChunk.text.length > 0) {
chunks.push(currentChunk);
}
let postProcessedChunks = chunks.map(
x => ({
type: x.type,
text: pipeline.tokenizer.decode(x.text)
})
)
self.postMessage({
type: 'complete',
target: data.elementIdToUpdate,
targetType: data.targetType,
data: postProcessedChunks,
});
}
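// Illustrative example of the chunking above (hypothetical input): for "John lives in Paris"
// tagged [B-PER, O, O, B-LOC], `postProcessedChunks` would look roughly like this
// (exact whitespace depends on the tokenizer):
//   [
//     { type: 'PER', text: 'John' },
//     { type: 'O',   text: ' lives in' },
//     { type: 'LOC', text: ' Paris' },
//   ]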
async function zero_shot_classification(data) {
let pipeline = await ZeroShotClassificationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'zero-shot-classification',
data: data
});
});
let outputs = await pipeline(data.text, data.classes, data.generation);
let formattedOutputs = outputs.labels.map((x, i) => {
return {
label: x,
score: outputs.scores[i],
}
});
self.postMessage({
type: 'complete',
target: data.elementIdToUpdate,
targetType: data.targetType,
data: formattedOutputs
});
}
async function question_answering(data) {
let pipeline = await QuestionAnsweringPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'question-answering',
data: data
});
})
let answer = await pipeline(data.question, data.context)
self.postMessage({
type: 'complete',
target: data.elementIdToUpdate,
data: answer.answer
});
return answer;
}
async function summarize(data) {
let pipeline = await SummarizationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'summarization',
data: data
});
})
return await pipeline(data.text, {
...data.generation,
callback_function: function (beams) {
const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
skip_special_tokens: true,
})
self.postMessage({
type: 'update',
target: data.elementIdToUpdate,
data: decodedText.trim()
});
}
})
}
async function speech_to_text(data) {
let pipeline = await AutomaticSpeechRecognitionPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'automatic-speech-recognition',
data: data
});
})
return await pipeline(data.audio, {
// Choose good defaults for the demo
chunk_length_s: 30,
stride_length_s: 5,
...data.generation,
callback_function: function (beams) {
const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
skip_special_tokens: true,
})
self.postMessage({
type: 'update',
target: data.elementIdToUpdate,
data: decodedText.trim()
});
}
})
}
async function image_to_text(data) {
let pipeline = await ImageToTextPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'image-to-text',
data: data
});
})
return await pipeline(data.image, {
...data.generation,
callback_function: function (beams) {
const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
skip_special_tokens: true,
})
self.postMessage({
type: 'update',
target: data.elementIdToUpdate,
data: decodedText.trim()
});
}
})
}
async function image_classification(data) {
let pipeline = await ImageClassificationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'image-classification',
data: data
});
})
let outputs = await pipeline(data.image, {
topk: 5 // return the top 5 predictions
})
self.postMessage({
type: 'complete',
target: data.elementIdToUpdate,
targetType: data.targetType,
updateLabels: data.updateLabels,
data: outputs
});
}
async function zero_shot_image_classification(data) {
let pipeline = await ZeroShotImageClassificationPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'zero-shot-image-classification',
data: data
});
})
let outputs = await pipeline(data.image, data.classes)
self.postMessage({
type: 'complete',
target: data.elementIdToUpdate,
targetType: data.targetType,
updateLabels: data.updateLabels,
data: outputs
});
}
async function object_detection(data) {
let pipeline = await ObjectDetectionPipelineFactory.getInstance(data => {
self.postMessage({
type: 'download',
task: 'object-detection',
data: data
});
})
let outputs = await pipeline(data.image, {
threshold: 0.9,
percentage: true
})
self.postMessage({
type: 'complete',
target: data.elementIdToUpdate,
targetType: data.targetType,
chartId: data.chartId,
data: outputs
});
}
| transformers.js/examples/demo-site/src/worker.js/0 | {
"file_path": "transformers.js/examples/demo-site/src/worker.js",
"repo_id": "transformers.js",
"token_count": 6373
} |
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>Transformers.js | Sample Electron application</title>
<!-- Load styles -->
<link rel="stylesheet" href="index.css" />
<!-- Load the client script -->
<script src="./client.js" defer></script>
</head>
<body>
<div class="container">
<h1>Transformers.js</h1>
<h2>Run 🤗 Transformers in Electron!</h2>
<input id="text" placeholder="Enter text here">
<pre id="output"></pre>
</div>
</body>
</html> | transformers.js/examples/electron/src/index.html/0 | {
"file_path": "transformers.js/examples/electron/src/index.html",
"repo_id": "transformers.js",
"token_count": 220
} |
/** @type {import('next').NextConfig} */
const nextConfig = {
// (Optional) Export as a standalone site
// See https://nextjs.org/docs/pages/api-reference/next-config-js/output#automatically-copying-traced-files
output: 'standalone', // Feel free to modify/remove this option
// Indicate that these packages should not be bundled by webpack
experimental: {
serverComponentsExternalPackages: ['sharp', 'onnxruntime-node'],
},
};
module.exports = nextConfig;
| transformers.js/examples/next-server/next.config.js/0 | {
"file_path": "transformers.js/examples/next-server/next.config.js",
"repo_id": "transformers.js",
"token_count": 164
} |
{
"name": "commonjs",
"version": "1.0.0",
"description": "Server-side inference with Transformers.js (CommonJS)",
"main": "app.js",
"keywords": [],
"author": "Xenova",
"license": "ISC",
"dependencies": {
"@xenova/transformers": "^2.0.0"
}
}
| transformers.js/examples/node/commonjs/package.json/0 | {
"file_path": "transformers.js/examples/node/commonjs/package.json",
"repo_id": "transformers.js",
"token_count": 109
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js - Background Removal</title>
</head>
<body>
<h1>Background Removal w/ <a href="https://github.com/huggingface/transformers.js" target="_blank">🤗 Transformers.js</a>
</h1>
<h4>Runs locally in your browser, powered by the <a href="https://huggingface.co/briaai/RMBG-1.4" target="_blank">RMBG V1.4 model</a> from <a
href="https://bria.ai/" target="_blank">BRIA AI</a>
</h4>
<div id="container">
<label id="upload-button" for="upload">
<svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill="#000"
d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z">
</path>
</svg>
Click to upload image
<label id="example">(or try example)</label>
</label>
</div>
<label id="status"></label>
<input id="upload" type="file" accept="image/*" />
<script type="module" src="/main.js"></script>
</body>
</html> | transformers.js/examples/remove-background-client/index.html/0 | {
"file_path": "transformers.js/examples/remove-background-client/index.html",
"repo_id": "transformers.js",
"token_count": 719
} |
'use client'
import Image from 'next/image'
import { downloadImage } from '../utils.js'
export function Modal({ currentImage, setCurrentImage }) {
const photo_url = currentImage ? `https://unsplash.com/photos/${currentImage.id}` : null;
const photo_image_url = currentImage ? `https://images.unsplash.com/${currentImage.url}?auto=format&fit=crop&w=480&q=80` : null;
return (
<div
className='fixed inset-0 z-30 backdrop-blur-2xl w-full h-full bg-black top-0 left-0 transition'
style={{
backgroundColor: `rgba(0, 0, 0, ${currentImage ? 0.8 : 0})`,
opacity: currentImage ? 1 : 0,
pointerEvents: currentImage ? 'auto' : 'none',
}}
>
{currentImage && <>
<Image
alt=''
className="transform rounded-lg transition will-change-auto"
style={
{ transform: 'translate3d(0, 0, 0)', }
}
layout={'fill'}
objectFit={'contain'}
src={photo_image_url}
unoptimized={true}
/>
<div
className='absolute top-0 left-0 flex items-center gap-2 p-3 text-white'
>
<button
onClick={() => setCurrentImage(null)}
className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
<path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12"></path>
</svg>
</button>
</div>
<div className="absolute top-0 right-0 flex items-center gap-2 p-3 text-white">
<a
href={photo_url}
className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white"
target="_blank" title="View on Unsplash"
rel="noreferrer">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
<path strokeLinecap="round" strokeLinejoin="round" d="M13.5 6H5.25A2.25 2.25 0 003 8.25v10.5A2.25 2.25 0 005.25 21h10.5A2.25 2.25 0 0018 18.75V10.5m-10.5 6L21 3m0 0h-5.25M21 3v5.25"></path>
</svg>
</a>
<button
onClick={() => downloadImage(photo_image_url, `${currentImage.id}.png`)}
className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white" title="Download">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
<path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3">
</path>
</svg>
</button>
</div>
</>
}
</div>)
} | transformers.js/examples/semantic-image-search-client/src/app/components/Modal.jsx/0 | {
"file_path": "transformers.js/examples/semantic-image-search-client/src/app/components/Modal.jsx",
"repo_id": "transformers.js",
"token_count": 2014
} |
/** @type {import('tailwindcss').Config} */
module.exports = {
content: [
'./src/pages/**/*.{js,ts,jsx,tsx,mdx}',
'./src/components/**/*.{js,ts,jsx,tsx,mdx}',
'./src/app/**/*.{js,ts,jsx,tsx,mdx}',
],
theme: {
extend: {
boxShadow: {
highlight: 'inset 0 0 0 1px rgba(255, 255, 255, 0.1)',
},
},
},
plugins: [],
}
| transformers.js/examples/semantic-image-search/tailwind.config.js/0 | {
"file_path": "transformers.js/examples/semantic-image-search/tailwind.config.js",
"repo_id": "transformers.js",
"token_count": 188
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js | real-time CLIP</title>
</head>
<body>
<h1>
Real-time zero-shot image classification (WebGPU)
</h1>
<h3>
Runs locally in your browser w/
<a href="https://github.com/huggingface/transformers.js" target="_blank">🤗 Transformers.js</a>
</h3>
<div id="container">
<video id="video" autoplay muted playsinline></video>
<div id="overlay"></div>
</div>
<div id="controls">
<div title="Labels used to perform zero-shot image classification">
<label>Labels (comma-separated)</label>
<br>
<input id="labels" type="text" disabled>
</div>
<div title="Template used to perform zero-shot image classification">
<label>Hypothesis template</label>
<br>
<input id="template" type="text" value="A photo of a {}" disabled>
</div>
</div>
<label id="status"></label>
<script type="module" src="/main.js"></script>
</body>
</html> | transformers.js/examples/webgpu-clip/index.html/0 | {
"file_path": "transformers.js/examples/webgpu-clip/index.html",
"repo_id": "transformers.js",
"token_count": 417
} |
import { marked } from 'marked';
import DOMPurify from 'dompurify';
import BotIcon from './icons/BotIcon';
import UserIcon from './icons/UserIcon';
import './Chat.css';
export default function Chat({ messages }) {
const empty = messages.length === 0;
return (<div className={`flex-1 p-6 max-w-[960px] w-full ${empty ? 'flex flex-col items-center justify-end' : 'space-y-4'}`}>
{empty
? <div className="text-xl">Ready!</div>
: messages.map((msg, i) => (
<div key={`message-${i}`} className="flex items-start space-x-4">
{msg.role === 'assistant'
? (<>
<BotIcon className="h-6 w-6 min-h-6 min-w-6 my-3 text-gray-500 dark:text-gray-300" />
<div className="bg-gray-200 dark:bg-gray-700 rounded-lg p-4">
<p className="min-h-6 text-gray-800 dark:text-gray-200 overflow-wrap-anywhere">{
msg.content.length > 0
? <span className="markdown" dangerouslySetInnerHTML={{ __html: DOMPurify.sanitize(marked.parse(msg.content)) }} />
: (<span className="h-6 flex items-center gap-1">
<span className="w-2.5 h-2.5 bg-gray-600 dark:bg-gray-300 rounded-full animate-pulse"></span>
<span className="w-2.5 h-2.5 bg-gray-600 dark:bg-gray-300 rounded-full animate-pulse animation-delay-200"></span>
<span className="w-2.5 h-2.5 bg-gray-600 dark:bg-gray-300 rounded-full animate-pulse animation-delay-400"></span>
</span>)
}</p>
</div>
</>
) : (<>
<UserIcon className="h-6 w-6 min-h-6 min-w-6 my-3 text-gray-500 dark:text-gray-300" />
<div className="bg-blue-500 text-white rounded-lg p-4">
{msg.image && <img src={msg.image} className="max-w-full max-h-64 rounded-md mb-3" />}
<p className="min-h-6 overflow-wrap-anywhere">{msg.content}</p>
</div>
</>)
}
</div>
))}
</div>)
}
| transformers.js/examples/webgpu-vlm/src/components/Chat.jsx/0 | {
"file_path": "transformers.js/examples/webgpu-vlm/src/components/Chat.jsx",
"repo_id": "transformers.js",
"token_count": 1430
} |
# TODO: Enable once https://github.com/huggingface/optimum/pull/1552 is merged
# # Support exporting vision and text models separately:
# # Adapted from https://github.com/huggingface/optimum/issues/1186#issuecomment-1637641760
# from optimum.exporters.onnx.model_configs import CLAPTextWithProjectionOnnxConfig, AudioOnnxConfig
# from optimum.utils.normalized_config import NormalizedAudioConfig
# from optimum.utils.input_generators import DummyAudioInputGenerator
# from typing import Dict
# class ClapAudioModelWithProjectionOnnxConfig(AudioOnnxConfig):
# NORMALIZED_CONFIG_CLASS = NormalizedAudioConfig
# DUMMY_INPUT_GENERATOR_CLASSES = (DummyAudioInputGenerator, )
# @property
# def inputs(self) -> Dict[str, Dict[int, str]]:
# return {
# "input_features": {0: "audio_batch_size", 1: "num_channels", 2: "height", 3: "width"}, # As described in modeling_clap.py
# }
# @property
# def outputs(self) -> Dict[str, Dict[int, str]]:
# return {
# "audio_embeds": {0: "batch_size"},
# }
# class ClapTextModelWithProjectionOnnxConfig(CLAPTextWithProjectionOnnxConfig):
# @property
# def outputs(self) -> Dict[str, Dict[int, str]]:
# return {
# "text_embeds": {0: "batch_size"},
# }
# def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
# dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)
# if framework == "pt":
# import torch
# dummy_inputs["input_ids"] = dummy_inputs["input_ids"].to(dtype=torch.int64)
# return dummy_inputs
| transformers.js/scripts/extra/clap.py/0 | {
"file_path": "transformers.js/scripts/extra/clap.py",
"repo_id": "transformers.js",
"token_count": 684
} |
/**
* @file Helper module for using model configs. For more information, see the corresponding
* [Python documentation](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoConfig).
*
* **Example:** Load an `AutoConfig`.
*
* ```javascript
* import { AutoConfig } from '@huggingface/transformers';
* const config = await AutoConfig.from_pretrained('bert-base-uncased');
* console.log(config);
* // PretrainedConfig {
* // "model_type": "bert",
* // "is_encoder_decoder": false,
* // "architectures": [
* // "BertForMaskedLM"
* // ],
* // "vocab_size": 30522
* // "num_attention_heads": 12,
* // "num_hidden_layers": 12,
* // "hidden_size": 768,
* // "max_position_embeddings": 512,
* // ...
* // }
* ```
*
* @module configs
*/
import { pick } from './utils/core.js';
import {
getModelJSON,
} from './utils/hub.js';
/**
* @typedef {import('./utils/hub.js').PretrainedOptions} PretrainedOptions
*/
/**
* @typedef {import('./utils/core.js').ProgressCallback} ProgressCallback
*/
/**
* @typedef {import('./utils/core.js').ProgressInfo} ProgressInfo
*/
/**
* Loads a config from the specified path.
* @param {string} pretrained_model_name_or_path The path to the config directory.
* @param {PretrainedOptions} options Additional options for loading the config.
* @returns {Promise<Object>} A promise that resolves with information about the loaded config.
*/
async function loadConfig(pretrained_model_name_or_path, options) {
return await getModelJSON(pretrained_model_name_or_path, 'config.json', true, options);
}
/**
*
* @param {PretrainedConfig} config
* @returns {Object} The normalized configuration.
*/
function getNormalizedConfig(config) {
const mapping = {};
let init_normalized_config = {};
switch (config.model_type) {
// Sub-configs
case 'llava':
case 'paligemma':
case 'florence2':
case 'llava_onevision':
case 'idefics3':
// @ts-expect-error TS2339
init_normalized_config = getNormalizedConfig(config.text_config);
break;
case 'moondream1':
// @ts-expect-error TS2339
init_normalized_config = getNormalizedConfig(config.phi_config);
break;
case 'musicgen':
// @ts-expect-error TS2339
init_normalized_config = getNormalizedConfig(config.decoder);
break;
case 'multi_modality':
// @ts-expect-error TS2339
init_normalized_config = getNormalizedConfig(config.language_config);
break;
// Decoder-only models
case 'gpt2':
case 'gptj':
case 'jais':
case 'codegen':
case 'gpt_bigcode':
mapping['num_heads'] = 'n_head';
mapping['num_layers'] = 'n_layer';
mapping['hidden_size'] = 'n_embd';
break;
case 'gpt_neox':
case 'stablelm':
case 'opt':
case 'falcon':
mapping['num_heads'] = 'num_attention_heads';
mapping['num_layers'] = 'num_hidden_layers';
mapping['hidden_size'] = 'hidden_size';
break;
case 'llama':
case 'olmo':
case 'olmo2':
case 'mobilellm':
case 'granite':
case 'cohere':
case 'mistral':
case 'starcoder2':
case 'qwen2':
case 'qwen2_vl':
case 'phi':
case 'phi3':
case 'phi3_v':
mapping['num_heads'] = 'num_key_value_heads';
mapping['num_layers'] = 'num_hidden_layers';
mapping['hidden_size'] = 'hidden_size';
mapping['num_attention_heads'] = 'num_attention_heads';
break;
case 'gemma':
case 'gemma2':
case 'glm':
case 'helium':
mapping['num_heads'] = 'num_key_value_heads';
mapping['num_layers'] = 'num_hidden_layers';
mapping['dim_kv'] = 'head_dim';
break;
case 'openelm':
mapping['num_heads'] = 'num_kv_heads';
mapping['num_layers'] = 'num_transformer_layers';
mapping['dim_kv'] = 'head_dim';
break;
case 'gpt_neo':
case 'donut-swin':
mapping['num_heads'] = 'num_heads';
mapping['num_layers'] = 'num_layers';
mapping['hidden_size'] = 'hidden_size';
break;
case 'bloom':
mapping['num_heads'] = 'n_head';
mapping['num_layers'] = 'n_layer';
mapping['hidden_size'] = 'hidden_size';
break;
case 'mpt':
mapping['num_heads'] = 'n_heads';
mapping['num_layers'] = 'n_layers';
mapping['hidden_size'] = 'd_model';
break;
case 'exaone':
mapping['num_heads'] = 'num_key_value_heads';
mapping['num_layers'] = 'num_layers';
mapping['dim_kv'] = 'head_dim';
mapping['num_attention_heads'] = 'num_attention_heads';
break;
// Encoder-decoder models
case 't5':
case 'mt5':
case 'longt5':
mapping['num_decoder_layers'] = 'num_decoder_layers';
mapping['num_decoder_heads'] = 'num_heads';
mapping['decoder_dim_kv'] = 'd_kv';
mapping['num_encoder_layers'] = 'num_layers';
mapping['num_encoder_heads'] = 'num_heads';
mapping['encoder_dim_kv'] = 'd_kv';
break;
case 'bart':
case 'mbart':
case 'marian':
case 'whisper':
case 'm2m_100':
case 'blenderbot':
case 'blenderbot-small':
case 'florence2_language':
mapping['num_decoder_layers'] = 'decoder_layers';
mapping['num_decoder_heads'] = 'decoder_attention_heads';
mapping['decoder_hidden_size'] = 'd_model';
mapping['num_encoder_layers'] = 'encoder_layers';
mapping['num_encoder_heads'] = 'encoder_attention_heads';
mapping['encoder_hidden_size'] = 'd_model';
break;
case 'speecht5':
mapping['num_decoder_layers'] = 'decoder_layers';
mapping['num_decoder_heads'] = 'decoder_attention_heads';
mapping['decoder_hidden_size'] = 'hidden_size';
mapping['num_encoder_layers'] = 'encoder_layers';
mapping['num_encoder_heads'] = 'encoder_attention_heads';
mapping['encoder_hidden_size'] = 'hidden_size';
break;
case 'trocr':
mapping['num_encoder_layers'] = mapping['num_decoder_layers'] = 'decoder_layers';
mapping['num_encoder_heads'] = mapping['num_decoder_heads'] = 'decoder_attention_heads';
mapping['encoder_hidden_size'] = mapping['decoder_hidden_size'] = 'd_model';
break;
case 'musicgen_decoder':
mapping['num_encoder_layers'] = mapping['num_decoder_layers'] = 'num_hidden_layers';
mapping['num_encoder_heads'] = mapping['num_decoder_heads'] = 'num_attention_heads';
mapping['encoder_hidden_size'] = mapping['decoder_hidden_size'] = 'hidden_size';
break;
case 'moonshine':
mapping['num_decoder_layers'] = 'decoder_num_hidden_layers';
mapping['num_decoder_heads'] = 'decoder_num_key_value_heads';
mapping['num_encoder_layers'] = 'encoder_num_hidden_layers';
mapping['num_encoder_heads'] = 'encoder_num_key_value_heads';
mapping['encoder_hidden_size'] = mapping['decoder_hidden_size'] = 'hidden_size';
break;
case 'vision-encoder-decoder':
// @ts-expect-error TS2339
const decoderConfig = getNormalizedConfig(config.decoder);
const add_encoder_pkv = 'num_decoder_layers' in decoderConfig;
const result = pick(config, ['model_type', 'is_encoder_decoder']);
if (add_encoder_pkv) {
// Decoder is part of an encoder-decoder model
result.num_decoder_layers = decoderConfig.num_decoder_layers;
result.num_decoder_heads = decoderConfig.num_decoder_heads;
result.decoder_hidden_size = decoderConfig.decoder_hidden_size;
result.num_encoder_layers = decoderConfig.num_encoder_layers;
result.num_encoder_heads = decoderConfig.num_encoder_heads;
result.encoder_hidden_size = decoderConfig.encoder_hidden_size;
} else {
// Decoder is a decoder-only model
result.num_layers = decoderConfig.num_layers;
result.num_heads = decoderConfig.num_heads;
result.hidden_size = decoderConfig.hidden_size;
}
return result;
}
// NOTE: If `num_attention_heads` is not set, it is assumed to be equal to `num_heads`
const normalized_config = {
...init_normalized_config,
...pick(config, ['model_type', 'multi_query', 'is_encoder_decoder']),
};
for (const key in mapping) {
normalized_config[key] = config[mapping[key]];
}
return normalized_config;
}
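// Illustrative example (not from the original source): for a GPT-2 style config such as
// { model_type: 'gpt2', n_head: 12, n_layer: 12, n_embd: 768 }, the mapping above yields
// { model_type: 'gpt2', num_heads: 12, num_layers: 12, hidden_size: 768, ... },
// which is the shape of object that `getKeyValueShapes` below consumes.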
/**
*
* @param {PretrainedConfig} config
* @returns {Record<string, number[]>}
*/
export function getKeyValueShapes(config, {
prefix = 'past_key_values',
batch_size=1,
} = {}) {
/** @type {Record<string, number[]>} */
const decoderFeeds = {};
const normalized_config = config.normalized_config;
if (normalized_config.is_encoder_decoder && (
'num_encoder_heads' in normalized_config && 'num_decoder_heads' in normalized_config
)) {
const encoder_dim_kv = normalized_config.encoder_dim_kv ?? (
normalized_config.encoder_hidden_size / normalized_config.num_encoder_heads
);
const decoder_dim_kv = normalized_config.decoder_dim_kv ?? (
normalized_config.decoder_hidden_size / normalized_config.num_decoder_heads
);
const encoder_dims = [batch_size, normalized_config.num_encoder_heads, 0, encoder_dim_kv];
const decoder_dims = [batch_size, normalized_config.num_decoder_heads, 0, decoder_dim_kv];
for (let i = 0; i < normalized_config.num_decoder_layers; ++i) {
decoderFeeds[`${prefix}.${i}.encoder.key`] = encoder_dims;
decoderFeeds[`${prefix}.${i}.encoder.value`] = encoder_dims;
decoderFeeds[`${prefix}.${i}.decoder.key`] = decoder_dims;
decoderFeeds[`${prefix}.${i}.decoder.value`] = decoder_dims;
}
} else { // Decoders
const num_heads = normalized_config.num_heads;
const num_layers = normalized_config.num_layers;
const dim_kv = normalized_config.dim_kv ?? (
normalized_config.hidden_size /
(normalized_config.num_attention_heads ?? num_heads)
);
if (normalized_config.model_type === 'falcon') {
// NOTE: Custom implementation for Falcon
const dims = [batch_size * num_heads, 0, dim_kv]
for (let i = 0; i < num_layers; ++i) {
decoderFeeds[`${prefix}.${i}.key`] = dims;
decoderFeeds[`${prefix}.${i}.value`] = dims;
}
} else if (normalized_config.multi_query) { // e.g., for `gpt_bigcode`
const dims = [batch_size * num_heads, 0, 2 * dim_kv]
for (let i = 0; i < num_layers; ++i) {
decoderFeeds[`${prefix}.${i}.key_value`] = dims;
}
} else if (normalized_config.model_type === 'bloom') {
// NOTE: Custom implementation for Bloom
const keyDims = [batch_size * num_heads, dim_kv, 0] // [batch_size x num_heads,64,past_sequence_length]
const valueDims = [batch_size * num_heads, 0, dim_kv] // [batch_size x num_heads,past_sequence_length,64]
for (let i = 0; i < num_layers; ++i) {
decoderFeeds[`${prefix}.${i}.key`] = keyDims;
decoderFeeds[`${prefix}.${i}.value`] = valueDims;
}
} else if (normalized_config.model_type === 'openelm') {
for (let i = 0; i < num_layers; ++i) {
const dims = [batch_size, num_heads[i], 0, dim_kv]
decoderFeeds[`${prefix}.${i}.key`] = dims;
decoderFeeds[`${prefix}.${i}.value`] = dims;
}
} else { // Decoder-only
const dims = [batch_size, num_heads, 0, dim_kv]
for (let i = 0; i < num_layers; ++i) {
decoderFeeds[`${prefix}.${i}.key`] = dims;
decoderFeeds[`${prefix}.${i}.value`] = dims;
}
}
}
return decoderFeeds;
}
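// Illustrative example (hypothetical numbers): for a decoder-only config normalized to
// { num_layers: 2, num_heads: 12, hidden_size: 768, num_attention_heads: 12 }, dim_kv is
// 768 / 12 = 64 and the function returns:
//   {
//     'past_key_values.0.key':   [1, 12, 0, 64],
//     'past_key_values.0.value': [1, 12, 0, 64],
//     'past_key_values.1.key':   [1, 12, 0, 64],
//     'past_key_values.1.value': [1, 12, 0, 64],
//   }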
/**
* Base class for all configuration classes. For more information, see the corresponding
* [Python documentation](https://huggingface.co/docs/transformers/main/en/main_classes/configuration#transformers.PretrainedConfig).
*/
export class PretrainedConfig {
// NOTE: Typo in original
/** @type {string|null} */
model_type = null;
/** @type {boolean} */
is_encoder_decoder = false;
/** @type {number} */
max_position_embeddings;
/** @type {TransformersJSConfig} */
'transformers.js_config';
/**
* Create a new PreTrainedTokenizer instance.
* @param {Object} configJSON The JSON of the config.
*/
constructor(configJSON) {
Object.assign(this, configJSON);
this.normalized_config = getNormalizedConfig(this);
}
/**
* Loads a pre-trained config from the given `pretrained_model_name_or_path`.
*
* @param {string} pretrained_model_name_or_path The path to the pre-trained config.
* @param {PretrainedOptions} options Additional options for loading the config.
* @throws {Error} Throws an error if the config.json is not found in the `pretrained_model_name_or_path`.
*
* @returns {Promise<PretrainedConfig>} A new instance of the `PretrainedConfig` class.
*/
static async from_pretrained(pretrained_model_name_or_path, {
progress_callback = null,
config = null,
cache_dir = null,
local_files_only = false,
revision = 'main',
} = {}) {
if (config && !(config instanceof PretrainedConfig)) {
config = new PretrainedConfig(config);
}
const data = config ?? await loadConfig(pretrained_model_name_or_path, {
progress_callback,
config,
cache_dir,
local_files_only,
revision,
})
return new this(data);
}
}
/**
* Helper class which is used to instantiate pretrained configs with the `from_pretrained` function.
*
* @example
* const config = await AutoConfig.from_pretrained('Xenova/bert-base-uncased');
*/
export class AutoConfig {
/** @type {typeof PretrainedConfig.from_pretrained} */
static async from_pretrained(...args) {
return PretrainedConfig.from_pretrained(...args);
}
}
/**
* Transformers.js-specific configuration, possibly present in config.json under the key `transformers.js_config`.
* @typedef {Object} TransformersJSConfig
* @property {import('./utils/tensor.js').DataType|Record<import('./utils/dtypes.js').DataType, import('./utils/tensor.js').DataType>} [kv_cache_dtype] The data type of the key-value cache.
* @property {Record<string, number>} [free_dimension_overrides] Override the free dimensions of the model.
* See https://onnxruntime.ai/docs/tutorials/web/env-flags-and-session-options.html#freedimensionoverrides
* for more information.
* @property {import('./utils/devices.js').DeviceType} [device] The default device to use for the model.
* @property {import('./utils/dtypes.js').DataType|Record<string, import('./utils/dtypes.js').DataType>} [dtype] The default data type to use for the model.
* @property {boolean|Record<string, boolean>} [use_external_data_format=false] Whether to load the model using the external data format (used for models >= 2GB in size).
*/
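// Illustrative sketch (assumed values, not taken from any real model repo) of how such a
// section might appear inside a model's config.json:
//   {
//     "model_type": "llama",
//     ...
//     "transformers.js_config": {
//       "kv_cache_dtype": "float16",
//       "free_dimension_overrides": { "batch_size": 1 },
//       "device": "webgpu",
//       "dtype": "q4"
//     }
//   }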
| transformers.js/src/configs.js/0 | {
"file_path": "transformers.js/src/configs.js",
"repo_id": "transformers.js",
"token_count": 7215
} |