from typing import Any

from transformers.configuration_utils import PretrainedConfig

__all__ = ["AIMv2Config"]


class AIMv2Config(PretrainedConfig):
"""This is the configuration class to store the configuration of an [`AIMv2Model`]. | |
Instantiating a configuration with the defaults will yield a similar configuration | |
to that of the [apple/aimv2-large-patch14-native](https://huggingface.co/apple/aimv2-large-patch14-native) | |
Args: | |
hidden_size: Dimension of the hidden representations. | |
intermediate_size: Dimension of the SwiGLU representations. | |
num_hidden_layers: Number of hidden layers in the Transformer. | |
num_attention_heads: Number of attention heads for each attention layer | |
in the Transformer. | |
num_channels: Number of input channels. | |
num_queries: Number of learnable queries in the head. | |
patch_size: Patch size. | |
rms_norm_eps: Epsilon value used for the RMS normalization layer. | |
attention_dropout: Dropout ratio for attention probabilities. | |
projection_dropout: Dropout ratio for the projection layer after the attention. | |
qkv_bias: Whether to add a bias to the queries, keys and values. | |
use_bias: Whether to add a bias in the feed-forward and projection layers. | |
kwargs: Keyword arguments for the [`PretrainedConfig`]. | |
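
    Example:

    A minimal usage sketch (the import path below is an assumption based on the
    usual `configuration_aimv2` module name and may differ in your setup):

    ```python
    from configuration_aimv2 import AIMv2Config

    # Defaults mirror the aimv2-large-patch14-native configuration.
    config = AIMv2Config()
    assert config.hidden_size == 1024

    # Override fields to describe a smaller variant.
    small = AIMv2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=6)
    ```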
""" | |
    model_type: str = "aimv2"

    def __init__(
        self,
        hidden_size: int = 1024,
        intermediate_size: int = 2816,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 8,
        num_channels: int = 3,
        num_queries: int = 256,
        patch_size: int = 14,
        rms_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        projection_dropout: float = 0.0,
        qkv_bias: bool = False,
        use_bias: bool = False,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.patch_size = patch_size
        self.attention_dropout = attention_dropout
        self.rms_norm_eps = rms_norm_eps
        self.projection_dropout = projection_dropout
        self.qkv_bias = qkv_bias
        self.use_bias = use_bias
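

if __name__ == "__main__":
    # A minimal sanity-check sketch, not part of the library: it only exercises
    # `save_pretrained` / `from_pretrained`, inherited from `PretrainedConfig`,
    # to show that an AIMv2Config round-trips through a `config.json` file.
    import tempfile

    config = AIMv2Config(hidden_size=768, num_hidden_layers=12)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        reloaded = AIMv2Config.from_pretrained(tmp_dir)
    assert reloaded.hidden_size == 768
    assert reloaded.model_type == "aimv2"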