JingzeShi committed
Commit 05e8ab2 · verified · 1 Parent(s): c52fd95

Upload DogeForCausalLM
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "SmallDoge/Doge-160M-checkpoint",
+  "_name_or_path": "./results/Doge-160M/checkpoint-15600",
   "architectures": [
     "DogeForCausalLM"
   ],
@@ -38,9 +38,8 @@
     "rope_type": "dynamic"
   },
   "rope_theta": 10000.0,
-  "tie_word_embeddings": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.48.2",
+  "transformers_version": "4.48.1",
   "use_cache": true,
   "vocab_size": 32768
 }
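
Since the repository ships its own `configuration_doge.py` and `modeling_doge.py`, loading this checkpoint through `transformers` requires `trust_remote_code=True`. A minimal sketch of inspecting the uploaded config; the repo id below is a placeholder, as the diff does not name the Hub repository this commit belongs to:

```python
# Minimal sketch. "SmallDoge/Doge-160M" is a hypothetical repo id; substitute the
# actual Hub repository. trust_remote_code is needed because the repo ships custom
# configuration/modeling code.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("SmallDoge/Doge-160M", trust_remote_code=True)
print(config.vocab_size)    # 32768 per config.json
print(config.rope_scaling)  # dynamic RoPE scaling dict, see configuration_doge.py below
```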
configuration_doge.py CHANGED
@@ -1,14 +1,9 @@
1
- # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
- # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
3
- # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
- # the file from the modular. If any change should be done, please apply the change to the
5
- # modular_doge.py file directly. One of our CI enforces this.
6
- # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
  # coding=utf-8
8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
9
  #
10
  # This code is based on the Wonderful Matrices paper implementation.
11
- # The Doge family of small language models is trained by Jingze Shi.
 
12
  #
13
  # Licensed under the Apache License, Version 2.0 (the "License");
14
  # you may not use this file except in compliance with the License.
@@ -21,6 +16,8 @@
21
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
  # See the License for the specific language governing permissions and
23
  # limitations under the License.
 
 
24
  from transformers.configuration_utils import PretrainedConfig
25
  from transformers.modeling_rope_utils import rope_config_validation
26
 
@@ -28,7 +25,7 @@ from transformers.modeling_rope_utils import rope_config_validation
28
  class DogeConfig(PretrainedConfig):
29
  r"""
30
  This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
31
- model according to the specified arguments, defining the model architecture like [SmallDoge/Doge-20M](https://huggingface.co/SmallDoge/Doge-20M).
32
 
33
  Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
  documentation from [`PretrainedConfig`] for more information.
@@ -36,6 +33,10 @@ class DogeConfig(PretrainedConfig):
36
  Args:
37
  vocab_size (`int`, *optional*, defaults to 32768):
38
  Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
39
  hidden_size (`int`, *optional*, defaults to 1024):
40
  Dimension of the hidden representations.
41
  intermediate_size (`int`, *optional*, defaults to 2048):
@@ -48,41 +49,25 @@ class DogeConfig(PretrainedConfig):
48
  Dropout probability for each sequence transformation and state transformation module.
49
  hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
50
  The non-linear activation function (function or string) in the decoder.
51
- initializer_range (`float`, *optional*, defaults to 0.02):
52
- The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
53
- rms_norm_eps (`float`, *optional*, defaults to 1e-06):
54
- The epsilon used by the rms normalization layers.
55
- use_cache (`bool`, *optional*, defaults to `True`):
56
- Whether or not the model should return the last key/values attentions (not used by all models). Only
57
- relevant if `config.is_decoder=True`.
58
- bos_token_id (`int`, *optional*, defaults to 0):
59
- Beginning of stream token id.
60
- eos_token_id (`int`, *optional*, defaults to 1):
61
- End of stream token id.
62
- pad_token_id (`int`, *optional*, defaults to 2):
63
- Padding token id.
64
- tie_word_embeddings (`bool`, *optional*, defaults to `False`):
65
- Whether to tie weight embeddings
66
  max_position_embeddings (`int`, *optional*, defaults to 2048):
67
  The maximum sequence length that this model might ever be used with.
68
  rope_theta (`float`, *optional*, defaults to 10000.0):
69
  The base period of the RoPE embeddings.
70
  rope_scaling (`Dict`, *optional*):
71
- Dictionary containing the scaling configuration for the RoPE embeddings.
72
  NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly.
73
- Doge family of small models use `{ 'rope_type': 'dynamic', 'factor': 4.0, 'original_max_position_embeddings': 2048 }` as the default value.
74
  Expected contents:
75
  `rope_type` (`str`):
76
  The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation.
77
  `factor` (`float`, *optional*):
78
- Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
79
  In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length.
80
  `original_max_position_embeddings` (`int`, *optional*):
81
- Used with 'dynamic', 'longrope' and 'llama3'.
82
  The original max position embeddings used during pretraining.
83
  `attention_factor` (`float`, *optional*):
84
  Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
85
- computation.
86
  If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value.
87
  `beta_fast` (`float`, *optional*):
88
  Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
@@ -91,51 +76,54 @@ class DogeConfig(PretrainedConfig):
91
  Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
92
  ramp function. If unspecified, it defaults to 1.
93
  `short_factor` (`List[float]`, *optional*):
94
- Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
95
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
96
  `long_factor` (`List[float]`, *optional*):
97
- Only used with 'longrope'. The scaling factor to be applied to long contexts (<`original_max_position_embeddings`).
98
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
99
  `low_freq_factor` (`float`, *optional*):
100
  Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
101
  `high_freq_factor` (`float`, *optional*):
102
  Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
103
  num_attention_heads (`int`, *optional*, defaults to 8):
104
  Number of attention heads for each attention layer in the Transformer decoder.
105
- num_key_value_heads (`int`, *optional*):
106
- This is the number of key_value heads that should be used to implement Grouped Query Attention.
107
  If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
108
- `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
109
- When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
110
- For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf).
111
  If it is not specified, will default to `num_attention_heads`.
112
  attention_dropout (`float`, *optional*, defaults to 0.0):
113
  The dropout ratio for the attention probabilities.
114
- dynamic_mask_ratio (`float`, *optional*, defaults to 0.0):
115
- The ratio to control the proportion of the dynamic mask filled with the minimum value. For more details checkout [this paper](https://arxiv.org/pdf/2412.11834).
116
  is_moe (`bool`, *optional*, defaults to `False`):
117
- Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize. For more details checkout [this paper](https://arxiv.org/pdf/2412.11834).
118
  num_cdmoe_experts (`int`, *optional*, defaults to 16348):
119
- Number of Experts for the Cross Domain Mixture of Experts.
120
  num_cdmoe_heads (`int`, *optional*, defaults to 4):
121
- Number of retrieval heads, used to mix multi-head experts.
122
  num_cdmoe_experts_per_head (`int`, *optional*, defaults to 8):
123
- Number of Experts per retrieval head, used to mix multi-head experts.
124
  expert_retrieval_size (`int`, *optional*, defaults to 64):
125
- Dimension of the Expert retrieval states for calculating the dot product of query and key to determine the expert index.
126
-
127
- ```python
128
- >>> from transformers import DogeConfig, DogeModel
129
-
130
- >>> # Initializing a Doge-320M style configuration
131
- >>> configuration = DogeConfig()
132
-
133
- >>> # Initializing a model from the Doge-320M style configuration
134
- >>> model = DogeModel(configuration)
135
-
136
- >>> # Accessing the model configuration
137
- >>> configuration = model.config
138
- ```"""
139
 
140
  model_type = "doge"
141
  keys_to_ignore_at_inference = ["past_key_values"]
@@ -154,26 +142,33 @@ class DogeConfig(PretrainedConfig):
154
  def __init__(
155
  self,
156
  vocab_size=32768,
 
 
157
  hidden_size=1024,
158
  intermediate_size=2048,
159
  num_hidden_layers=32,
160
  hidden_bias=False,
161
  hidden_dropout=0.0,
162
  hidden_act="silu",
163
  initializer_range=0.02,
164
  rms_norm_eps=1e-06,
165
  use_cache=True,
166
  bos_token_id=0,
167
  eos_token_id=1,
168
  pad_token_id=2,
169
- tie_word_embeddings=False,
170
- max_position_embeddings=2048,
171
- rope_theta=10000.0,
172
- rope_scaling=None,
173
  num_attention_heads=8,
174
  num_key_value_heads=None,
175
  attention_dropout=0.0,
176
  dynamic_mask_ratio=0.0,
 
177
  is_moe=False,
178
  num_cdmoe_experts=16348,
179
  num_cdmoe_heads=4,
@@ -182,24 +177,29 @@ class DogeConfig(PretrainedConfig):
182
  **kwargs,
183
  ):
184
  self.vocab_size = vocab_size
 
 
185
  self.hidden_size = hidden_size
186
  self.intermediate_size = intermediate_size
187
  self.num_hidden_layers = num_hidden_layers
188
-
189
  self.hidden_bias = hidden_bias
190
  self.hidden_dropout = hidden_dropout
191
  self.hidden_act = hidden_act
192
- self.initializer_range = initializer_range
193
- self.rms_norm_eps = rms_norm_eps
194
- self.use_cache = use_cache
195
-
196
  self.max_position_embeddings = max_position_embeddings
197
  self.rope_theta = rope_theta
198
  self.rope_scaling = rope_scaling
199
  self.num_attention_heads = num_attention_heads
200
  self.num_key_value_heads = num_key_value_heads
201
  self.attention_dropout = attention_dropout
202
  self.dynamic_mask_ratio = dynamic_mask_ratio
 
203
  self.is_moe = is_moe
204
  self.num_cdmoe_experts = num_cdmoe_experts
205
  self.num_cdmoe_heads = num_cdmoe_heads
1
  # coding=utf-8
2
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
3
  #
4
  # This code is based on the Wonderful Matrices paper implementation.
5
+ #
6
+ # https://arxiv.org/abs/2412.11834
7
  #
8
  # Licensed under the Apache License, Version 2.0 (the "License");
9
  # you may not use this file except in compliance with the License.
 
16
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
  # See the License for the specific language governing permissions and
18
  # limitations under the License.
19
+ """PyTorch Doge model configuration"""
20
+
21
  from transformers.configuration_utils import PretrainedConfig
22
  from transformers.modeling_rope_utils import rope_config_validation
23
 
 
25
  class DogeConfig(PretrainedConfig):
26
  r"""
27
  This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
28
+ model according to the specified arguments, defining the model architecture like [JingzeShi/Doge-20M](https://huggingface.co/JingzeShi/Doge-20M).
29
 
30
  Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
31
  documentation from [`PretrainedConfig`] for more information.
 
33
  Args:
34
  vocab_size (`int`, *optional*, defaults to 32768):
35
  Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
36
+ num_channels (`int`, *optional*, defaults to 3):
37
+ Number of channels in the input image.
38
+ patch_size (`int`, *optional*, defaults to 16):
39
+ Patch size of Vision Transformer Embeddings.
40
  hidden_size (`int`, *optional*, defaults to 1024):
41
  Dimension of the hidden representations.
42
  intermediate_size (`int`, *optional*, defaults to 2048):
 
49
  Dropout probability for each sequence transformation and state transformation module.
50
  hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
51
  The non-linear activation function (function or string) in the decoder.
52
  max_position_embeddings (`int`, *optional*, defaults to 2048):
53
  The maximum sequence length that this model might ever be used with.
54
  rope_theta (`float`, *optional*, defaults to 10000.0):
55
  The base period of the RoPE embeddings.
56
  rope_scaling (`Dict`, *optional*):
57
+ Dictionary containing the scaling configuration for the RoPE embeddings.
58
  NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly.
 
59
  Expected contents:
60
  `rope_type` (`str`):
61
  The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation.
62
  `factor` (`float`, *optional*):
63
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
64
  In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length.
65
  `original_max_position_embeddings` (`int`, *optional*):
66
+ Used with 'dynamic', 'longrope' and 'llama3'.
67
  The original max position embeddings used during pretraining.
68
  `attention_factor` (`float`, *optional*):
69
  Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
70
+ computation.
71
  If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value.
72
  `beta_fast` (`float`, *optional*):
73
  Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
 
76
  Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
77
  ramp function. If unspecified, it defaults to 1.
78
  `short_factor` (`List[float]`, *optional*):
79
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
80
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
81
  `long_factor` (`List[float]`, *optional*):
82
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<`original_max_position_embeddings`).
83
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
84
  `low_freq_factor` (`float`, *optional*):
85
  Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
86
  `high_freq_factor` (`float`, *optional*):
87
  Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
88
+ initializer_range (`float`, *optional*, defaults to 0.02):
89
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
90
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
91
+ The epsilon used by the rms normalization layers.
92
+ use_cache (`bool`, *optional*, defaults to `True`):
93
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
94
+ relevant if `config.is_decoder=True`.
95
+ pad_token_id (`int`, *optional*, defaults to 0):
96
+ Padding token id.
97
+ bos_token_id (`int`, *optional*, defaults to 1):
98
+ Beginning of stream token id.
99
+ eos_token_id (`int`, *optional*, defaults to 2):
100
+ End of stream token id.
101
+ tie_word_embeddings (`bool`, *optional*, defaults to `True`):
102
+ Whether to tie weight embeddings
103
  num_attention_heads (`int`, *optional*, defaults to 8):
104
  Number of attention heads for each attention layer in the Transformer decoder.
105
+ num_key_value_heads (`int`, *optional*, defaults to `None`):
106
+ This is the number of key_value heads that should be used to implement Grouped Query Attention.
107
  If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
108
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
109
+ When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
110
+ For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf).
111
  If it is not specified, will default to `num_attention_heads`.
112
  attention_dropout (`float`, *optional*, defaults to 0.0):
113
  The dropout ratio for the attention probabilities.
114
+ dynamic_mask_ratio (`float`, *optional*, defaults to 0.0, range [0, 1]):
115
+ The ratio to control the proportion of the dynamic mask filled with the minimum value.
116
  is_moe (`bool`, *optional*, defaults to `False`):
117
+ Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize
118
  num_cdmoe_experts (`int`, *optional*, defaults to 16348):
119
+ Number of Private Experts for the Cross Domain Mixture of Experts. calculation formula: :math:`\text{num_cdmoe_experts} = (32 \times \text{num_cdmoe_heads})^2`
120
  num_cdmoe_heads (`int`, *optional*, defaults to 4):
121
+ Number of heads of Private Experts for the Cross Domain Mixture of Experts.
122
  num_cdmoe_experts_per_head (`int`, *optional*, defaults to 8):
123
+ Number of Private Experts per head for the Cross Domain Mixture of Experts.
124
  expert_retrieval_size (`int`, *optional*, defaults to 64):
125
+ Dimension of the Expert retrieval states for the Cross Domain Mixture of Experts.
126
+ """
 
 
 
 
 
 
 
 
 
 
 
 
127
 
128
  model_type = "doge"
129
  keys_to_ignore_at_inference = ["past_key_values"]
 
142
  def __init__(
143
  self,
144
  vocab_size=32768,
145
+ num_channels=3,
146
+ patch_size=16,
147
  hidden_size=1024,
148
  intermediate_size=2048,
149
  num_hidden_layers=32,
150
  hidden_bias=False,
151
  hidden_dropout=0.0,
152
  hidden_act="silu",
153
+ max_position_embeddings=2048,
154
+ rope_theta=10000.0,
155
+ rope_scaling={
156
+ "rope_type": "dynamic",
157
+ "factor": 4.0,
158
+ "original_max_position_embeddings": 2048,
159
+ },
160
  initializer_range=0.02,
161
  rms_norm_eps=1e-06,
162
  use_cache=True,
163
  bos_token_id=0,
164
  eos_token_id=1,
165
  pad_token_id=2,
166
+ tie_word_embeddings=True,
 
 
 
167
  num_attention_heads=8,
168
  num_key_value_heads=None,
169
  attention_dropout=0.0,
170
  dynamic_mask_ratio=0.0,
171
+ is_causal=False,
172
  is_moe=False,
173
  num_cdmoe_experts=16348,
174
  num_cdmoe_heads=4,
 
177
  **kwargs,
178
  ):
179
  self.vocab_size = vocab_size
180
+ self.num_channels = num_channels
181
+ self.patch_size = patch_size
182
  self.hidden_size = hidden_size
183
  self.intermediate_size = intermediate_size
184
  self.num_hidden_layers = num_hidden_layers
 
185
  self.hidden_bias = hidden_bias
186
  self.hidden_dropout = hidden_dropout
187
  self.hidden_act = hidden_act
 
 
 
 
188
  self.max_position_embeddings = max_position_embeddings
189
  self.rope_theta = rope_theta
190
  self.rope_scaling = rope_scaling
191
+ self.initializer_range = initializer_range
192
+ self.rms_norm_eps = rms_norm_eps
193
+ self.use_cache = use_cache
194
+ self.bos_token_id = bos_token_id
195
+ self.eos_token_id = eos_token_id
196
+ self.pad_token_id = pad_token_id
197
+ self.tie_word_embeddings = tie_word_embeddings
198
  self.num_attention_heads = num_attention_heads
199
  self.num_key_value_heads = num_key_value_heads
200
  self.attention_dropout = attention_dropout
201
  self.dynamic_mask_ratio = dynamic_mask_ratio
202
+ self.is_causal = is_causal
203
  self.is_moe = is_moe
204
  self.num_cdmoe_experts = num_cdmoe_experts
205
  self.num_cdmoe_heads = num_cdmoe_heads
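
For reference, the revised `DogeConfig` moves the dynamic-RoPE scaling dict into `__init__` as a default, flips `tie_word_embeddings` to `True`, and adds an `is_causal` flag. A hedged sketch of constructing it directly from the vendored module, assuming `configuration_doge.py` has been downloaded from the repo and is importable:

```python
# Sketch under the assumption that configuration_doge.py sits on the import path.
from configuration_doge import DogeConfig

config = DogeConfig(
    vocab_size=32768,
    hidden_size=1024,
    num_attention_heads=8,
    num_key_value_heads=4,      # GQA: 8 query heads share 4 KV heads
    rope_scaling={              # mirrors the new default in __init__
        "rope_type": "dynamic",
        "factor": 4.0,
        "original_max_position_embeddings": 2048,
    },
    tie_word_embeddings=True,   # now defaults to True in this revision
    is_causal=False,            # new flag introduced by this commit
)
print(config.model_type)  # "doge"
```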
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "bos_token_id": 0,
   "eos_token_id": 1,
   "pad_token_id": 2,
-  "transformers_version": "4.48.2"
+  "transformers_version": "4.48.1"
 }
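
Only the `transformers_version` pin changes here; the special-token ids (bos 0, eos 1, pad 2) stay the same. A hedged generation sketch, reusing the placeholder repo id from above:

```python
# Hedged sketch; "SmallDoge/Doge-160M" remains a placeholder repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "SmallDoge/Doge-160M"
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("Hey, are you conscious? Can you talk to me?", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=32,
    eos_token_id=1,  # matches generation_config.json
    pad_token_id=2,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```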
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a798eb03366281fa7a92ec90d62de141fd23bb80c6f57d5cde731c68510e4ac4
+oid sha256:737335e804c2ef54717d9fdb4c367b841712fb3de278908782cc053f9d3d66cb
 size 610825528
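
The weights pointer now references a new LFS object of the same size. A small self-contained check that a locally downloaded `model.safetensors` matches the oid recorded in this commit:

```python
# Verify a downloaded model.safetensors against the LFS oid in this commit.
import hashlib
from pathlib import Path

EXPECTED = "737335e804c2ef54717d9fdb4c367b841712fb3de278908782cc053f9d3d66cb"

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = Path("model.safetensors")  # adjust to the downloaded file's location
digest = sha256_of(path)
assert digest == EXPECTED, f"hash mismatch: {digest}"
print("ok:", digest, path.stat().st_size, "bytes")  # size should be 610825528
```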
modeling_doge.py CHANGED
@@ -1,14 +1,9 @@
1
- # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
- # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
3
- # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
- # the file from the modular. If any change should be done, please apply the change to the
5
- # modular_doge.py file directly. One of our CI enforces this.
6
- # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
  # coding=utf-8
8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
9
  #
10
  # This code is based on the Wonderful Matrices paper implementation.
11
- # The Doge family of small language models is trained by Jingze Shi.
 
12
  #
13
  # Licensed under the Apache License, Version 2.0 (the "License");
14
  # you may not use this file except in compliance with the License.
@@ -21,13 +16,16 @@
21
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
  # See the License for the specific language governing permissions and
23
  # limitations under the License.
 
24
 
25
  import math
26
  from typing import Callable, List, Optional, Tuple, Union
27
 
28
  import torch
29
  import torch.nn.functional as F
 
30
  from torch import nn
 
31
  from transformers.activations import ACT2FN
32
  from transformers.cache_utils import Cache, DynamicCache, StaticCache
33
  from transformers.generation import GenerationMixin
@@ -43,16 +41,18 @@ from transformers.utils import (
43
  LossKwargs,
44
  add_start_docstrings,
45
  add_start_docstrings_to_model_forward,
46
- is_torch_flex_attn_available,
47
  logging,
48
  replace_return_docstrings,
49
  )
50
- from transformers.utils.deprecation import deprecate_kwarg
51
-
52
  from .configuration_doge import DogeConfig
54
 
55
- if is_torch_flex_attn_available():
56
  from torch.nn.attention.flex_attention import flex_attention
57
 
58
 
@@ -94,20 +94,22 @@ class Residual(nn.Module):
94
 
95
 
96
  class RotaryEmbedding(nn.Module):
97
- def __init__(self, config: Optional[DogeConfig] = None, device=None):
98
  super().__init__()
99
- # BC: "rope_type" was originally "type"
100
- if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
 
101
  self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
102
  else:
103
  self.rope_type = "default"
104
  self.max_seq_len_cached = config.max_position_embeddings
105
  self.original_max_seq_len = config.max_position_embeddings
 
106
 
107
  self.config = config
108
  self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
109
 
110
- inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
111
  self.register_buffer("inv_freq", inv_freq, persistent=False)
112
  self.original_inv_freq = self.inv_freq
113
 
@@ -119,14 +121,13 @@ class RotaryEmbedding(nn.Module):
119
  """
120
  seq_len = torch.max(position_ids) + 1
121
  if seq_len > self.max_seq_len_cached: # growth
122
- inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
 
 
123
  self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
124
  self.max_seq_len_cached = seq_len
125
 
126
  if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
127
- # This .to() is needed if the model has been moved to a device after being initialized (because
128
- # the buffer is automatically moved, but not the original copy)
129
- self.original_inv_freq = self.original_inv_freq.to(device)
130
  self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
131
  self.max_seq_len_cached = self.original_max_seq_len
132
 
@@ -135,7 +136,7 @@ class RotaryEmbedding(nn.Module):
135
  if "dynamic" in self.rope_type:
136
  self._dynamic_frequency_update(position_ids, device=x.device)
137
 
138
- # Core RoPE block
139
  inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
140
  position_ids_expanded = position_ids[:, None, :].float()
141
  # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
@@ -163,7 +164,7 @@ def rotate_half(x):
163
  return torch.cat((-x2, x1), dim=-1)
164
 
165
 
166
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
167
  """Applies Rotary Position Embedding to the query and key tensors.
168
 
169
  Args:
@@ -175,8 +176,8 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
175
  Deprecated and unused.
176
  unsqueeze_dim (`int`, *optional*, defaults to 1):
177
  The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
178
- sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k.
179
- For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
180
  Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k.
181
  Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
182
  Returns:
@@ -191,7 +192,7 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
191
 
192
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
193
  """
194
- This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
195
  The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
196
  """
197
  batch, num_key_value_heads, slen, head_dim = hidden_states.shape
@@ -210,9 +211,10 @@ class DogeDynamicMaskAttention(nn.Module):
210
  self.layer_idx = layer_idx
211
  self.head_dim = config.hidden_size // config.num_attention_heads
212
  self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
213
- self.scaling = self.head_dim**-0.5
214
  self.attention_dropout = config.attention_dropout
215
  self.dynamic_mask_ratio = config.dynamic_mask_ratio
 
216
 
217
  self.ALL_ATTENTION_FUNCTIONS = {
218
  "eager": self.eager_attention_forward,
@@ -222,21 +224,33 @@ class DogeDynamicMaskAttention(nn.Module):
222
 
223
  # Q K V O projections
224
  self.q_proj = nn.Linear(
225
- config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.hidden_bias
 
 
226
  )
227
  self.k_proj = nn.Linear(
228
- config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
 
 
229
  )
230
  self.v_proj = nn.Linear(
231
- config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
 
 
232
  )
233
  # dynamic mask for the QK^T attention score matrix
234
- self.A = nn.Parameter(torch.zeros(config.num_attention_heads))
 
 
235
  self.dt_proj = nn.Linear(
236
- config.num_key_value_heads * self.head_dim, config.num_attention_heads, bias=config.hidden_bias
 
 
237
  )
238
  self.o_proj = nn.Linear(
239
- config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.hidden_bias
 
 
240
  )
241
 
242
  def forward(
@@ -256,18 +270,21 @@ class DogeDynamicMaskAttention(nn.Module):
256
  value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
257
 
258
  cos, sin = position_embeddings
259
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
260
 
261
  if past_key_value is not None:
262
  # sin and cos are specific to RoPE models; cache_position needed for the static cache
263
  cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
264
  key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
265
 
266
- # calculate dynamic mask from value_states
267
- dt_states = self.dt_proj(
268
- value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1)
269
- )
270
- dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
 
 
 
271
  attn_mask = self.prepare_dynamic_mask(
272
  hidden_states=hidden_states,
273
  dynamic_mask=dynamic_mask,
@@ -278,7 +295,7 @@ class DogeDynamicMaskAttention(nn.Module):
278
  attention_interface: Callable = self.eager_attention_forward
279
  if self.config._attn_implementation != "eager":
280
  attention_interface = self.ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
281
-
282
  attn_output = attention_interface(
283
  query_states,
284
  key_states,
@@ -324,7 +341,7 @@ class DogeDynamicMaskAttention(nn.Module):
324
  attn_mask = attention_mask
325
 
326
  return attn_mask
327
-
328
  def eager_attention_forward(
329
  self,
330
  query: torch.Tensor,
@@ -343,7 +360,7 @@ class DogeDynamicMaskAttention(nn.Module):
343
  if attention_mask is not None:
344
  causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
345
  attn_weights = attn_weights + causal_mask
346
-
347
  # upcast attention scores to fp32
348
  attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
349
  attn_weights = F.dropout(attn_weights, p=dropout, training=self.training)
@@ -352,7 +369,7 @@ class DogeDynamicMaskAttention(nn.Module):
352
  attn_output = torch.matmul(attn_weights, value_states)
353
  attn_output = attn_output.transpose(1, 2).contiguous()
354
  return attn_output
355
-
356
  def sdpa_attention_forward(
357
  self,
358
  query: torch.Tensor,
@@ -363,9 +380,6 @@ class DogeDynamicMaskAttention(nn.Module):
363
  dropout: float = 0.0,
364
  **kwargs,
365
  ) -> torch.Tensor:
366
- key = repeat_kv(key, self.num_key_value_groups)
367
- value = repeat_kv(value, self.num_key_value_groups)
368
-
369
  causal_mask = attention_mask
370
  if attention_mask is not None:
371
  causal_mask = causal_mask[:, :, :, : key.shape[-2]]
@@ -385,10 +399,11 @@ class DogeDynamicMaskAttention(nn.Module):
385
  attn_mask=causal_mask,
386
  dropout_p=dropout,
387
  scale=scaling,
 
388
  )
389
  attn_output = attn_output.transpose(1, 2).contiguous()
390
  return attn_output
391
-
392
  def flex_attention_forward(
393
  self,
394
  query: torch.Tensor,
@@ -399,37 +414,37 @@ class DogeDynamicMaskAttention(nn.Module):
399
  dropout: float = 0.0,
400
  **kwargs,
401
  ) -> torch.Tensor:
402
- key = repeat_kv(key, self.num_key_value_groups)
403
- value = repeat_kv(value, self.num_key_value_groups)
404
-
405
  causal_mask = attention_mask
406
  if attention_mask is not None:
407
  causal_mask = causal_mask[:, :, :, : key.shape[-2]]
408
 
409
  # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
410
  # NOTE: So we only use flex_attention in inference mode.
 
411
  def causal_mod(score, batch, head, q_idx, kv_idx):
412
  score = score + causal_mask[batch][0][q_idx][kv_idx]
413
  return score
414
-
415
  def dynamic_mod(score, batch, head, q_idx, kv_idx):
416
  score = score + causal_mask[batch][head][q_idx][kv_idx]
417
  return score
418
-
419
  mask_mod = causal_mod if self.is_causal else dynamic_mod
420
-
421
  attn_output = flex_attention(
422
  query,
423
  key,
424
  value,
425
  score_mod=mask_mod,
426
  scale=scaling,
 
427
  )
428
  attn_output = attn_output.transpose(1, 2).contiguous()
429
  return attn_output
430
 
431
 
432
  class DogeMLP(nn.Module):
 
433
  def __init__(self, config: DogeConfig):
434
  super().__init__()
435
  self.hidden_dim = config.hidden_size
@@ -468,7 +483,7 @@ class DogeCDMoE(DogeMLP):
468
  self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.num_keys, 2, self.expert_retrieval_dim // 2))
469
 
470
  # experts
471
- self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
472
  self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
473
 
474
  def forward(
@@ -485,10 +500,14 @@ class DogeCDMoE(DogeMLP):
485
 
486
  # get experts with the highest similarity
487
  (scores_x, scores_y), (indices_x, indices_y) = sim.topk(self.num_cdmoe_experts_per_head, dim=-1)
488
- all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
489
- all_scores = all_scores.view(*scores_x.shape[:-1], -1)
490
- all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
491
- all_indices = all_indices.view(*indices_x.shape[:-1], -1)
 
 
 
 
492
  scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
493
  indices = all_indices.gather(-1, pk_indices)
494
  down_embed = self.down_embed(indices)
@@ -513,7 +532,7 @@ class DogeDecoderLayer(nn.Module):
513
  self.pre_residual = Residual(config.hidden_size)
514
 
515
  self.post_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
516
- self.feed_forward = DogeMLP(config) if not config.is_moe else DogeCDMoE(config)
517
  self.post_residual = Residual(config.hidden_size)
518
 
519
  def forward(
@@ -528,6 +547,7 @@ class DogeDecoderLayer(nn.Module):
528
  position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
529
  **kwargs,
530
  ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
 
531
  # sequence transformation
532
  residual = hidden_states
533
  hidden_states = self.pre_layernorm(hidden_states)
@@ -573,8 +593,6 @@ DOGE_START_DOCSTRING = r"""
573
  load the weights associated with the model, only the configuration. Check out the
574
  [`~PreTrainedModel.from_pretrained`] method to load the model weights.
575
  """
576
-
577
-
578
  @add_start_docstrings(
579
  "The bare Doge Model outputting raw hidden-states without any specific head on top.",
580
  DOGE_START_DOCSTRING,
@@ -854,7 +872,7 @@ class DogeModel(DogePreTrainedModel):
854
  )
855
 
856
  return causal_mask
857
-
858
  @staticmethod
859
  def _prepare_4d_causal_attention_mask_with_cache_position(
860
  attention_mask: torch.Tensor = None,
@@ -895,9 +913,7 @@ class DogeModel(DogePreTrainedModel):
895
  min_dtype = torch.finfo(dtype).min
896
  causal_mask = torch.full(
897
  (sequence_length, target_length),
898
- fill_value=min_dtype,
899
- dtype=dtype,
900
- device=device,
901
  )
902
  if sequence_length != 1:
903
  causal_mask = torch.triu(causal_mask, diagonal=1)
@@ -943,14 +959,13 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
943
 
944
  def set_output_embeddings(self, new_embeddings):
945
  self.lm_head = new_embeddings
946
-
947
  def get_decoder(self):
948
  return self.model
949
 
950
  def set_decoder(self, decoder):
951
  self.model = decoder
952
 
953
- @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
954
  @add_start_docstrings_to_model_forward(DOGE_INPUTS_DOCSTRING)
955
  @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
956
  def forward(
@@ -966,7 +981,7 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
966
  output_hidden_states: Optional[bool] = None,
967
  return_dict: Optional[bool] = None,
968
  cache_position: Optional[torch.LongTensor] = None,
969
- logits_to_keep: int = 0,
970
  **kwargs: Unpack[KwargsForCausalLM],
971
  ) -> Union[Tuple, CausalLMOutputWithPast]:
972
  r"""
@@ -976,12 +991,10 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
976
  config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
977
  (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
978
 
979
- logits_to_keep (`int`, *optional*):
980
- If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
981
  `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
982
  token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
983
- If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
984
- This is useful when using packed tensor format (single dimension for batch and sequence length).
985
 
986
  Returns:
987
 
@@ -990,8 +1003,8 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
990
  ```python
991
  >>> from transformers import AutoTokenizer, AutoModelForCausalLM
992
 
993
- >>> model = AutoModelForCausalLM.from_pretrained("SmallDoge/Doge-20M")
994
- >>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-20M")
995
 
996
  >>> prompt = "Hey, are you conscious? Can you talk to me?"
997
  >>> inputs = tokenizer(prompt, return_tensors="pt")
@@ -1023,9 +1036,9 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
1023
  )
1024
 
1025
  hidden_states = outputs[0]
 
1026
  # only compute necessary logits, and do not upcast them to float if we are not computing the loss
1027
- slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1028
- logits = self.lm_head(hidden_states[:, slice_indices, :])
1029
 
1030
  loss = None
1031
  if labels is not None:
@@ -1044,32 +1057,111 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
1044
  )
 
1047
  @add_start_docstrings(
1048
  """
1049
  The Doge Model transformer with a sequence classification head on top (linear layer).
1050
 
1051
- [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1052
- (e.g. GPT-2) do.
1053
 
1054
- Since it does classification on the last token, it requires to know the position of the last token. If a
1055
- `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1056
- no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1057
- padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1058
- each row of the batch).
1059
- """,
1060
- DOGE_START_DOCSTRING,
1061
  )
1062
  class DogeForSequenceClassification(DogePreTrainedModel):
1063
  def __init__(self, config: DogeConfig):
1064
  super().__init__(config)
 
1065
  self.num_labels = config.num_labels
1066
 
1067
  self.model = DogeModel(config)
1068
- self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1069
- self.config = config
1070
 
1071
  # Initialize weights and apply final processing
1072
- self.post_init()
1073
 
1074
  def get_input_embeddings(self):
1075
  return self.model.word_embed
@@ -1093,14 +1185,14 @@ class DogeForSequenceClassification(DogePreTrainedModel):
1093
  ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1094
  r"""
1095
  labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1096
- Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1097
- config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1098
- `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1099
  """
1100
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1101
 
1102
- transformer_outputs = self.model(
1103
- input_ids,
1104
  attention_mask=attention_mask,
1105
  position_ids=position_ids,
1106
  past_key_values=past_key_values,
@@ -1110,8 +1202,8 @@ class DogeForSequenceClassification(DogePreTrainedModel):
1110
  output_hidden_states=output_hidden_states,
1111
  return_dict=return_dict,
1112
  )
1113
- hidden_states = transformer_outputs[0]
1114
- logits = self.score(hidden_states)
1115
 
1116
  if input_ids is not None:
1117
  batch_size = input_ids.shape[0]
@@ -1135,19 +1227,21 @@ class DogeForSequenceClassification(DogePreTrainedModel):
1135
 
1136
  loss = None
1137
  if labels is not None:
1138
- loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
 
 
 
 
 
1139
 
1140
  if not return_dict:
1141
- output = (pooled_logits,) + transformer_outputs[1:]
1142
  return ((loss,) + output) if loss is not None else output
1143
 
1144
  return SequenceClassifierOutputWithPast(
1145
  loss=loss,
1146
  logits=pooled_logits,
1147
- past_key_values=transformer_outputs.past_key_values,
1148
- hidden_states=transformer_outputs.hidden_states,
1149
- attentions=transformer_outputs.attentions,
1150
  )
1151
-
1152
-
1153
- __all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]
 
 
 
 
 
 
 
1
  # coding=utf-8
2
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
3
  #
4
  # This code is based on the Wonderful Matrices paper implementation.
5
+ #
6
+ # https://arxiv.org/abs/2412.11834
7
  #
8
  # Licensed under the Apache License, Version 2.0 (the "License");
9
  # you may not use this file except in compliance with the License.
 
16
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
  # See the License for the specific language governing permissions and
18
  # limitations under the License.
19
+ """PyTorch Doge model."""
20
 
21
  import math
22
  from typing import Callable, List, Optional, Tuple, Union
23
 
24
  import torch
25
  import torch.nn.functional as F
26
+ import torch.utils.checkpoint
27
  from torch import nn
28
+
29
  from transformers.activations import ACT2FN
30
  from transformers.cache_utils import Cache, DynamicCache, StaticCache
31
  from transformers.generation import GenerationMixin
 
41
  LossKwargs,
42
  add_start_docstrings,
43
  add_start_docstrings_to_model_forward,
44
+ is_torch_greater_or_equal,
45
  logging,
46
  replace_return_docstrings,
47
  )
 
 
48
  from .configuration_doge import DogeConfig
49
 
50
+ try:
51
+ from einx import add as einx_add
52
+ except ImportError:
53
+ einx_add = None
54
 
55
+ if is_torch_greater_or_equal("2.5"):
56
  from torch.nn.attention.flex_attention import flex_attention
57
 
58
 
 
94
 
95
 
96
  class RotaryEmbedding(nn.Module):
97
+ def __init__(self, config: Optional[DogeConfig] = None):
98
  super().__init__()
99
+ self.rope_kwargs = {}
100
+
101
+ if config.rope_scaling is not None:
102
  self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
103
  else:
104
  self.rope_type = "default"
105
  self.max_seq_len_cached = config.max_position_embeddings
106
  self.original_max_seq_len = config.max_position_embeddings
107
+ self.base = config.rope_theta
108
 
109
  self.config = config
110
  self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
111
 
112
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, **self.rope_kwargs)
113
  self.register_buffer("inv_freq", inv_freq, persistent=False)
114
  self.original_inv_freq = self.inv_freq
115
 
 
121
  """
122
  seq_len = torch.max(position_ids) + 1
123
  if seq_len > self.max_seq_len_cached: # growth
124
+ inv_freq, self.attention_scaling = self.rope_init_fn(
125
+ self.config, device, seq_len=seq_len, **self.rope_kwargs
126
+ )
127
  self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
128
  self.max_seq_len_cached = seq_len
129
 
130
  if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
 
 
 
131
  self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
132
  self.max_seq_len_cached = self.original_max_seq_len
133
 
 
136
  if "dynamic" in self.rope_type:
137
  self._dynamic_frequency_update(position_ids, device=x.device)
138
 
139
+ # core RoPE block
140
  inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
141
  position_ids_expanded = position_ids[:, None, :].float()
142
  # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
 
164
  return torch.cat((-x2, x1), dim=-1)
165
 
166
 
167
+ def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
168
  """Applies Rotary Position Embedding to the query and key tensors.
169
 
170
  Args:
 
176
  Deprecated and unused.
177
  unsqueeze_dim (`int`, *optional*, defaults to 1):
178
  The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
179
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k.
180
+ For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
181
  Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k.
182
  Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
183
  Returns:
 
192
 
193
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
194
  """
195
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
196
  The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
197
  """
198
  batch, num_key_value_heads, slen, head_dim = hidden_states.shape
 
211
  self.layer_idx = layer_idx
212
  self.head_dim = config.hidden_size // config.num_attention_heads
213
  self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
214
+ self.scaling = self.head_dim ** -0.5
215
  self.attention_dropout = config.attention_dropout
216
  self.dynamic_mask_ratio = config.dynamic_mask_ratio
217
+ self.is_causal = config.is_causal
218
 
219
  self.ALL_ATTENTION_FUNCTIONS = {
220
  "eager": self.eager_attention_forward,
 
224
 
225
  # Q K V O projections
226
  self.q_proj = nn.Linear(
227
+ config.hidden_size,
228
+ config.num_attention_heads * self.head_dim,
229
+ bias=config.hidden_bias
230
  )
231
  self.k_proj = nn.Linear(
232
+ config.hidden_size,
233
+ config.num_key_value_heads * self.head_dim,
234
+ bias=config.hidden_bias
235
  )
236
  self.v_proj = nn.Linear(
237
+ config.hidden_size,
238
+ config.num_key_value_heads * self.head_dim,
239
+ bias=config.hidden_bias
240
  )
241
  # dynamic mask for the QK^T attention score matrix
242
+ self.A = nn.Parameter(
243
+ torch.zeros(config.num_attention_heads)
244
+ )
245
  self.dt_proj = nn.Linear(
246
+ config.num_key_value_heads * self.head_dim,
247
+ config.num_attention_heads,
248
+ bias=config.hidden_bias
249
  )
250
  self.o_proj = nn.Linear(
251
+ config.num_attention_heads * self.head_dim,
252
+ config.hidden_size,
253
+ bias=config.hidden_bias
254
  )
255
 
256
  def forward(
 
270
  value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
271
 
272
  cos, sin = position_embeddings
273
+ query_states, key_states = apply_QK_rotary_pos_emb(query_states, key_states, cos, sin)
274
 
275
  if past_key_value is not None:
276
  # sin and cos are specific to RoPE models; cache_position needed for the static cache
277
  cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
278
  key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
279
 
280
+
281
+ dynamic_mask = None
282
+ if self.is_causal is False:
283
+ # calculate dynamic mask from value_states
284
+ # NOTE: If these weights are not trained in causal mode, a mask of all ones will be returned, which will not affect the training results of causal mode
285
+ # TODO: The main reason for setting causal mode is that the Flex Attention kernel does not yet support score_mod functions with learnable parameters. However, we can continue training from the causal checkpoint later.
286
+ dt_states = self.dt_proj(value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1))
287
+ dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
288
  attn_mask = self.prepare_dynamic_mask(
289
  hidden_states=hidden_states,
290
  dynamic_mask=dynamic_mask,
 
295
  attention_interface: Callable = self.eager_attention_forward
296
  if self.config._attn_implementation != "eager":
297
  attention_interface = self.ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
298
+
299
  attn_output = attention_interface(
300
  query_states,
301
  key_states,
 
341
  attn_mask = attention_mask
342
 
343
  return attn_mask
344
+
345
  def eager_attention_forward(
346
  self,
347
  query: torch.Tensor,
 
360
  if attention_mask is not None:
361
  causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
362
  attn_weights = attn_weights + causal_mask
363
+
364
  # upcast attention scores to fp32
365
  attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
366
  attn_weights = F.dropout(attn_weights, p=dropout, training=self.training)
 
369
  attn_output = torch.matmul(attn_weights, value_states)
370
  attn_output = attn_output.transpose(1, 2).contiguous()
371
  return attn_output
372
+
373
  def sdpa_attention_forward(
374
  self,
375
  query: torch.Tensor,
 
380
  dropout: float = 0.0,
381
  **kwargs,
382
  ) -> torch.Tensor:
 
 
 
383
  causal_mask = attention_mask
384
  if attention_mask is not None:
385
  causal_mask = causal_mask[:, :, :, : key.shape[-2]]
 
399
  attn_mask=causal_mask,
400
  dropout_p=dropout,
401
  scale=scaling,
402
+ enable_gqa=True,
403
  )
404
  attn_output = attn_output.transpose(1, 2).contiguous()
405
  return attn_output
406
+
407
  def flex_attention_forward(
408
  self,
409
  query: torch.Tensor,
 
414
  dropout: float = 0.0,
415
  **kwargs,
416
  ) -> torch.Tensor:
 
 
 
417
  causal_mask = attention_mask
418
  if attention_mask is not None:
419
  causal_mask = causal_mask[:, :, :, : key.shape[-2]]
420
 
421
  # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
422
  # NOTE: So we only use flex_attention in inference mode.
423
+
424
  def causal_mod(score, batch, head, q_idx, kv_idx):
425
  score = score + causal_mask[batch][0][q_idx][kv_idx]
426
  return score
427
+
428
  def dynamic_mod(score, batch, head, q_idx, kv_idx):
429
  score = score + causal_mask[batch][head][q_idx][kv_idx]
430
  return score
431
+
432
  mask_mod = causal_mod if self.is_causal else dynamic_mod
433
+
434
  attn_output = flex_attention(
435
  query,
436
  key,
437
  value,
438
  score_mod=mask_mod,
439
  scale=scaling,
440
+ enable_gqa=True,
441
  )
442
  attn_output = attn_output.transpose(1, 2).contiguous()
443
  return attn_output
444
 
445
 
446
  class DogeMLP(nn.Module):
447
+
448
  def __init__(self, config: DogeConfig):
449
  super().__init__()
450
  self.hidden_dim = config.hidden_size
 
483
  self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.num_keys, 2, self.expert_retrieval_dim // 2))
484
 
485
  # experts
486
+ self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
487
  self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
488
 
489
  def forward(
 
500
 
501
  # get experts with the highest similarity
502
  (scores_x, scores_y), (indices_x, indices_y) = sim.topk(self.num_cdmoe_experts_per_head, dim=-1)
503
+ if einx_add is not None:
504
+ all_scores = einx_add("... i, ... j -> ... (i j)", scores_x, scores_y)
505
+ all_indices = einx_add("... i, ... j -> ... (i j)", indices_x * self.num_keys, indices_y)
506
+ else:
507
+ all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
508
+ all_scores = all_scores.view(*scores_x.shape[:-1], -1)
509
+ all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
510
+ all_indices = all_indices.view(*indices_x.shape[:-1], -1)
511
  scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
512
  indices = all_indices.gather(-1, pk_indices)
513
  down_embed = self.down_embed(indices)
 
532
  self.pre_residual = Residual(config.hidden_size)
533
 
534
  self.post_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
535
+ self.feed_forward = DogeMLP(config) if config.is_moe == False else DogeCDMoE(config)
536
  self.post_residual = Residual(config.hidden_size)
537
 
538
  def forward(
 
547
  position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
548
  **kwargs,
549
  ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
550
+
551
  # sequence transformation
552
  residual = hidden_states
553
  hidden_states = self.pre_layernorm(hidden_states)
 
593
  load the weights associated with the model, only the configuration. Check out the
594
  [`~PreTrainedModel.from_pretrained`] method to load the model weights.
595
  """
 
 
596
  @add_start_docstrings(
597
  "The bare Doge Model outputting raw hidden-states without any specific head on top.",
598
  DOGE_START_DOCSTRING,
 
872
  )
873
 
874
  return causal_mask
875
+
876
  @staticmethod
877
  def _prepare_4d_causal_attention_mask_with_cache_position(
878
  attention_mask: torch.Tensor = None,
 
913
  min_dtype = torch.finfo(dtype).min
914
  causal_mask = torch.full(
915
  (sequence_length, target_length),
916
+ fill_value=min_dtype, dtype=dtype, device=device,
 
 
917
  )
918
  if sequence_length != 1:
919
  causal_mask = torch.triu(causal_mask, diagonal=1)
 
959
 
960
  def set_output_embeddings(self, new_embeddings):
961
  self.lm_head = new_embeddings
962
+
963
  def get_decoder(self):
964
  return self.model
965
 
966
  def set_decoder(self, decoder):
967
  self.model = decoder
968
 
 
969
  @add_start_docstrings_to_model_forward(DOGE_INPUTS_DOCSTRING)
970
  @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
971
  def forward(
 
981
  output_hidden_states: Optional[bool] = None,
982
  return_dict: Optional[bool] = None,
983
  cache_position: Optional[torch.LongTensor] = None,
984
+ num_logits_to_keep: int = 0,
985
  **kwargs: Unpack[KwargsForCausalLM],
986
  ) -> Union[Tuple, CausalLMOutputWithPast]:
987
  r"""
 
991
  config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
992
  (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
993
 
994
+ num_logits_to_keep (`int`, *optional*):
995
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
996
  `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
997
  token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
 
 
998
 
999
  Returns:
1000
 
 
1003
  ```python
1004
  >>> from transformers import AutoTokenizer, AutoModelForCausalLM
1005
 
1006
+ >>> model = AutoModelForCausalLM.from_pretrained("JingzeShi/Doge-20M-Instruct")
1007
+ >>> tokenizer = AutoTokenizer.from_pretrained("JingzeShi/Doge-20M-Instruct")
1008
 
1009
  >>> prompt = "Hey, are you conscious? Can you talk to me?"
1010
  >>> inputs = tokenizer(prompt, return_tensors="pt")
 
1036
  )
1037
 
1038
  hidden_states = outputs[0]
1039
+
1040
  # only compute necessary logits, and do not upcast them to float if we are not computing the loss
1041
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
 
1042
 
1043
  loss = None
1044
  if labels is not None:
 
1057
  )
1058
 
1059
 
1060
+ class DogePatchEmbedding(nn.Module):
1061
+ """
1062
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` of shape `(batch_size, seq_len, hidden_size)` to be consumed by a Transformer.
1063
+ """
1064
+
1065
+ def __init__(self, config: DogeConfig):
1066
+ super().__init__()
1067
+
1068
+ self.num_channels = config.num_channels
1069
+ self.patch_size = config.patch_size
1070
+ self.hidden_dim = config.hidden_size
1071
+
1072
+ self.sequence_proj = nn.Conv2d(self.num_channels, self.hidden_dim, kernel_size=self.patch_size, stride=self.patch_size)
1073
+ self.state_proj = nn.Linear(self.hidden_dim, self.hidden_dim, bias=config.hidden_bias)
1074
+
1075
+ def forward(
1076
+ self,
1077
+ pixel_values: torch.Tensor,
1078
+ ) -> torch.Tensor:
1079
+ image_embedding = self.sequence_proj(pixel_values).flatten(2).transpose(1, 2)
1080
+ image_embedding = self.state_proj(image_embedding)
1081
+ return image_embedding
1082
+
1083
+
1084
+ class DogeForCausalVLM(DogeForCausalLM):
1085
+ _tied_weights_keys = ["lm_head.weight"]
1086
+
1087
+ def __init__(self, config: DogeConfig):
1088
+ super().__init__(config)
1089
+ self.config = config
1090
+ self.pixel_embed = DogePatchEmbedding(config)
1091
+
1092
+ # Initialize weights and apply final processing
1093
+ self.post_init()
1094
+
1095
+ def forward(
1096
+ self,
1097
+ input_ids: torch.LongTensor = None,
1098
+ pixel_values: torch.FloatTensor = None,
1099
+ attention_mask: Optional[torch.Tensor] = None,
1100
+ position_ids: Optional[torch.LongTensor] = None,
1101
+ past_key_values: Optional[torch.Tensor] = None,
1102
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1103
+ labels: Optional[torch.LongTensor] = None,
1104
+ use_cache: Optional[bool] = None,
1105
+ output_attentions: Optional[bool] = None,
1106
+ output_hidden_states: Optional[bool] = None,
1107
+ return_dict: Optional[bool] = None,
1108
+ cache_position: Optional[torch.LongTensor] = None,
1109
+ num_logits_to_keep: int = 0,
1110
+ **loss_kwargs,
1111
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1112
+ # TODO: @wubingheng111: refer to Llava for implementating the forward method
1113
+ ...
1114
+
1115
+ def prepare_inputs_for_generation(
1116
+ self,
1117
+ input_ids=None,
1118
+ pixel_values=None,
1119
+ past_key_values=None,
1120
+ input_embeds=None,
1121
+ attention_mask=None,
1122
+ cache_position=None,
1123
+ num_logits_to_keep=None,
1124
+ **kwargs,
1125
+ ):
1126
+ model_inputs = self.model.prepare_inputs_for_generation(
1127
+ input_ids,
1128
+ past_key_values=past_key_values,
1129
+ inputs_embeds=input_embeds,
1130
+ attention_mask=attention_mask,
1131
+ cache_position=cache_position,
1132
+ num_logits_to_keep=num_logits_to_keep,
1133
+ **kwargs,
1134
+ )
1135
+
1136
+ if cache_position[0] == 0:
1137
+ model_inputs["pixel_values"] = pixel_values
1138
+
1139
+ return model_inputs
1140
+
1141
+
1142
  @add_start_docstrings(
1143
  """
1144
  The Doge Model transformer with a sequence classification head on top (linear layer).
1145
 
1146
+ [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.
 
1147
 
1148
+ Since it does classification on the last token, it requires to know the position of the last token.
1149
+ If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
1150
+ If no `pad_token_id` is defined, it simply takes the last value in each row of the batch.
1151
+ Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch).
1152
+ """
 
 
1153
  )
1154
  class DogeForSequenceClassification(DogePreTrainedModel):
1155
  def __init__(self, config: DogeConfig):
1156
  super().__init__(config)
1157
+ self.config = config
1158
  self.num_labels = config.num_labels
1159
 
1160
  self.model = DogeModel(config)
1161
+ self.classifier = nn.Linear(config.hidden_size, self.num_labels, bias=False)
 
1162
 
1163
  # Initialize weights and apply final processing
1164
+ self.init_weights()
1165
 
1166
  def get_input_embeddings(self):
1167
  return self.model.word_embed
 
1185
  ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1186
  r"""
1187
  labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1188
+ Labels for computing the sequence classification/regression loss.
1189
+ Indices should be in `[0, ..., config.num_labels - 1]`.
1190
+ If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1191
  """
1192
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1193
 
1194
+ outputs = self.model(
1195
+ input_ids=input_ids,
1196
  attention_mask=attention_mask,
1197
  position_ids=position_ids,
1198
  past_key_values=past_key_values,
 
1202
  output_hidden_states=output_hidden_states,
1203
  return_dict=return_dict,
1204
  )
1205
+ hidden_states = outputs[0]
1206
+ logits = self.classifier(hidden_states)
1207
 
1208
  if input_ids is not None:
1209
  batch_size = input_ids.shape[0]
 
1227
 
1228
  loss = None
1229
  if labels is not None:
1230
+ loss = self.loss_function(
1231
+ logits=logits,
1232
+ labels=labels,
1233
+ pooled_logits=pooled_logits,
1234
+ config=self.config,
1235
+ )
1236
 
1237
  if not return_dict:
1238
+ output = (pooled_logits,) + outputs[1:]
1239
  return ((loss,) + output) if loss is not None else output
1240
 
1241
  return SequenceClassifierOutputWithPast(
1242
  loss=loss,
1243
  logits=pooled_logits,
1244
+ past_key_values=outputs.past_key_values,
1245
+ hidden_states=outputs.hidden_states,
1246
+ attentions=outputs.attentions,
1247
  )
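
One of the denser changes above is the expert-retrieval path in `DogeCDMoE.forward`, which now prefers `einx.add` with a pure-PyTorch fallback: two per-half top-k searches over `num_keys` product keys are combined into candidate scores and indices for `num_keys**2` experts, then reduced again to the final top-k. A self-contained sketch of the fallback combination with toy tensors (shapes are illustrative, not taken from the diff):

```python
# Standalone demo of the product-key combination used in DogeCDMoE's
# pure-torch fallback path, with toy similarity scores.
import torch

num_keys = 16  # sqrt of the per-head expert count
top_k = 8      # num_cdmoe_experts_per_head

# toy per-half similarities: (batch, seq_len, heads, num_keys)
sim_x = torch.randn(2, 5, 4, num_keys)
sim_y = torch.randn(2, 5, 4, num_keys)

scores_x, indices_x = sim_x.topk(top_k, dim=-1)
scores_y, indices_y = sim_y.topk(top_k, dim=-1)

# combine the two top-k lists into top_k*top_k candidates, then re-select top_k
all_scores = (scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)).view(*scores_x.shape[:-1], -1)
all_indices = (indices_x.unsqueeze(-1) * num_keys + indices_y.unsqueeze(-2)).view(*indices_x.shape[:-1], -1)

scores, pk_indices = all_scores.topk(top_k, dim=-1)
expert_indices = all_indices.gather(-1, pk_indices)  # ids into the num_keys**2 experts
print(expert_indices.shape)  # torch.Size([2, 5, 4, 8])
```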