muverqqw committed on
Commit
58460cf
·
verified ·
1 Parent(s): 15539d8

Upload configuration_alinlight.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. configuration_alinlight.py +63 -0
configuration_alinlight.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2026 EngineerGL Research.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from transformers import PretrainedConfig
17
+
18
class AlinlightConfig(PretrainedConfig):
    """Configuration class for the Alinlight model.

    Stores the hyperparameters that define an Alinlight architecture:
    embedding/vocabulary sizes, transformer depth and width, grouped-query
    attention head counts, rotary-embedding settings, and generation-related
    token ids. All remaining keyword arguments are forwarded to
    ``PretrainedConfig``.
    """

    # Identifier used by the transformers Auto* machinery to map
    # config -> model classes.
    model_type = "alinlight"

    def __init__(
        self,
        vocab_size=128000,
        hidden_size=2048,
        intermediate_size=5632,
        num_hidden_layers=22,
        num_attention_heads=32,
        num_key_value_heads=8,
        max_position_embeddings=4096,
        sliding_window=4096,
        attention_dropout=0.0,
        rms_norm_eps=1e-5,
        rope_theta=10000.0,
        rope_scaling=None,
        initializer_range=0.02,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # Let the base class record the special-token ids, the embedding
        # tying flag, and any extra keyword arguments first, so the explicit
        # attribute writes below take precedence over anything in **kwargs.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs
        )

        # Embedding / layer dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers

        # Attention layout (grouped-query attention: 32 query heads
        # sharing 8 key/value heads by default) and context limits.
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.max_position_embeddings = max_position_embeddings
        self.sliding_window = sliding_window
        self.attention_dropout = attention_dropout

        # Rotary position embedding parameters.
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling

        # Normalization, initialization, and decoding behavior.
        self.rms_norm_eps = rms_norm_eps
        self.initializer_range = initializer_range
        self.use_cache = use_cache