gabrielmbmb committed eb758a2 (verified) · 1 parent: d7e6db2

Create README.md

Files changed (1): README.md (+109, −0)

# Upcycled-Qwen1.5-MoE2.7B

This is an attempt (probably too naive) to reproduce the upcycling process used to initialize [Qwen1.5-MoE-A2.7B](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B) from the dense [Qwen1.5-1.8B](https://huggingface.co/Qwen/Qwen1.5-1.8B) checkpoint: the embedding, attention, and layer-norm weights are copied as-is, while each dense MLP is replicated and split into fine-grained experts that initialize the sparse MoE blocks.

## Upcycling script

```python
from copy import deepcopy
from dataclasses import dataclass

from torch import nn
from transformers import AutoModelForCausalLM
from typing_extensions import Self


@dataclass
class UpcyclingConfig:
    finegrained_experts: int
    partitions_from_mlp: int

    @property
    def upcycling_factor(self) -> int:
        # How many copies of the dense MLP are needed per layer: each copy
        # is split into `partitions_from_mlp` fine-grained experts.
        return self.finegrained_experts // self.partitions_from_mlp
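
# With the values used at the bottom of this script (finegrained_experts=64,
# partitions_from_mlp=4), upcycling_factor == 64 // 4 == 16 dense MLP copies
# per decoder layer.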


def iterate_in_chunks(list1, list2, chunk_size1, chunk_size2):
    """Yield parallel chunks: `chunk_size1` items of `list1` alongside `chunk_size2` items of `list2`."""
    iterations = max(len(list1) // chunk_size1, len(list2) // chunk_size2)
    for i in range(iterations):
        start_idx1 = i * chunk_size1
        end_idx1 = start_idx1 + chunk_size1
        start_idx2 = i * chunk_size2
        end_idx2 = start_idx2 + chunk_size2
        yield (list1[start_idx1:end_idx1], list2[start_idx2:end_idx2])
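
# e.g. iterate_in_chunks([0, 1, 2, 3, 4, 5, 6, 7], ["a", "b"], 4, 1)
#      -> ([0, 1, 2, 3], ["a"]), then ([4, 5, 6, 7], ["b"])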


def chunk_linear(linear: nn.Linear, chunks: int, down_proj: bool = False) -> tuple[nn.Linear, ...]:
    """Split a linear layer into `chunks` smaller linear layers.

    `gate_proj`/`up_proj` are split along the output dimension and `down_proj`
    along the input dimension, so the partitions together cover the original
    projection.
    """
    if not down_proj:
        in_features = linear.in_features
        out_features = linear.out_features // chunks
    else:
        in_features = linear.in_features // chunks
        out_features = linear.out_features

    # `Tensor.chunk` splits along dim 0 (output features) by default; the
    # `down_proj` weights must instead be split along dim 1 (input features).
    weights = linear.weight.chunk(chunks, dim=1 if down_proj else 0)
    if linear.bias is None:
        biases = [None] * chunks
    elif down_proj:
        # Splitting the input dimension leaves the output dimension (and its
        # bias) intact. Qwen1.5 MLP projections have no bias, so this branch
        # is not exercised here.
        biases = [linear.bias] * chunks
    else:
        biases = linear.bias.chunk(chunks)
    linear_layers = []
    for weight, bias in zip(weights, biases):
        new_linear = nn.Linear(
            in_features=in_features, out_features=out_features, bias=bias is not None
        )
        new_linear.weight = nn.Parameter(weight.clone())  # clone so partitions don't share storage
        if bias is not None:
            new_linear.bias = nn.Parameter(bias.clone())
        linear_layers.append(new_linear)
    return tuple(linear_layers)
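
# For Qwen1.5-1.8B (hidden_size 2048, intermediate_size 5504), splitting a
# gate/up projection into 4 partitions yields four Linear(2048, 1376) layers,
# and splitting the down projection yields four Linear(1376, 2048) layers.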


class UpcycledModelMixin:
    sparse_moe_block_cls: type

    @classmethod
    def upcycled_from(cls, source_model, config: UpcyclingConfig) -> Self:
        # Build the MoE config from the dense config; MoE-only fields (number
        # of experts, expert intermediate size, ...) keep the defaults of
        # `cls.config_class`.
        upcycled_model_config = cls.config_class(**source_model.config.to_dict())
        if hasattr(upcycled_model_config, "shared_expert_intermediate_size"):
            upcycled_model_config.shared_expert_intermediate_size = source_model.config.intermediate_size

        upcycled_model = cls(upcycled_model_config)
        upcycled_model.model.embed_tokens = source_model.model.embed_tokens

        for upcycled_layer, layer in zip(upcycled_model.model.layers, source_model.model.layers):
            upcycled_layer.self_attn = layer.self_attn
            upcycled_mlp_layers = [deepcopy(layer.mlp) for _ in range(config.upcycling_factor)]

            # One full-size copy of the dense MLP becomes the shared expert.
            if hasattr(upcycled_layer.mlp, "shared_expert"):
                upcycled_layer.mlp.shared_expert = upcycled_mlp_layers.pop(-1)

            # Split each remaining MLP copy into `partitions_from_mlp`
            # fine-grained experts.
            partitions = config.partitions_from_mlp
            for experts, mlps in iterate_in_chunks(
                upcycled_layer.mlp.experts, upcycled_mlp_layers, partitions, 1
            ):
                gate_projs = chunk_linear(mlps[0].gate_proj, partitions, down_proj=False)
                up_projs = chunk_linear(mlps[0].up_proj, partitions, down_proj=False)
                down_projs = chunk_linear(mlps[0].down_proj, partitions, down_proj=True)
                for i, expert in enumerate(experts):
                    expert.gate_proj = gate_projs[i]
                    expert.up_proj = up_projs[i]
                    expert.down_proj = down_projs[i]
                    expert.act_fn = deepcopy(mlps[0].act_fn)

            upcycled_layer.input_layernorm = layer.input_layernorm
            upcycled_layer.post_attention_layernorm = layer.post_attention_layernorm

        # The router gates keep their random initialization.
        upcycled_model.lm_head = source_model.lm_head
        return upcycled_model
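
# With finegrained_experts=64 and partitions_from_mlp=4 (used below), each
# layer gets 16 dense MLP copies: one becomes the shared expert and the
# remaining 15 are split 4-ways into the 60 routed experts created by the
# default `Qwen2MoeConfig`.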


from transformers import Qwen2MoeForCausalLM as _Qwen2MoeForCausalLM
from transformers.models.qwen2_moe.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock


class Qwen2MoeForCausalLM(UpcycledModelMixin, _Qwen2MoeForCausalLM):
    sparse_moe_block_cls = Qwen2MoeSparseMoeBlock


source_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-1.8B")
model = Qwen2MoeForCausalLM.upcycled_from(
    source_model,
    UpcyclingConfig(
        finegrained_experts=64,
        partitions_from_mlp=4,
    ),
)
```
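
## Sanity check

Not part of the original script: a minimal sketch to verify the upcycled model runs end to end and to persist it. The prompt and output path are placeholders.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-1.8B")
inputs = tokenizer("Hello, world!", return_tensors="pt")
outputs = model(**inputs)
print(outputs.logits.shape)  # (batch, seq_len, vocab_size)

# Save the upcycled checkpoint (path is a placeholder)
model.save_pretrained("Upcycled-Qwen1.5-MoE2.7B")
tokenizer.save_pretrained("Upcycled-Qwen1.5-MoE2.7B")
```

Upcycling only provides an initialization: the MoE router gates are randomly initialized, so the model needs further training before its outputs are meaningful.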