Update README.md
README.md CHANGED
@@ -85,53 +85,63 @@ Please follow the [Build llama.cpp locally](https://github.com/ggerganov/llama.c

**5×80 GB GPUs are needed (this could be optimized); 1.4 TB of CPU memory is needed.**

pip3 install git+https://github.com/intel/auto-round.git
```python
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "DeepSeek-V3-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype="auto")

# Spread the routed-expert projections over cuda:1-4 by expert index
# (0-62, 63-127, 128-191, 192+); everything else stays on cuda:0.
block = model.model.layers
device_map = {}
for n, m in block.named_modules():
    if isinstance(m, (torch.nn.Linear, transformers.modeling_utils.Conv1D)):
        # output_device is recorded per branch but not consumed in this snippet;
        # only `device` feeds the device_map passed to AutoRound below.
        if "experts" in n and ("shared_experts" not in n) and int(n.split('.')[-2]) < 63 and "down_proj" not in n:
            device = "cuda:1"
            output_device = "cuda:1"
        elif "experts" in n and ("shared_experts" not in n) and "down_proj" in n and int(n.split('.')[-2]) < 63:
            device = "cuda:1"
            output_device = "cuda:0"
        elif "experts" in n and ("shared_experts" not in n) and 63 <= int(n.split('.')[-2]) < 128 and "down_proj" not in n:
            device = "cuda:2"
            output_device = "cuda:2"
        elif "experts" in n and ("shared_experts" not in n) and "down_proj" in n and 63 <= int(n.split('.')[-2]) < 128:
            device = "cuda:2"
            output_device = "cuda:0"
        elif "experts" in n and ("shared_experts" not in n) and 128 <= int(n.split('.')[-2]) < 192 and "down_proj" not in n:
            device = "cuda:3"
            output_device = "cuda:3"
        elif "experts" in n and ("shared_experts" not in n) and "down_proj" in n and 128 <= int(n.split('.')[-2]) < 192:
            device = "cuda:3"
            output_device = "cuda:0"
        elif "experts" in n and ("shared_experts" not in n) and "down_proj" not in n and int(n.split('.')[-2]) >= 192:
            device = "cuda:4"
            output_device = "cuda:4"
        elif "experts" in n and ("shared_experts" not in n) and "down_proj" in n and int(n.split('.')[-2]) >= 192:
            device = "cuda:4"
            output_device = "cuda:0"
        else:
            device = "cuda:0"
            output_device = "cuda:0"
        n = n[2:]  # strip the leading layer-index prefix (e.g. "0.") from the module name
        device_map.update({n: device})

from auto_round import AutoRound

layer_config = {}  # optional per-layer overrides; left empty here
autoround = AutoRound(model=model, tokenizer=tokenizer, layer_config=layer_config, device_map=device_map,
                      iters=200, batch_size=8, seqlen=512)
autoround.quantize()
autoround.save_quantized(format="gguf:q4_0", output_dir="tmp_autoround")
```
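The `layer_config` dict is left empty above, so every matched layer uses the global quantization settings. If some layers should be kept at higher precision, a minimal sketch of what such overrides might look like, assuming auto-round's per-layer config format with `bits`/`group_size` keys (the layer names below are purely illustrative):

```python
# Hypothetical per-layer overrides for the layer_config argument above.
# The supported keys ("bits", "group_size", "sym", ...) and the exact layer
# naming depend on the installed auto-round version; names here are examples only.
layer_config = {
    "model.layers.3.mlp.experts.0.down_proj": {"bits": 8},             # example: one expert projection at 8 bits
    "model.layers.0.self_attn.q_proj": {"bits": 8, "group_size": 32},  # example: one attention projection
}
```

Passing a dict like this in place of the empty one changes only the listed layers; everything else still follows the 4-bit settings implied by the `gguf:q4_0` export.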
## Ethical Considerations and Limitations