Upload prune.py with huggingface_hub
prune.py
ADDED
@@ -0,0 +1,906 @@
import time
import heapq
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from .sparsegpt import SparseGPT
from .layerwrapper import WrappedGPT
from .data import get_loaders
from scipy.optimize import linear_sum_assignment
import torch.nn.functional as F
from .ablate import AblateGPT

from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
# from transformers.utils import (
#     add_start_docstrings,
#     add_start_docstrings_to_model_forward,
#     is_flash_attn_2_available,
#     is_flash_attn_greater_or_equal_2_10,
#     logging,
#     replace_return_docstrings,
# )
from transformers.models.llama.configuration_llama import LlamaConfig

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from
    (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim).
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

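# Illustrative sketch (not part of the original script): repeat_kv expands
# grouped KV heads to match the query-head count, e.g. 8 KV heads with
# n_rep=4 become 32 attention heads; the shapes below are invented for the demo.
def _demo_repeat_kv():
    kv = torch.randn(1, 8, 16, 64)     # (batch, num_key_value_heads, seqlen, head_dim)
    expanded = repeat_kv(kv, n_rep=4)  # -> (1, 32, 16, 64)
    assert expanded.shape == (1, 32, 16, 64)
    # Matches torch.repeat_interleave along the head dimension, as documented.
    assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=4, dim=1))
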
def compute_attention_output_with_pruned_weights(
    inps,                    # Input tensor (batch_size, seq_len, hidden_dim)
    attention_mask,          # Attention mask (batch_size, 1, seq_len, seq_len)
    position_ids,            # Position IDs (batch_size, seq_len)
    pruned_weights,          # Dictionary of pruned weight matrices
    num_attention_heads=32,  # Number of attention heads
    hidden_dim=4096          # Hidden dimension size
):
    q_proj = pruned_weights['self_attn.q_proj']
    k_proj = pruned_weights['self_attn.k_proj']
    v_proj = pruned_weights['self_attn.v_proj']
    o_proj = pruned_weights['self_attn.o_proj']
    batch_size, seq_len, _ = inps.shape
    head_dim = hidden_dim // num_attention_heads

    # Only the first sample in the batch is processed.
    i = 0
    # Step 1: Compute Q, K, V for slice `inps[i]`
    Q = torch.matmul(inps[i], q_proj.T)  # Shape: [seq_len, hidden_dim]
    K = torch.matmul(inps[i], k_proj.T)  # Shape: [seq_len, hidden_dim]
    V = torch.matmul(inps[i], v_proj.T)  # Shape: [seq_len, hidden_dim]

    # Step 2: Reshape Q, K, V for multi-head attention
    Q = Q.view(seq_len, num_attention_heads, head_dim).transpose(0, 1)  # Shape: [num_heads, seq_len, head_dim]
    K = K.view(seq_len, num_attention_heads, head_dim).transpose(0, 1)  # Shape: [num_heads, seq_len, head_dim]
    V = V.view(seq_len, num_attention_heads, head_dim).transpose(0, 1)  # Shape: [num_heads, seq_len, head_dim]

    # Step 3: Compute attention scores (scaled dot-product attention)
    attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / head_dim**0.5  # Shape: [num_heads, seq_len, seq_len]

    # Apply attention mask (broadcast to [num_heads, seq_len, seq_len])
    attn_scores = attn_scores + attention_mask[0]

    # Step 4: Compute attention probabilities
    attn_probs = F.softmax(attn_scores, dim=-1)  # Shape: [num_heads, seq_len, seq_len]

    # Step 5: Compute attention output
    context = torch.matmul(attn_probs, V)  # Shape: [num_heads, seq_len, head_dim]

    # Step 6: Concatenate the outputs from all heads
    context = context.transpose(0, 1).contiguous().view(seq_len, hidden_dim)  # Shape: [seq_len, hidden_dim]

    # Step 7: Apply the output projection
    output = torch.matmul(context, o_proj.T)  # Shape: [seq_len, hidden_dim]
    pruned_weights.clear()

    return output

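# Hedged sketch of driving the helper above with random tensors; all shapes,
# head counts, and the causal mask here are illustrative assumptions, not
# values taken from the original pipeline.
def _demo_pruned_attention():
    seq_len, hidden_dim, n_heads = 8, 64, 4
    weights = {k: torch.randn(hidden_dim, hidden_dim)
               for k in ('self_attn.q_proj', 'self_attn.k_proj',
                         'self_attn.v_proj', 'self_attn.o_proj')}
    inps = torch.randn(1, seq_len, hidden_dim)
    causal_mask = torch.triu(torch.full((1, 1, seq_len, seq_len), float('-inf')), diagonal=1)
    position_ids = torch.arange(seq_len).unsqueeze(0)
    out = compute_attention_output_with_pruned_weights(
        inps, causal_mask, position_ids, weights,
        num_attention_heads=n_heads, hidden_dim=hidden_dim)
    assert out.shape == (seq_len, hidden_dim)
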
def find_layers(module, layers=[nn.Linear], name=''):
    """
    Recursively find the layers of a certain type in a module.

    Args:
        module (nn.Module): PyTorch module.
        layers (list): List of layer types to find.
        name (str): Name of the module.

    Returns:
        dict: Dictionary of layers of the given type(s) within the module.
    """
    if type(module) in layers:
        return {name: module}
    res = {}
    for name1, child in module.named_children():
        res.update(find_layers(
            child, layers=layers, name=name + '.' + name1 if name != '' else name1
        ))
    return res

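# Illustrative usage on a toy module: find_layers returns dotted module paths,
# which is how the pruning loops below address q_proj/k_proj/etc. inside each
# transformer block. The toy Sequential here is purely hypothetical.
def _demo_find_layers():
    block = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
    found = find_layers(block)
    assert sorted(found.keys()) == ['0', '2']  # only the nn.Linear children
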
def check_sparsity(model):
    use_cache = model.config.use_cache
    model.config.use_cache = False

    layers = model.model.layers
    count = 0
    total_params = 0
    for i in range(len(layers)):
        layer = layers[i]
        subset = find_layers(layer)

        sub_count = 0
        sub_params = 0
        for name in subset:
            W = subset[name].weight.data
            count += (W == 0).sum().item()
            total_params += W.numel()

            sub_count += (W == 0).sum().item()
            sub_params += W.numel()

        print(f"layer {i} sparsity {float(sub_count)/sub_params:.6f}")

    model.config.use_cache = use_cache
    return float(count)/total_params

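# Minimal sketch exercising check_sparsity without a real LLM: a stand-in
# object with the same attribute layout (config.use_cache, model.layers).
# The stand-in and its values are assumptions made for this demo only.
def _demo_check_sparsity():
    import types
    lin = nn.Linear(4, 4)
    lin.weight.data[:2] = 0  # zero half the weight rows -> 50% sparsity
    shell = types.SimpleNamespace(
        config=types.SimpleNamespace(use_cache=False),
        model=types.SimpleNamespace(layers=[lin]),
    )
    assert abs(check_sparsity(shell) - 0.5) < 1e-6
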
def prepare_calibration_input(model, dataloader, device):
    use_cache = model.config.use_cache
    model.config.use_cache = False
    layers = model.model.layers

    if "model.embed_tokens" in model.hf_device_map:
        device = model.hf_device_map["model.embed_tokens"]

    dtype = next(iter(model.parameters())).dtype
    inps = torch.zeros((128, model.seqlen, model.config.hidden_size), dtype=dtype, device=device)
    inps.requires_grad = False
    cache = {'i': 0, 'attention_mask': None, "position_ids": None}

    # Catcher records the inputs that reach the first transformer layer, then
    # aborts the forward pass so the rest of the model never runs.
    class Catcher(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, inp, **kwargs):
            inps[cache['i']] = inp
            cache['i'] += 1
            cache['attention_mask'] = kwargs['attention_mask']
            cache['position_ids'] = kwargs['position_ids']
            raise ValueError

    layers[0] = Catcher(layers[0])
    for batch in dataloader:
        try:
            model(batch[0].to(device))
        except ValueError:
            pass
    layers[0] = layers[0].module

    outs = torch.zeros_like(inps)
    attention_mask = cache['attention_mask']
    position_ids = cache['position_ids']
    model.config.use_cache = use_cache

    return inps, outs, attention_mask, position_ids

def return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before):
    thres_cumsum = sum_before * alpha
    sort_mask = tmp_metric <= thres_cumsum.reshape((-1, 1))
    thres = torch.gather(sort_res[0], dim=1, index=sort_mask.sum(dim=1, keepdims=True) - 1)
    W_mask = (W_metric <= thres)
    cur_sparsity = W_mask.sum() / W_mask.numel()
    return W_mask, cur_sparsity

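# Worked example (invented numbers) of the alpha-threshold helper above:
# alpha selects the fraction of each row's cumulative metric mass to prune,
# and the returned mask covers the smallest entries up to that mass.
def _demo_return_given_alpha():
    W_metric = torch.tensor([[4.0, 1.0, 3.0, 2.0]])
    sort_res = torch.sort(W_metric, dim=-1, stable=True)
    tmp_metric = torch.cumsum(sort_res[0], dim=1)
    sum_before = W_metric.sum(dim=1)
    W_mask, cur_sparsity = return_given_alpha(0.4, sort_res, W_metric, tmp_metric, sum_before)
    # 0.4 * 10.0 = 4.0 of cumulative mass -> the entries 1.0 and 2.0 are pruned.
    assert cur_sparsity.item() == 0.5
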
def prune_magnitude(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0):
    layers = model.model.layers

    for i in range(len(layers)):
        layer = layers[i]
        subset = find_layers(layer)

        for name in subset:
            W = subset[name].weight.data
            W_metric = torch.abs(W)
            if prune_n != 0:
                # Structured n:m sparsity: zero the prune_n smallest-magnitude
                # weights in every group of prune_m columns.
                W_mask = (torch.zeros_like(W) == 1)
                for ii in range(W_metric.shape[1]):
                    if ii % prune_m == 0:
                        tmp = W_metric[:, ii:(ii + prune_m)].float()
                        W_mask.scatter_(1, ii + torch.topk(tmp, prune_n, dim=1, largest=False)[1], True)
            else:
                # Unstructured sparsity: threshold |W| at the sparsity_ratio quantile.
                thresh = torch.sort(W_metric.flatten().cuda())[0][int(W.numel() * args.sparsity_ratio)].cpu()
                W_mask = (W_metric <= thresh)

            W[W_mask] = 0

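# Small worked example of the n:m masking used above, with invented values:
# for prune_n=2, prune_m=4, the two smallest-magnitude entries in every group
# of four columns are zeroed, giving exactly 50% structured sparsity.
def _demo_nm_sparsity():
    W = torch.tensor([[0.1, -0.9, 0.3, 0.05, 0.7, -0.2, 0.6, 0.01]])
    W_metric = torch.abs(W)
    W_mask = torch.zeros_like(W, dtype=torch.bool)
    for ii in range(0, W_metric.shape[1], 4):
        tmp = W_metric[:, ii:ii + 4]
        W_mask[:, ii:ii + 4].scatter_(1, torch.topk(tmp, 2, dim=1, largest=False)[1], True)
    W[W_mask] = 0
    assert (W == 0).float().mean().item() == 0.5
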
def construct_permutation_matrix(index, device):
    num_cols = index.size(0)
    P_hard = torch.zeros((num_cols, num_cols), device=device)
    P_hard[torch.arange(num_cols), index] = 1.0
    return P_hard

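# Quick sanity check (illustrative): right-multiplying by the matrix built
# above moves column i of W to column index[i], which is exactly how the
# pruning code permutes weight columns before applying the n:m mask.
def _demo_permutation():
    W = torch.tensor([[1., 2., 3.]])
    index = torch.tensor([2, 0, 1])
    P = construct_permutation_matrix(index, device=W.device)
    assert torch.equal(W @ P, torch.tensor([[2., 3., 1.]]))
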
def gumbel_sinkhorn(log_alpha, n_iters, tau, noise_factor=1.0, epsilon=1e-6, debug=True):
    """
    Applies the Gumbel-Sinkhorn algorithm to generate a soft permutation matrix.

    Args:
        log_alpha (torch.Tensor): Logits matrix of shape [n, n].
        n_iters (int): Number of Sinkhorn iterations.
        tau (float): Temperature parameter.
        noise_factor (float): Scaling factor for Gumbel noise.
        epsilon (float): Small constant for numerical stability.
        debug (bool): If True, validates intermediate tensors for NaNs/Infs.

    Returns:
        torch.Tensor: Soft permutation matrix of shape [n, n].
    """
    # Ensure computations are in float32 for numerical stability
    log_alpha = log_alpha.float()

    # Step 1: Sample Gumbel noise
    gumbel_noise = -torch.log(-torch.log(torch.rand_like(log_alpha) + epsilon) + epsilon)
    if debug:
        check_tensor(gumbel_noise, "gumbel_noise", debug)

    # Step 2: Add noise to the logits
    M = log_alpha + noise_factor * gumbel_noise
    if debug:
        check_tensor(M, "M", debug)

    # Step 3: Scale by temperature
    M_scaled = M / tau
    if debug:
        check_tensor(M_scaled, "M_scaled", debug)

    # Step 4: Subtract max per row for numerical stability
    row_max, _ = M_scaled.max(dim=1, keepdim=True)
    M_scaled = M_scaled - row_max
    if debug:
        check_tensor(M_scaled, "M_scaled - max", debug)

    # Step 5: Exponentiate to get S
    S = torch.exp(M_scaled)
    if debug:
        check_tensor(S, "S", debug)

    # Step 6: Sinkhorn iterations (alternating row/column normalization)
    for iteration in range(n_iters):
        # Row normalization
        row_sum = S.sum(dim=1, keepdim=True)
        S = S / (row_sum + epsilon)
        if debug:
            check_tensor(S, f"S_row_norm_iter_{iteration+1}", debug)

        # Column normalization
        col_sum = S.sum(dim=0, keepdim=True)
        S = S / (col_sum + epsilon * 100)  # Increased epsilon for column normalization
        if debug:
            check_tensor(S, f"S_col_norm_iter_{iteration+1}", debug)

    return S

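# Sketch of rounding a soft Sinkhorn output to a hard permutation with the
# Hungarian algorithm, mirroring what prune_wanda does below; the random 5x5
# logits are purely illustrative.
def _demo_gumbel_sinkhorn():
    logits = torch.randn(5, 5)
    S_soft = gumbel_sinkhorn(logits, n_iters=20, tau=1.0, debug=False)
    # Columns approximately sum to 1 after the final column normalization.
    assert torch.allclose(S_soft.sum(dim=0), torch.ones(5), atol=1e-3)
    row_ind, col_ind = linear_sum_assignment(-S_soft.numpy())
    P_hard = torch.zeros_like(S_soft)
    P_hard[row_ind, col_ind] = 1.0
    assert torch.equal(P_hard.sum(dim=0), torch.ones(5))
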
def print_tensor_stats(tensor, tensor_name="Tensor"):
    """
    Prints statistics of a tensor.

    Args:
        tensor (torch.Tensor): The tensor to inspect.
        tensor_name (str): Name of the tensor for identification.
    """
    print(f"{tensor_name} Stats:")
    print(f"  Shape: {tensor.shape}")
    print(f"  Dtype: {tensor.dtype}")
    print(f"  Min: {tensor.min().item()}")
    print(f"  Max: {tensor.max().item()}")
    print(f"  Mean: {tensor.mean().item()}")
    print(f"  Std: {tensor.std().item()}")
    print(f"  NaN Count: {torch.isnan(tensor).sum().item()}")
    print(f"  Inf Count: {torch.isinf(tensor).sum().item()}")

def check_tensor(tensor, tensor_name="Tensor", debug=False):
    """
    Checks a tensor for NaNs and Infs and, when debug is True, raises a
    ValueError if any are found.

    Args:
        tensor (torch.Tensor): The tensor to check.
        tensor_name (str): Name of the tensor for identification.
        debug (bool): If True, raises on NaNs/Infs.
    """
    nan_count = torch.isnan(tensor).sum().item()
    inf_count = torch.isinf(tensor).sum().item()
    if nan_count > 0 or inf_count > 0:
        if debug:
            raise ValueError(f"{tensor_name} has NaNs or Infs! (NaN: {nan_count}, Inf: {inf_count})")

def prune_wanda(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0):
    use_cache = model.config.use_cache
    model.config.use_cache = False
    module_name = args.module_name
    permutate_mode = args.permutate_mode
    layer_id = args.layer_id

    print("loading calibration data")
    dataloader, _ = get_loaders("c4", nsamples=args.nsamples, seed=args.seed, seqlen=model.seqlen, tokenizer=tokenizer)
    print("dataset loading complete")
    with torch.no_grad():
        inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, device)

    layers = model.model.layers
    # -----------------------------------------------------------
    # Permutation parameters
    num_epochs = 100          # Number of training epochs
    learning_rate = 0.5       # Learning rate for the optimizer
    tau = 3                   # Temperature parameter for Gumbel-Sinkhorn
    sinkhorn_iterations = 30  # Number of Sinkhorn iterations
    epsilon = 1e-8            # Small epsilon to prevent numerical issues

    if permutate_mode == 'eval':
        num_epochs = 1
    for i in range(len(layers)):
        layer = layers[i]
        subset = find_layers(layer)
        print(subset)

        if f"model.layers.{i}" in model.hf_device_map:  ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs
            dev = model.hf_device_map[f"model.layers.{i}"]
            inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)

        wrapped_layers = {}
        for name in subset:
            if permutate_mode in ('lora', 'full'):
                if module_name in name:
                    wrapped_layers[name] = WrappedGPT(subset[name])
            if permutate_mode == 'eval':
                wrapped_layers[name] = WrappedGPT(subset[name])

        def add_batch(name):
            def tmp(_, inp, out):
                wrapped_layers[name].add_batch(inp[0].data, out.data)
            return tmp

        handles = []
        for name in wrapped_layers:
            handles.append(subset[name].register_forward_hook(add_batch(name)))

        # Record the dense outputs of this layer before any pruning
        outputs_before_pruning = []
        for j in range(args.nsamples):
            output = layer(
                inps[j].unsqueeze(0),
                attention_mask=attention_mask,
                position_ids=position_ids,
            )[0]
            outputs_before_pruning.append(output)

        outputs_before_pruning = torch.stack(outputs_before_pruning)

        for j in range(args.nsamples):
            with torch.no_grad():
                outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
        for h in handles:
            h.remove()

        # Store the original weights to restore later
        original_weights = {}
        for name in subset:
            if permutate_mode in ('lora', 'full'):
                if module_name in name:
                    original_weights[name] = subset[name].weight.data.clone()
            if permutate_mode == 'eval':
                original_weights[name] = subset[name].weight.data.clone()

        # Initialize learnable logits M for each module in the subset
        if i == layer_id or permutate_mode == 'eval':
            rank = 1
            M_dict = {}
            U_dict = {}
            V_dict = {}
            original_weight = {}
            for name in subset:
                if permutate_mode in ('lora', 'full'):
                    if module_name in name:
                        print(name)
                        W = subset[name].weight.data  # Shape: [output_dim, input_dim]
                        original_weight[name] = W
                        num_cols = W.shape[1]
                        if permutate_mode == 'full':
                            M_dict[name] = nn.Parameter(torch.zeros(num_cols, num_cols, device=device))
                        if permutate_mode == 'lora':
                            U_dict[name] = nn.Parameter(torch.randn(num_cols, rank, device=device) * 0.0001)
                            V_dict[name] = nn.Parameter(torch.randn(rank, num_cols, device=device) * 0.0001)
                if permutate_mode == 'eval':
                    print(name)
                    W = subset[name].weight.data  # Shape: [output_dim, input_dim]
                    original_weight[name] = W
                    num_cols = W.shape[1]
            if permutate_mode == 'full':
                optimizer = optim.Adam(M_dict.values(), lr=learning_rate)
            if permutate_mode == 'lora':
                # U_dict and V_dict contain nn.Parameter objects
                params_to_optimize = list(U_dict.values()) + list(V_dict.values())
                optimizer = optim.Adam(params_to_optimize, lr=learning_rate)

            best_loss = float('inf')
            best_permutations = {}

            for epoch in range(num_epochs):
                if permutate_mode in ('lora', 'full'):
                    optimizer.zero_grad()
                original_weights = {}
                current_permutations = {}
                for name in subset:
                    original_weights[name] = subset[name].weight.data.clone()
                # Dictionaries for permuted and pruned weights and masks
                W_perm_dict = {}
                W_pruned_dict = {}
                W_mask_dict = {}

                total_preserved_metric = 0.0
                total_preserved_weight = 0.0
                # Step 1: Compute permutations and apply pruning for each module
                pruned_weights_dict = {}
                for name in subset:
                    if module_name in name or permutate_mode == 'eval':
                        W_metric = torch.abs(subset[name].weight.data) * torch.sqrt(wrapped_layers[name].scaler_row.reshape((1, -1)))

                        W = subset[name].weight.data
                        if permutate_mode in ('lora', 'full'):
                            num_cols = W.shape[1]
                            if permutate_mode == 'lora':
                                U = U_dict[name]
                                V = V_dict[name]
                                M = torch.matmul(U.half(), V.half()).half()
                                M = torch.clamp(M, min=-10.0, max=10.0)
                            if permutate_mode == 'full':
                                M = M_dict[name].half()
                                M = torch.clamp(M, min=-10.0, max=10.0)

                            S_soft = gumbel_sinkhorn(M, sinkhorn_iterations, tau, epsilon=epsilon)
                            S_soft = torch.clamp(S_soft, min=1e-8, max=1 - 1e-8)
                            # Step 2: Compute the hard permutation matrix P_hard
                            with torch.no_grad():
                                S_cpu = S_soft.detach().cpu().numpy()
                                if not np.isfinite(S_cpu).all():
                                    print("Invalid values in S_cpu")
                                    print(S_cpu)
                                    exit()
                                row_ind, col_ind = linear_sum_assignment(-S_cpu)
                                P_hard = torch.zeros_like(S_soft)
                                P_hard[row_ind, col_ind] = 1.0

                            # Straight-through trick: P_hard in the forward pass, S_soft gradients in the backward pass
                            P_hard = (P_hard - S_soft).detach() + S_soft

                            P_hard = P_hard.to(W.dtype)

                            current_permutations[name] = P_hard.detach().cpu()

                        # Load the best permutation matrices
                        if permutate_mode == 'eval':
                            module = None
                            if name in ('self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj'):
                                module = 'self'
                            if name == 'mlp.gate_proj':
                                module = 'gate'
                            if name == 'mlp.up_proj':
                                module = 'up'
                            if name == 'mlp.down_proj':
                                module = 'down'

                            if module in ('self', 'gate', 'up'):
                                best_permutations = torch.load(f'./p_weight/Layer{i}_{module}_P_matrix.pt')
                                P_hard = best_permutations[name]
                                P_hard = P_hard.to(W.dtype)
                            if module == 'down':
                                sorted_idx = torch.sort(torch.sum(W_metric, dim=0))[1]

                                # Channel reallocation (permutation): deal the sorted channels
                                # across the m positions of each n:m group, alternating direction
                                index = torch.zeros_like(sorted_idx)
                                for ii in range(1, prune_m + 1):
                                    if ii % 2 == 1:
                                        index[ii - 1::prune_m] = sorted_idx[
                                            int(W_metric.shape[1] * (ii - 1) / prune_m):int(W_metric.shape[1] * ii / prune_m)
                                        ]
                                    else:
                                        index[ii - 1::prune_m] = sorted_idx[
                                            int(W_metric.shape[1] * (ii - 1) / prune_m):int(W_metric.shape[1] * ii / prune_m)
                                        ].flip(0)

                                # Construct the P_hard permutation matrix
                                P_hard = construct_permutation_matrix(index, device=W.device)
                                P_hard = P_hard.to(W.dtype)
                                row_sums = P_hard.sum(dim=1)
                                col_sums = P_hard.sum(dim=0)

                        W_device = W.device
                        P_hard = P_hard.to(W_device)
                        # Permute the columns of W using P_hard
                        W_perm = torch.matmul(W, P_hard)
                        W_metric = W_metric.to(torch.float16)
                        W_metric_perm = torch.matmul(W_metric, P_hard)

                        W_mask = (torch.zeros_like(W_metric_perm) == 1)
                        for ii in range(0, W_metric_perm.shape[1], prune_m):
                            tmp = W_metric_perm[:, ii:(ii + prune_m)].float()
                            _, indices = torch.topk(tmp, prune_n, dim=1, largest=False)
                            W_mask[:, ii:(ii + prune_m)].scatter_(1, indices, True)

                        # Apply the pruning mask to the permuted metric to measure what survives
                        W_metric_preserved = W_metric_perm.clone()
                        W_metric_preserved[W_mask] = 0
                        W_metric_preserved = W_metric_preserved.to(torch.float32)  # Ensure precision
                        total_preserved_metric += W_metric_preserved.sum()

                        W_pruned_perm = W_perm.clone()
                        W_pruned_perm[W_mask] = 0
                        # Map the permuted and pruned weights back to the original column order
                        W_pruned = torch.matmul(W_pruned_perm, P_hard.t())
                        # Store pruned weights
                        W_pruned_dict[name] = W_pruned
                        total_preserved_weight += W_pruned.sum()
                        pruned_weights_dict[name] = W_pruned

                        subset[name].weight.data = W_pruned.clone()

                # Forward pass after pruning with permutation
                outputs_with_perm = []  # Use a list instead of a preallocated tensor
                for j in range(args.nsamples):
                    output = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
                    outputs_with_perm.append(output)
                # Stack outputs into a single tensor
                outputs_with_perm = torch.stack(outputs_with_perm)  # Shape: [nsamples, ...]

                # Measure the output change introduced by pruning
                differences_norm_with_perm = []
                for j in range(args.nsamples):
                    diff_norm = torch.norm(outputs_with_perm[j] - outputs_before_pruning[j])
                    differences_norm_with_perm.append(diff_norm)

                # Stack into a tensor
                differences_norm_with_perm = torch.stack(differences_norm_with_perm)
                differences_norm_with_perm.requires_grad_(True)

                # Compute loss
                loss_diff = torch.mean(differences_norm_with_perm)
                loss = -total_preserved_metric / 100
                # Alternative losses explored:
                # loss = -total_preserved_metric / 200000 + torch.mean(differences_norm_with_perm)
                # loss = -total_preserved_weight
                # loss = M.sum()
                if permutate_mode in ('lora', 'full'):
                    if loss_diff.item() < best_loss:
                        best_loss = loss_diff.item()
                        best_permutations = current_permutations
                        # Save the best permutations to a file
                        torch.save(best_permutations, f'./p_weight/Layer{i}_{module_name}_P_matrix.pt')
                        print(f"New best loss: {best_loss:.6f}, saving permutation matrices.")

                    print(f'Layer {i}, Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.6f}')
                    loss.backward(retain_graph=True)

                    # Check gradients (the lora branch reads U_dict/V_dict, where
                    # the low-rank factors actually live)
                    if permutate_mode == 'full':
                        for name in M_dict:
                            grad_norm = M_dict[name].grad.norm().item() if M_dict[name].grad is not None else None
                            print(f'Permutation parameter {name}, Gradient Norm: {grad_norm}')
                    if permutate_mode == 'lora':
                        for name in U_dict:
                            U_grad_norm = U_dict[name].grad.norm().item() if U_dict[name].grad is not None else None
                            V_grad_norm = V_dict[name].grad.norm().item() if V_dict[name].grad is not None else None
                            print(f'Layer {name}:')
                            print(f'  U Gradient Norm: {U_grad_norm}')
                            print(f'  V Gradient Norm: {V_grad_norm}')

                    optimizer.step()
                    # Zero the gradients
                    optimizer.zero_grad()
                for name in subset:
                    subset[name].weight.data = original_weights[name].clone()

                # M and S_soft only exist in the lora/full training modes
                if permutate_mode in ('lora', 'full'):
                    del M, S_soft
                del W_metric, W, P_hard, W_perm, W_metric_perm, W_mask
                torch.cuda.empty_cache()

        # -------------------------------------------------------------------------------------------------------------------------
        # Step 1: Pruning without permutation (kept for reference)
        # for name in subset:
        #     if "self" in name:
        #         print(f"Pruning layer {i} name {name} without permutation")
        #         W_metric = torch.abs(subset[name].weight.data) * torch.sqrt(wrapped_layers[name].scaler_row.reshape((1,-1)))
        #
        #         W_mask = (torch.zeros_like(W_metric) == 1)  ## initialize a mask to be all False
        #         if prune_n != 0:
        #             # structured n:m sparsity
        #             for ii in range(W_metric.shape[1]):
        #                 if ii % prune_m == 0:
        #                     tmp = W_metric[:, ii:(ii + prune_m)].float()
        #                     W_mask.scatter_(1, ii + torch.topk(tmp, prune_n, dim=1, largest=False)[1], True)
        #
        #         subset[name].weight.data[W_mask] = 0  ## set weights to zero
        # outputs_after_pruning_no_perm = torch.zeros_like(outs)
        # for j in range(args.nsamples):
        #     with torch.no_grad():
        #         outputs_after_pruning_no_perm[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
        # differences_norm_no_perm = []
        # for j in range(args.nsamples):
        #     diff_norm = torch.norm(outputs_after_pruning_no_perm[j] - outputs_before_pruning[j])
        #     differences_norm_no_perm.append(diff_norm.item())
        #
        # # Restore original weights before permutation
        # for name in subset:
        #     if "self" in name:
        #         subset[name].weight.data = original_weights[name].clone()
        #
        # print(f"Layer {i} difference norms:")
        # for j in range(args.nsamples):
        #     print(
        #         f"Sample {j}: Without Permutation = {differences_norm_no_perm[j]:.6f}, "
        #         f"With Permutation = {differences_norm_with_perm[j]:.6f}"
        #     )
        # -------------------------------------------------------------------------------------------------------------------------

        outputs_after_pruning = torch.zeros_like(outs)
        for j in range(args.nsamples):
            with torch.no_grad():
                outputs_after_pruning[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
                outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
        inps, outs = outs, inps

        # differences_norm = []
        # for j in range(args.nsamples):
        #     diff_norm = torch.norm(outputs_after_pruning[j] - outputs_before_pruning[j])
        #     differences_norm.append(diff_norm.item())
        # print(f"Differences for layer {i}:")
        # for j in range(args.nsamples):
        #     print(f"Sample {j}: Difference Norm = {differences_norm[j]}")

    model.config.use_cache = use_cache
    torch.cuda.empty_cache()

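# Worked sketch of the Wanda importance score used throughout prune_wanda:
# |W| scaled per input channel by the square root of the activation norm.
# scaler_row here is a made-up stand-in for WrappedGPT.scaler_row.
def _demo_wanda_metric():
    W = torch.tensor([[0.5, -0.5], [1.0, -0.1]])
    scaler_row = torch.tensor([4.0, 1.0])
    W_metric = torch.abs(W) * torch.sqrt(scaler_row.reshape((1, -1)))
    # The high-activation input channel (column 0) doubles in importance.
    assert torch.allclose(W_metric, torch.tensor([[1.0, 0.5], [2.0, 0.1]]))
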
@torch.no_grad()
def prune_sparsegpt(args, model, tokenizer, dev, prune_n=0, prune_m=0):
    ## SparseGPT code available at: https://github.com/IST-DASLab/sparsegpt/tree/f5c25005a61f96a0933ca2f95705a963585aafaa
    print('Starting ...')
    dataloader, _ = get_loaders("c4", nsamples=args.nsamples, seed=args.seed, seqlen=model.seqlen, tokenizer=tokenizer)

    use_cache = model.config.use_cache
    model.config.use_cache = False
    layers = model.model.layers

    if "model.embed_tokens" in model.hf_device_map:
        dev = model.hf_device_map["model.embed_tokens"]

    dtype = next(iter(model.parameters())).dtype
    inps = torch.zeros(
        (args.nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
    )
    cache = {'i': 0, 'attention_mask': None, "position_ids": None}

    class Catcher(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, inp, **kwargs):
            inps[cache['i']] = inp
            cache['i'] += 1
            cache['attention_mask'] = kwargs['attention_mask']
            cache['position_ids'] = kwargs['position_ids']
            raise ValueError

    layers[0] = Catcher(layers[0])
    for batch in dataloader:
        try:
            model(batch[0].to(dev))
        except ValueError:
            pass
    layers[0] = layers[0].module
    torch.cuda.empty_cache()

    outs = torch.zeros_like(inps)
    attention_mask = cache['attention_mask']
    position_ids = cache['position_ids']

    print('Ready.')

    for i in range(len(layers)):
        layer = layers[i]
        if f"model.layers.{i}" in model.hf_device_map:
            dev = model.hf_device_map[f"model.layers.{i}"]
            print(f"layer {i} device {dev}")
            inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)

        subset = find_layers(layer)

        gpts = {}
        for name in subset:
            gpts[name] = SparseGPT(subset[name])

        def add_batch(name):
            def tmp(_, inp, out):
                gpts[name].add_batch(inp[0].data, out.data)
            return tmp

        handles = []
        for name in gpts:
            handles.append(subset[name].register_forward_hook(add_batch(name)))

        for j in range(args.nsamples):
            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
        for h in handles:
            h.remove()

        for name in gpts:
            print(i, name)
            print('Pruning ...')

            gpts[name].fasterprune(args.sparsity_ratio, prune_n=prune_n, prune_m=prune_m, percdamp=0.01, blocksize=128)
            gpts[name].free()

        for j in range(args.nsamples):
            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]

        layers[i] = layer
        torch.cuda.empty_cache()

        inps, outs = outs, inps

    model.config.use_cache = use_cache
    torch.cuda.empty_cache()

@torch.no_grad()
def prune_ablate(args, model, tokenizer, dev, prune_n=0, prune_m=0):
    ## SparseGPT code available at: https://github.com/IST-DASLab/sparsegpt/tree/f5c25005a61f96a0933ca2f95705a963585aafaa
    print('Starting ...')
    dataloader, _ = get_loaders("c4", nsamples=args.nsamples, seed=args.seed, seqlen=model.seqlen, tokenizer=tokenizer)

    use_cache = model.config.use_cache
    model.config.use_cache = False
    layers = model.model.layers

    if "model.embed_tokens" in model.hf_device_map:
        dev = model.hf_device_map["model.embed_tokens"]

    dtype = next(iter(model.parameters())).dtype
    inps = torch.zeros(
        (args.nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
    )
    cache = {'i': 0, 'attention_mask': None, "position_ids": None}

    class Catcher(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, inp, **kwargs):
            inps[cache['i']] = inp
            cache['i'] += 1
            cache['attention_mask'] = kwargs['attention_mask']
            cache['position_ids'] = kwargs['position_ids']
            raise ValueError

    layers[0] = Catcher(layers[0])
    for batch in dataloader:
        try:
            model(batch[0].to(dev))
        except ValueError:
            pass
    layers[0] = layers[0].module
    torch.cuda.empty_cache()

    outs = torch.zeros_like(inps)
    attention_mask = cache['attention_mask']
    position_ids = cache['position_ids']

    print('Ready.')

    for i in range(len(layers)):
        layer = layers[i]
        if f"model.layers.{i}" in model.hf_device_map:
            dev = model.hf_device_map[f"model.layers.{i}"]
            print(f"layer {i} device {dev}")
            inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)

        subset = find_layers(layer)

        gpts = {}
        for name in subset:
            gpts[name] = AblateGPT(subset[name])

        def add_batch(name):
            def tmp(_, inp, out):
                gpts[name].add_batch(inp[0].data, out.data)
            return tmp

        handles = []
        for name in gpts:
            handles.append(subset[name].register_forward_hook(add_batch(name)))

        for j in range(args.nsamples):
            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
        for h in handles:
            h.remove()

        for name in gpts:
            print(i, name)
            print('Pruning ...')

            if args.prune_method == "ablate_wanda_seq":
                prune_mask = gpts[name].get_wanda_mask(args.sparsity_ratio, prune_n, prune_m)
            elif args.prune_method == "ablate_mag_seq":
                prune_mask = gpts[name].get_mag_mask(args.sparsity_ratio, prune_n, prune_m)
            elif "iter" in args.prune_method:
                prune_mask = None

            gpts[name].fasterprune(args, args.sparsity_ratio, mask=prune_mask, prune_n=prune_n, prune_m=prune_m, percdamp=0.01, blocksize=128)
            gpts[name].free()

        for j in range(args.nsamples):
            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]

        layers[i] = layer
        torch.cuda.empty_cache()

        inps, outs = outs, inps

    model.config.use_cache = use_cache
    torch.cuda.empty_cache()