mwitiderrick committed
Commit a1ca7fe · 1 Parent(s): 26df164

Create recipe.yaml

Files changed (1)
  1. recipe.yaml +38 -0
recipe.yaml ADDED
@@ -0,0 +1,38 @@
+ test_stage:
+   obcq_modifiers:
+     LogarithmicEqualizationModifier:
+       mappings: [
+         [["re:.*q_proj", "re:.*k_proj", "re:.*v_proj"], "re:.*input_layernorm"],
+         [["re:.*gate_proj", "re:.*up_proj"], "re:.*post_attention_layernorm"],
+       ]
+     QuantizationModifier:
+       ignore:
+         # These operations don't make sense to quantize
+         - LlamaRotaryEmbedding
+         - LlamaRMSNorm
+         - SiLUActivation
+         - MatMulOutput_QK
+         - MatMulOutput_PV
+         # Skip quantizing the BMMs
+         - QuantizableMatMul
+         # Skip quantizing the layers with the most sensitive activations
+         - model.layers.23.mlp.down_proj
+         - model.layers.3.mlp.down_proj
+         - model.layers.22.mlp.down_proj
+         - model.layers.21.mlp.down_proj
+         - model.layers.23.self_attn.o_proj
+       post_oneshot_calibration: true
+       scheme_overrides:
+         Embedding:
+           input_activations: null
+           weights:
+             num_bits: 8
+             symmetric: false
+     SparseGPTModifier:
+       sparsity: 0.5
+       block_size: 128
+       sequential_update: true
+       quantize: true
+       percdamp: 0.01
+       mask_structure: "0:0"
+       targets: ["re:model.layers.\\d*$"]
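
Taken together, the recipe chains three OBCQ modifiers: LogarithmicEqualizationModifier folds outlier activations into the preceding layernorm/projection weights (a SmoothQuant-style transform with logarithmically scaled factors), QuantizationModifier applies INT8 quantization while skipping the listed non-quantizable ops and most sensitive layers, and SparseGPTModifier prunes the matched decoder layers to 50% unstructured sparsity (mask_structure "0:0") with quantization-aware reconstruction. Below is a minimal sketch of applying the recipe with SparseML's one-shot entrypoint; the model name, calibration dataset, and output directory are illustrative assumptions, not part of this commit:

```python
# Sketch: apply recipe.yaml in one shot with SparseML.
# The model, dataset, and output_dir values are placeholder
# assumptions -- substitute the model this recipe targets.
import sparseml.transformers

sparseml.transformers.oneshot(
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # assumed target model
    dataset="open_platypus",                     # assumed calibration data
    recipe="recipe.yaml",                        # the file added in this commit
    output_dir="./obcq_deployment",
)
```

Two design choices in the recipe are worth noting: sequential_update: true makes SparseGPT compress one layer at a time, trading runtime for a much smaller memory footprint, and post_oneshot_calibration: true reruns quantization calibration after the one-shot pass so the observers reflect the pruned weights.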