# One-shot OBCQ recipe: activation equalization, then INT8 quantization,
# applied blockwise by SparseGPT (sparsity 0.0, so quantization only).
test_stage:
  obcq_modifiers:
    # Folds activation outliers into the preceding LayerNorm weights
    # (log-scale variant of SmoothQuant); module names match GPT-2 layers.
    LogarithmicEqualizationModifier:
      mappings:
        - - ['re:.*c_attn']
          - re:.*ln_1
        - - ['re:.*c_fc']
          - re:.*ln_2
    QuantizationModifier:
      ignore: [Dropout, LayerNorm, PytorchGELUTanh]
      post_oneshot_calibration: true
      scheme_overrides:
        # Quantize embedding weights to asymmetric INT8; leave their
        # input activations unquantized.
        Embedding:
          input_activations: null
          weights: {num_bits: 8, symmetric: false}
    SparseGPTModifier:
      sparsity: 0.0             # no pruning; OBCQ runs for quantization only
      block_size: 128           # weight columns updated per block
      sequential_update: true   # compress one targeted layer at a time
      quantize: true
      percdamp: 0.01            # Hessian dampening fraction
      mask_structure: "0:0"     # unstructured (quoted so YAML keeps it a string)
      targets: ['re:transformer.h.\d*$']  # each GPT-2 transformer block
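
To apply the recipe, SparseML exposes a one-shot entry point that loads a model, runs calibration, and executes the modifiers above in order. The following is a minimal sketch, assuming the recipe is saved as recipe.yaml; the gpt2 checkpoint, open_platypus dataset, sample count, and output path are placeholder assumptions to adapt to your setup.

    from sparseml.transformers import SparseAutoModelForCausalLM, oneshot

    # Placeholder model: any GPT-2-style checkpoint whose module names
    # (c_attn, c_fc, ln_1, ln_2, transformer.h.N) match the recipe's regexes.
    model = SparseAutoModelForCausalLM.from_pretrained("gpt2")

    oneshot(
        model=model,
        dataset="open_platypus",        # assumption: calibration dataset
        recipe="recipe.yaml",           # the recipe shown above
        num_calibration_samples=512,    # assumption: calibration sample count
        output_dir="./gpt2-obcq-int8",  # assumption: save location
    )

The equalization and SparseGPT passes each run once over the calibration samples, and the quantized model is written to output_dir.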