from torch import nn

class QuantStub(nn.Module):
    r"""Quantize stub module, before calibration, this is same as an observer,

    it will be swapped as `nnq.Quantize` in `convert`.



    Args:

        qconfig: quantization configuration for the tensor,

            if qconfig is not provided, we will get qconfig from parent modules

    """
    def __init__(self, qconfig=None):
        super().__init__()
        if qconfig:
            self.qconfig = qconfig

    def forward(self, x):
        # Identity pass-through; `convert` swaps this stub for a real quantize op
        return x
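
# Example (illustrative sketch): before `convert`, QuantStub simply passes
# float tensors through unchanged, so it can be dropped into a float model
# without affecting its outputs.
#
#     import torch
#     stub = QuantStub()
#     x = torch.randn(2, 3)
#     assert torch.equal(stub(x), x)  # identity until swapped by `convert`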


class DeQuantStub(nn.Module):
    r"""Dequantize stub module, before calibration, this is same as identity,

    this will be swapped as `nnq.DeQuantize` in `convert`.



    Args:

        qconfig: quantization configuration for the tensor,

            if qconfig is not provided, we will get qconfig from parent modules

    """
    def __init__(self, qconfig=None):
        super().__init__()
        if qconfig:
            self.qconfig = qconfig

    def forward(self, x):
        # Identity pass-through; `convert` swaps this stub for a real dequantize op
        return x
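
# Example (illustrative sketch of the eager-mode workflow these stubs are meant
# for; import paths and default qconfig names may differ across PyTorch versions):
#
#     import torch
#     from torch.ao.quantization import (
#         QuantStub, DeQuantStub, get_default_qconfig, prepare, convert,
#     )
#
#     class M(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.quant = QuantStub()      # float -> quantized boundary after convert
#             self.fc = torch.nn.Linear(4, 4)
#             self.dequant = DeQuantStub()  # quantized -> float boundary after convert
#
#         def forward(self, x):
#             return self.dequant(self.fc(self.quant(x)))
#
#     m = M().eval()
#     m.qconfig = get_default_qconfig("fbgemm")
#     prepare(m, inplace=True)     # stubs observe activations during calibration
#     m(torch.randn(8, 4))         # calibration pass
#     convert(m, inplace=True)     # QuantStub -> nnq.Quantize, DeQuantStub -> nnq.DeQuantize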


class QuantWrapper(nn.Module):
    r"""A wrapper class that wraps the input module, adds QuantStub and

    DeQuantStub and surround the call to module with call to quant and dequant

    modules.



    This is used by the `quantization` utility functions to add the quant and

    dequant modules, before `convert` function `QuantStub` will just be observer,

    it observes the input tensor, after `convert`, `QuantStub`

    will be swapped to `nnq.Quantize` which does actual quantization. Similarly

    for `DeQuantStub`.

    """
    quant: QuantStub
    dequant: DeQuantStub
    module: nn.Module

    def __init__(self, module):
        super().__init__()
        # Reuse the wrapped module's qconfig (if any) for the stubs
        qconfig = getattr(module, "qconfig", None)
        self.add_module('quant', QuantStub(qconfig))
        self.add_module('dequant', DeQuantStub(qconfig))
        self.add_module('module', module)
        # Keep the wrapper in the same training/eval mode as the wrapped module
        self.train(module.training)

    def forward(self, X):
        X = self.quant(X)
        X = self.module(X)
        return self.dequant(X)
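
# Example (illustrative sketch): QuantWrapper adds the quant/dequant boundary
# around an existing float module, so the model itself does not need explicit
# stubs; import paths may differ across PyTorch versions.
#
#     import torch
#     from torch.ao.quantization import get_default_qconfig, prepare, convert
#
#     float_mod = torch.nn.Linear(4, 4).eval()
#     float_mod.qconfig = get_default_qconfig("fbgemm")
#     wrapped = QuantWrapper(float_mod)   # quant -> module -> dequant
#     prepare(wrapped, inplace=True)
#     wrapped(torch.randn(8, 4))          # calibration pass
#     convert(wrapped, inplace=True)      # wrapped.module is now a quantized Linear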