# flake8: noqa: F401
r"""Quantized Modules.



This file is in the process of migration to `torch/ao/nn/quantized`, and

is kept here for compatibility while the migration process is ongoing.

If you are adding a new entry/functionality, please, add it to the

appropriate file under the `torch/ao/nn/quantized/modules`,

while adding an import statement here.

"""

__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']

from torch.ao.nn.quantized.modules.conv import _reverse_repeat_padding

from torch.ao.nn.quantized.modules.conv import Conv1d
from torch.ao.nn.quantized.modules.conv import Conv2d
from torch.ao.nn.quantized.modules.conv import Conv3d

from torch.ao.nn.quantized.modules.conv import ConvTranspose1d
from torch.ao.nn.quantized.modules.conv import ConvTranspose2d
from torch.ao.nn.quantized.modules.conv import ConvTranspose3d
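
# Illustrative usage (a sketch, assuming this compatibility shim lives at
# `torch/nn/quantized/modules/conv.py`): because the classes above are plain
# re-exports, importing through the legacy path resolves to the very same
# class objects defined in `torch.ao.nn.quantized.modules.conv`.
#
#     from torch.nn.quantized.modules.conv import Conv2d
#     from torch.ao.nn.quantized.modules.conv import Conv2d as AOConv2d
#     assert Conv2d is AOConv2d  # both names refer to the same class object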