'''
ResNet in PyTorch.

ResNet (Deep Residual Network) is a deep neural network architecture proposed
by Kaiming He et al. at Microsoft Research. Its key innovation is residual
learning: skip connections that mitigate the degradation problem in very deep
networks.

Key features:
1. Residual blocks with skip (identity) connections
2. Batch Normalization after each convolution
3. Scales to much deeper networks (up to 152 layers in the original paper)
4. Breakthrough results on several computer vision tasks

Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
    Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
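
# Quick usage sketch (mirrors test() at the bottom of this file):
#   net = ResNet18()
#   logits = net(torch.randn(2, 3, 32, 32))   # -> torch.Size([2, 10])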

class BasicBlock(nn.Module):
    """基础残差块
    
    用于ResNet18/34等浅层网络。结构为:
    x -> Conv -> BN -> ReLU -> Conv -> BN -> (+) -> ReLU
         |------------------------------------------|
         
    Args:
        in_channels: 输入通道数
        out_channels: 输出通道数
        stride: 步长,用于下采样,默认为1
        
    注意:基础模块没有通道压缩,expansion=1
    """
    expansion = 1
    
    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_channels)
        )
        
        # If the input and output dimensions differ, use a 1x1 convolution
        # on the shortcut to match them
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != self.expansion * out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * out_channels),
            )
            
    def forward(self, x):
        out = self.features(x)
        out += self.shortcut(x)
        out = torch.relu(out)
        return out
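
# Illustrative shape check for BasicBlock (assumed example, not executed on
# import): with stride=2 the block halves the spatial size while the 1x1
# shortcut projection matches the channels:
#   block = BasicBlock(64, 128, stride=2)
#   y = block(torch.randn(1, 64, 32, 32))   # -> torch.Size([1, 128, 16, 16])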
    

class Bottleneck(nn.Module):
    """瓶颈残差块
    
    用于ResNet50/101/152等深层网络。结构为:
    x -> 1x1Conv -> BN -> ReLU -> 3x3Conv -> BN -> ReLU -> 1x1Conv -> BN -> (+) -> ReLU
         |-------------------------------------------------------------------|
         
    Args:
        in_channels: 输入通道数
        zip_channels: 压缩后的通道数
        stride: 步长,用于下采样,默认为1
        
    注意:通过1x1卷积先压缩通道数,再还原,expansion=4
    """
    expansion = 4
    
    def __init__(self, in_channels, zip_channels, stride=1):
        super(Bottleneck, self).__init__()
        out_channels = self.expansion * zip_channels
        self.features = nn.Sequential(
            # 1x1 convolution: compress channels
            nn.Conv2d(in_channels, zip_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(zip_channels),
            nn.ReLU(inplace=True),
            # 3x3 convolution: extract features
            nn.Conv2d(zip_channels, zip_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(zip_channels),
            nn.ReLU(inplace=True),
            # 1x1 convolution: restore channels
            nn.Conv2d(zip_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels)
        )
        
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )
            
    def forward(self, x):
        out = self.features(x)
        out += self.shortcut(x)
        out = torch.relu(out)
        return out
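
# Illustrative shape check for Bottleneck (assumed example): channels are
# compressed to zip_channels, then expanded by 4x on the way out:
#   block = Bottleneck(64, 64)                # output channels = 4 * 64 = 256
#   y = block(torch.randn(1, 64, 32, 32))     # -> torch.Size([1, 256, 32, 32])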
    
class ResNet(nn.Module):
    """ResNet模型
    
    网络结构:
    1. 一个卷积层用于特征提取
    2. 四个残差层,每层包含多个残差块
    3. 平均池化和全连接层进行分类
    
    对于CIFAR10,特征图大小变化为:
    (32,32,3) -> [Conv] -> (32,32,64) -> [Layer1] -> (32,32,64) -> [Layer2] 
    -> (16,16,128) -> [Layer3] -> (8,8,256) -> [Layer4] -> (4,4,512) -> [AvgPool] 
    -> (1,1,512) -> [FC] -> (num_classes)
    
    Args:
        block: 残差块类型(BasicBlock或Bottleneck)
        num_blocks: 每层残差块数量的列表
        num_classes: 分类数量,默认为10
        verbose: 是否打印中间特征图大小
        init_weights: 是否初始化权重
    """
    def __init__(self, block, num_blocks, num_classes=10, verbose=False, init_weights=True):
        super(ResNet, self).__init__()
        self.verbose = verbose
        self.in_channels = 64
        
        # Convolutional stem
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        
        # Four residual layers
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        
        # Classification head
        self.avg_pool = nn.AvgPool2d(kernel_size=4)
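        # Note: kernel_size=4 assumes the 4x4 feature maps produced by 32x32
        # inputs; for arbitrary input sizes, nn.AdaptiveAvgPool2d((1, 1)) is a
        # common input-size-agnostic alternative.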
        self.classifier = nn.Linear(512 * block.expansion, num_classes)

        if init_weights:
            self._initialize_weights()
            
    def _make_layer(self, block, out_channels, num_blocks, stride):
        """构建残差层
        
        Args:
            block: 残差块类型
            out_channels: 输出通道数
            num_blocks: 残差块数量
            stride: 第一个残差块的步长(用于下采样)
            
        Returns:
            nn.Sequential: 残差层
        """
        strides = [stride] + [1] * (num_blocks - 1)
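        # Only the first block in a layer may downsample; e.g. num_blocks=4,
        # stride=2 yields strides == [2, 1, 1, 1].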
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)
    
    def forward(self, x):
        """前向传播
        
        Args:
            x: 输入张量,[N,3,32,32]
            
        Returns:
            out: 输出张量,[N,num_classes]
        """
        out = self.features(x)
        if self.verbose:
            print('stem output: {}'.format(out.shape))

        out = self.layer1(out)
        if self.verbose:
            print('layer1 output: {}'.format(out.shape))

        out = self.layer2(out)
        if self.verbose:
            print('layer2 output: {}'.format(out.shape))

        out = self.layer3(out)
        if self.verbose:
            print('layer3 output: {}'.format(out.shape))

        out = self.layer4(out)
        if self.verbose:
            print('layer4 output: {}'.format(out.shape))
            
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out
    
    def _initialize_weights(self):
        """初始化模型权重
        
        采用kaiming初始化方法:
        - 卷积层权重采用kaiming_normal_初始化
        - BN层参数采用常数初始化
        - 线性层采用正态分布初始化
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

def ResNet18(verbose=False):
    """ResNet-18 model."""
    return ResNet(BasicBlock, [2, 2, 2, 2], verbose=verbose)

def ResNet34(verbose=False):
    """ResNet-34 model."""
    return ResNet(BasicBlock, [3, 4, 6, 3], verbose=verbose)

def ResNet50(verbose=False):
    """ResNet-50 model."""
    return ResNet(Bottleneck, [3, 4, 6, 3], verbose=verbose)

def ResNet101(verbose=False):
    """ResNet-101 model."""
    return ResNet(Bottleneck, [3, 4, 23, 3], verbose=verbose)

def ResNet152(verbose=False):
    """ResNet-152 model."""
    return ResNet(Bottleneck, [3, 8, 36, 3], verbose=verbose)
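
# Depth bookkeeping: depth = 1 (stem conv) + convs-per-block * sum(num_blocks)
# + 1 (fc). For example, ResNet-34: 1 + 2 * (3+4+6+3) + 1 = 34, and
# ResNet-50: 1 + 3 * (3+4+6+3) + 1 = 50.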

def test():
    """Smoke test: run a forward pass and print a model summary."""
    net = ResNet34()
    x = torch.randn(2, 3, 32, 32)
    y = net(x)
    print('Output shape:', y.size())

    # Print the model structure (requires the third-party torchinfo package)
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, input_size=(2, 3, 32, 32))

if __name__ == '__main__':
    test()