AI Study Notes for Absolute Beginners: Convolutional Neural Networks (CNN)

import torch
from torch import nn
'''
    The original style
'''
class LeNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Convolutional layer: extracts features; no zero-padding, so the feature map shrinks
        self.conv1 = nn.Conv2d(in_channels=1,out_channels=6,kernel_size=5,stride=1,padding=0)
        # Pooling layer: halves the spatial size
        self.mp = nn.MaxPool2d(kernel_size=2,stride=2,padding=0)
        self.conv2 = nn.Conv2d(in_channels=6,out_channels=16,kernel_size=5,stride=1,padding=0)
        self.mp2 = nn.MaxPool2d(kernel_size=2,stride=2,padding=0)
        # Flatten: lay all pixels out in a single row so the later classification/regression computations can run
        self.flatten = nn.Flatten()
        # Fully connected (linear) layers; no ReLU activations yet in this version
        self.linear1 = nn.Linear(in_features = 400,out_features = 120)   # 400 = 16 channels * 5 * 5 spatial
        self.linear2 = nn.Linear(in_features = 120,out_features = 84)
        self.linear3 = nn.Linear(in_features = 84,out_features = 10)
        
    def forward(self,X):
        print(X.shape)
        X = self.conv1(X)
        print(X.shape)
        X = self.mp(X)
        print(X.shape)
        X = self.conv2(X)
        print(X.shape)
        X = self.mp2(X)
        print(X.shape)
        # Flatten: keep the batch dimension, collapse the rest
        X = X.view(X.size(0),-1)
        print(X.shape)
        X = self.linear1(X)
        print(X.shape)
        X = self.linear2(X)
        print(X.shape)
        X = self.linear3(X)
        print(X.shape)
        return X

model = LeNet()
X = torch.randn(2,1,32,32)
model(X)
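Running this prints the tensor shape after every layer. With a 5x5 kernel, stride 1, and no padding, each convolution follows the standard output-size formula out = (in - kernel + 2*padding) / stride + 1, and each 2x2 max pool halves the spatial size, so the trace for a (2, 1, 32, 32) input works out to:

# input        -> torch.Size([2, 1, 32, 32])
# conv1 (5x5)  -> torch.Size([2, 6, 28, 28])    # (32 - 5) / 1 + 1 = 28
# maxpool 2x2  -> torch.Size([2, 6, 14, 14])
# conv2 (5x5)  -> torch.Size([2, 16, 10, 10])   # (14 - 5) / 1 + 1 = 10
# maxpool 2x2  -> torch.Size([2, 16, 5, 5])
# flatten      -> torch.Size([2, 400])          # 16 * 5 * 5 = 400
# linear1/2/3  -> torch.Size([2, 120]) -> torch.Size([2, 84]) -> torch.Size([2, 10])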

'''
    The style after the field had developed for a while
'''
class LeNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=1,out_channels=6,kernel_size=5,stride=1,padding=0)
        # Added a normalization layer that standardizes the activations, speeding up convergence and improving performance
        self.bn1 = nn.BatchNorm2d(num_features=6)
        # Added an activation function that applies a non-linear transform so the model can learn complex features
        self.relu1 = nn.ReLU()
        self.mp = nn.MaxPool2d(kernel_size=2,stride=2,padding=0)
        self.conv2 = nn.Conv2d(in_channels=6,out_channels=16,kernel_size=5,stride=1,padding=0)
        self.bn2 = nn.BatchNorm2d(num_features=16)
        self.relu2 = nn.ReLU()
        self.mp2 = nn.MaxPool2d(kernel_size=2,stride=2,padding=0)
        
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(in_features = 400,out_features = 120)
        # Added an activation function that applies a non-linear transform so the model can learn complex features
        self.relu11 = nn.ReLU()
        self.linear2 = nn.Linear(in_features = 120,out_features = 84)
        self.relu22 = nn.ReLU()
        self.linear3 = nn.Linear(in_features = 84,out_features = 10)
        
    def forward(self,X):
        X = self.conv1(X)
        X = self.bn1(X)
        X = self.relu1(X)
        
        X = self.mp(X)
        X = self.conv2(X)
        X = self.bn2(X)
        X = self.relu2(X)
        
        X = self.mp2(X)
        
        # Flatten (X.view does the same job as the nn.Flatten defined above)
        X = X.view(X.size(0),-1)
        
        X = self.linear1(X)
        X = self.relu11(X)
        
        X = self.linear2(X)
        X = self.relu22(X)
        
        X = self.linear3(X)

        return X
model = LeNet()
X = torch.randn(2,1,32,32)
model(X)
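One practical note on nn.BatchNorm2d: it behaves differently in training and evaluation mode. During training it normalizes each batch with that batch's own statistics; at inference time it should instead use the running statistics it accumulated, which is what model.eval() switches on. A minimal inference sketch:

model = LeNet()
model.eval()                       # BatchNorm now uses its running statistics
with torch.no_grad():              # no gradients needed for inference
    logits = model(torch.randn(2,1,32,32))
print(logits.shape)                # torch.Size([2, 10])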

'''
    The modern style: bundle one convolution layer, one normalization layer,
    and one activation function into a single reusable "conv block"
'''
# Encapsulate the conv + BN + ReLU pattern as a reusable block
class ConvBlock(nn.Module):
    def __init__(self,in_channels,out_channels,kernel_size=3,stride=1,padding=1):
        super().__init__()
        # One convolution layer
        self.conv = nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=kernel_size,stride=stride,padding=padding)
        # One normalization layer
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        # One activation function
        self.relu = nn.ReLU()

    def forward(self,x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x
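Since ConvBlock only chains three modules in a fixed order, the same block can also be written more compactly by subclassing nn.Sequential, which supplies the forward pass for free. A minimal equivalent sketch (ConvBlockSeq is just an illustrative name):

class ConvBlockSeq(nn.Sequential):
    def __init__(self,in_channels,out_channels,kernel_size=3,stride=1,padding=1):
        # nn.Sequential runs the modules in the order they are passed in
        super().__init__(
            nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=kernel_size,stride=stride,padding=padding),
            nn.BatchNorm2d(num_features=out_channels),
            nn.ReLU()
        )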

# Wrap LeNet itself, calling the encapsulated conv block inside
class LeNet(nn.Module):
    def __init__(self):
        super().__init__()
        # 1. Feature extraction
        self.feature_extractor = nn.Sequential(
            # First reusable conv block
            ConvBlock(in_channels=1,out_channels=6,kernel_size=5,stride=1,padding=0),
            # Pooling layer (also called subsampling)
            nn.MaxPool2d(kernel_size=2,stride=2,padding=0),
            # Second reusable conv block
            ConvBlock(in_channels=6,out_channels=16,kernel_size=5,stride=1,padding=0),
            # Pooling layer (also called subsampling)
            nn.MaxPool2d(kernel_size=2,stride=2,padding=0)
        )
        # 2. Classification
        self.classifier = nn.Sequential(
            # Flatten layer
            nn.Flatten(),
            # Fully connected (linear) layers
            nn.Linear(in_features = 400,out_features = 120),
            # Activation
            nn.ReLU(),
            nn.Linear(in_features = 120,out_features = 84),
            nn.ReLU(),
            nn.Linear(in_features = 84,out_features = 10)
        )
        
    def forward(self,X):
        # 1. Extract features
        X = self.feature_extractor(X)
        # 2. Classification output
        X = self.classifier(X)
        
        return X
model = LeNet()
X = torch.randn(2,1,32,32)
print(model(X).shape)  # torch.Size([2, 10])
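To actually train this network you would pair it with a loss function and an optimizer. A minimal training-loop sketch, using random tensors as a stand-in for a real dataset (the batch size, learning rate, and step count here are arbitrary choices for illustration):

model = LeNet()
criterion = nn.CrossEntropyLoss()                         # expects raw logits and integer class labels
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for step in range(5):                                     # a few steps, just to show the mechanics
    X = torch.randn(8, 1, 32, 32)                         # fake batch: 8 single-channel 32x32 images
    y = torch.randint(0, 10, (8,))                        # fake labels for the 10 classes
    optimizer.zero_grad()
    loss = criterion(model(X), y)
    loss.backward()
    optimizer.step()
    print(f"step {step}: loss = {loss.item():.4f}")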
