PyTorch Basics 7 || Convolutional Neural Networks (Advanced)

The basic CNN: a serial network structure

[Figure: a basic serial CNN structure]

A more complex CNN architecture: GoogLeNet

[Figure: the GoogLeNet architecture]

How do we implement it?

First, find the repeated network structure and encapsulate it as a class.

[Figure: GoogLeNet with the repeated blocks circled in red]

The red-circled block above is called an Inception Module; its structure is shown below.

[Figure: structure of the Inception Module]

The idea: we do not know in advance whether a 3×3 or a 5×5 kernel is the better choice, so we simply use several kernel sizes in parallel; during training, the branches whose kernel sizes work best are effectively given larger weights.

The tensors produced by the four branches must have the same width and height; their channel counts may differ.

A 1×1 convolution can change the number of channels of its input, and using it to shrink the channel dimension before an expensive large-kernel convolution greatly reduces the amount of computation (see the sketch below).

"concatenate" means joining the branch tensors together.
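As a rough illustration of the 1×1 savings, here is a back-of-the-envelope multiplication count (the 192-channel input, 28×28 feature map, and 32 output channels are assumed sizes, chosen only for illustration):

# direct 5x5 convolution: 192 -> 32 channels on a 28x28 map
direct_5x5 = 5 * 5 * 28 * 28 * 192 * 32                 # 120,422,400 multiplications
# 1x1 bottleneck first: 192 -> 16 channels, then 5x5: 16 -> 32 channels
bottleneck = 1 * 1 * 28 * 28 * 192 * 16 \
           + 5 * 5 * 28 * 28 * 16 * 32                  # 12,443,648 multiplications
print(direct_5x5 / bottleneck)                          # roughly a 10x reduction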

[Figures: Inception Module implementation details]

Implementing the block

import torch
import torch.nn.functional as F

class InceptionA(torch.nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()

        self.branch1x1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)

        self.branch5x5_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = torch.nn.Conv2d(16, 24, kernel_size=5, padding=2)

        self.branch3x3_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = torch.nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = torch.nn.Conv2d(24, 24, kernel_size=3, padding=1)

        self.branch_pool = torch.nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        # tensors are (b, c, w, h); indexing starts at 0, so dim=1 concatenates along the channel dimension
        return torch.cat(outputs, dim=1)
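A quick sanity check of the block's output shape (a minimal sketch; the 10-channel, 12×12 input mirrors what conv1 of the full network below produces):

block = InceptionA(in_channels=10)
x = torch.randn(1, 10, 12, 12)
print(block(x).shape)  # torch.Size([1, 88, 12, 12]): 16 + 24 + 24 + 24 = 88 channels, same width/height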

Full code

import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch
import matplotlib.pyplot as plt

transform = transforms.Compose([
    transforms.ToTensor(),  # convert the PIL image to a CxHxW float tensor with values in [0, 1]
    transforms.Normalize((0.1307,), (0.3081,))  # mean and standard deviation, precomputed for MNIST
])

batch_size = 64

train_dataset = datasets.MNIST(root='../dataset/mnist',
                               train=True,
                               download=True,  # download if not already present
                               transform=transform
                               )

train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)

test_dataset = datasets.MNIST(root='../dataset/mnist',
                              train=False,
                              download=True,  # download if not already present
                              transform=transform
                              )

test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)

class InceptionA(torch.nn.Module):
    # after InceptionA the channel count becomes 88 (16 + 24 + 24 + 24);
    # the input and output width/height stay the same
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()

        self.branch1x1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)

        self.branch5x5_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = torch.nn.Conv2d(16, 24, kernel_size=5, padding=2)

        self.branch3x3_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = torch.nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = torch.nn.Conv2d(24, 24, kernel_size=3, padding=1)

        self.branch_pool = torch.nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        # tensors are (b, c, w, h); indexing starts at 0, so dim=1 concatenates along the channel dimension
        return torch.cat(outputs, dim=1)

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(88, 20, kernel_size=5)  # 88 = channels coming out of InceptionA

        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)

        self.mp = torch.nn.MaxPool2d(2)
        # for a 28x28 MNIST input: conv1 -> 24x24, pool -> 12x12, conv2 -> 8x8,
        # pool -> 4x4; the final InceptionA yields 88 channels, so 88 * 4 * 4 = 1408
        self.fc = torch.nn.Linear(1408, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        x = x.view(in_size, -1)  # flatten to (batch, 1408)
        x = self.fc(x)
        return x

model = Net()

criterion = torch.nn.CrossEntropyLoss()

# use GPU acceleration if available; move the model to the device
# before constructing the optimizer, so the optimizer sees the device parameters
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

def train(epoch):
    running_loss = 0.0
    # with batch_size 64 and 60000 training images, batch_idx runs from 0 to 937
    # (938 batches); each batch has shape (64, 1, 28, 28), except the last, smaller one
    for batch_idx, data in enumerate(train_loader, 0):
        x, y = data
        x, y = x.to(device), y.to(device)  # move the batch to the device

        optimizer.zero_grad()
        y_pred = model(x)
        loss = criterion(y_pred, y)  # cross-entropy loss
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        if batch_idx % 300 == 299:
            print("[%d,%5d] loss:%.3f" % (epoch+1, batch_idx+1, running_loss/300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            x,y = data
            x,y = x.to(device),y.to(device)
            y_pred = model(x)
            _,predicted = torch.max(y_pred.data,dim=1)
            total += y.size(0)
            correct += (predicted==y).sum().item()
    print('accuracy on test set:%d%% [%d/%d]'%(100*correct/total,correct,total))
    accuracy_list.append(100*correct/total)

if __name__ == '__main__':
    accuracy_list = []
    for epoch in range(10):
        train(epoch)
        test()
    plt.plot(np.linspace(1,10,10),accuracy_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.show()

Vanishing gradients

Making the network deeper and more complex can actually lower performance. One cause: if, as updates proceed, the local gradients along a path are all smaller than 1, the chain rule multiplies them together and the product tends toward 0; in the update w = w - αg the gradient g then vanishes, and the early-layer weights stop being updated.
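A minimal sketch of the effect (the 50-layer stack of small sigmoid layers is an arbitrary toy setup, not part of the networks above):

import torch

# 50 tiny Linear + Sigmoid layers in series; sigmoid's derivative is at most 0.25,
# so the chain-rule product shrinks rapidly as it flows backward
layers = [torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Sigmoid()) for _ in range(50)]
net = torch.nn.Sequential(*layers)

net(torch.randn(4, 8)).sum().backward()

print(net[0][0].weight.grad.abs().mean())   # first layer: many orders of magnitude smaller
print(net[-1][0].weight.grad.abs().mean())  # last layer: comparatively large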

How do we solve this?

Add a skip connection: the residual network (ResNet).

[Figures: the residual block's skip connection and a network built from residual blocks]

F(x) + x is an element-wise tensor addition, so the channels, width, and height of the two tensors must all match.

# residual block
class ResidualBlock(torch.nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # padding=1 with a 3x3 kernel keeps width and height unchanged, so x and y can be added
        self.conv1 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        y = F.relu(x + y)  # add the skip connection, then apply the final ReLU
        return y
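Because a residual block leaves the shape untouched, it can be inserted between any two layers whose channel counts match; a quick check (the sizes are chosen only for illustration):

rb = ResidualBlock(16)
x = torch.randn(1, 16, 12, 12)
print(rb(x).shape)  # torch.Size([1, 16, 12, 12]): identical to the input shape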
Network structure
class Net2(torch.nn.Module):
    def __init__(self):
        super(Net2, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.mp = torch.nn.MaxPool2d(kernel_size=2)

        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)

        # for a 28x28 input: conv1 -> 24x24, pool -> 12x12, conv2 -> 8x8, pool -> 4x4;
        # 32 channels * 4 * 4 = 512
        self.fc = torch.nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = self.mp(F.relu(self.conv1(x)))
        x = self.rblock1(x)
        x = self.mp(F.relu(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)  # flatten to (batch, 512)
        x = self.fc(x)
        return x
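Net2 is a drop-in replacement for Net: reusing the training script above, only the model construction (and the optimizer, which must see the new parameters) changes; a minimal sketch:

model = Net2().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
for epoch in range(10):
    train(epoch)
    test()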