刘二 PyTorch Deep Learning (10): Advanced CNN

Advanced Convolutional Neural Networks (CNN): GoogLeNet

  1. Hyperparameters: the size of a convolution kernel is a hyperparameter.

  2. Information fusion: as an analogy, multiply each subject's exam score by a weight and sum them (Σ score × weight) to get a total mark; that weighted sum is information fusion. In a convolution it is the same: the per-channel results are added together, and that final summed value is the information fusion.

  3. 1×1 convolution kernel: its main use is to change the number of channels and thereby reduce the amount of computation. (Take the figure below: if the input has 3 channels, the 1×1 kernel must also have 3 channels, yet the output has only 1 channel, because the three per-channel products are summed to produce the value 'x' on the right. A code sketch follows the figure.)

[Figure: a 1×1 convolution summing three input channels into one output channel]

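A minimal sketch (not part of the original post) showing that a 1×1 convolution is exactly this kind of information fusion, a per-pixel weighted sum over the input channels:

import torch

x = torch.randn(1, 3, 5, 5)                       # 3-channel input
conv = torch.nn.Conv2d(3, 1, kernel_size=1, bias=False)
y = conv(x)                                       # output has 1 channel: (1, 1, 5, 5)

# the same result by hand: sum over channels of w_c * x[:, c]
w = conv.weight.view(3)                           # three scalar weights
manual = (x * w.view(1, 3, 1, 1)).sum(dim=1, keepdim=True)
print(torch.allclose(y, manual))                  # True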
  1. Counting the multiplications in a convolution: kernel width squared × input image width × input image height × number of input channels × number of output channels (a worked example follows this list).

  2. The number of output channels equals the number of convolution kernels (filters).

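Applying the formula to assumed numbers (a 28×28 feature map, 192 input channels, 32 output channels; these are illustrative values, not the ones in the code later) shows why a 1×1 reduction pays off:

# direct 5x5 convolution: k^2 * W * H * C_in * C_out multiplications
direct = 5 ** 2 * 28 * 28 * 192 * 32
print(direct)      # 120,422,400

# the same layer with a 1x1 bottleneck down to 16 channels first
bottleneck = (1 ** 2 * 28 * 28 * 192 * 16    # 1x1 reduction step
              + 5 ** 2 * 28 * 28 * 16 * 32)  # 5x5 on the reduced tensor
print(bottleneck)  # 12,443,648 -- roughly a 10x saving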
  1. Inception block: an Inception block packages a sub-network that repeats throughout the model into a single reusable class, which reduces code duplication (see the inceptionA class in the full code below).

Execution flow:

Step 1: a convolution layer (1 input channel, 10 output channels, 5×5 kernel). The layer runs in three stages: convolution, pooling, activation (x = F.relu(self.mp(self.conv1(x)))).

Step 2: an Inception block (10 input channels, 88 output channels; 88 = 16 + 24 + 24 + 24, the concatenated channels of the four branches).

Step 3: another convolution layer (88 input channels, 20 output channels, 5×5 kernel).

Step 4: an Inception block (20 input channels, 88 output channels).

Step 5: max pooling (2×2 kernel).

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.incep1 = inceptionA(inchannels=10)
        self.conv2 = torch.nn.Conv2d(88, 20, kernel_size=5)
        self.incep2 = inceptionA(inchannels=20)

        self.mp = torch.nn.MaxPool2d(kernel_size=2)

        self.fc = torch.nn.Linear(1408, 10)

The 1408 in the fully connected layer above does not have to be worked out by hand; it can be read off automatically, as follows:

First, write the forward method without calling the fully connected layer; after x.view has flattened the tensor, inspecting x.shape gives the flattened size, 1408.

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        x = x.view(in_size, -1)
        count = x.shape  # inspect this to read off the 1408 for the fully connected layer
        # x = self.fc(x)

        return x

Then simply call the train method below once and the size is computed automatically.

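A lighter alternative (a sketch; it assumes the standard 1×28×28 MNIST input and that self.fc is still commented out in forward) is to push a dummy batch through the network and read the shape off directly:

dummy = torch.randn(1, 1, 28, 28)  # one fake MNIST image
net = Net()
with torch.no_grad():
    out = net(dummy)               # forward stops before the fc layer
print(out.shape)                   # torch.Size([1, 1408])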
Code

import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

batch_size = 64

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

train_datasets = datasets.MNIST(root='./data/mnist/', train=True, download=False, transform=transform)
train_dataloader = DataLoader(dataset=train_datasets, shuffle=True, batch_size=batch_size)

test_datasets = datasets.MNIST(root='./data/mnist/', train=False, download=False, transform=transform)
test_dataloader = DataLoader(dataset=test_datasets, shuffle=False, batch_size=batch_size)


class inceptionA(torch.nn.Module):
    def __init__(self, inchannels):
        super(inceptionA, self).__init__()
        self.branch1x1 = torch.nn.Conv2d(inchannels, 16, kernel_size=1)

        self.branch5x5_1 = torch.nn.Conv2d(inchannels, 16, kernel_size=1)
        self.branch5x5_2 = torch.nn.Conv2d(16, 24, kernel_size=5, padding=2)

        self.branch3x3_1 = torch.nn.Conv2d(inchannels, 16, kernel_size=1)
        self.branch3x3_2 = torch.nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = torch.nn.Conv2d(24, 24, kernel_size=3, padding=1)

        self.branch_pool = torch.nn.Conv2d(inchannels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]

        return torch.cat(outputs, dim=1)  # concatenate along channels: 16 + 24 + 24 + 24 = 88

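# Sanity check (a sketch, not in the original): an Inception block preserves the
# spatial size and always outputs 16 + 24 + 24 + 24 = 88 channels, e.g.
# inceptionA(inchannels=10)(torch.randn(1, 10, 12, 12)).shape == (1, 88, 12, 12)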

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.incep1 = inceptionA(inchannels=10)
        self.conv2 = torch.nn.Conv2d(88, 20, kernel_size=5)
        self.incep2 = inceptionA(inchannels=20)

        self.mp = torch.nn.MaxPool2d(kernel_size=2)

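        # with a 28x28 MNIST input the flattened features are 88 channels * 4 * 4 spatial = 1408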
        self.fc = torch.nn.Linear(1408, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        x = x.view(in_size, -1)
        # count = x.shape  # inspect this to find the 1408 above
        x = self.fc(x)

        return x


model = Net()

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.05)


def train(epoch):
    running_loss = 0.0
    for batch_index, data in enumerate(train_dataloader, 0):
        inputs, target = data
        outputs = model(inputs)
        loss = criterion(outputs, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_index % 300 == 299:
            print('[%d,%5d] loss: %.3f' % (epoch + 1, batch_index + 1, running_loss / 300))
            running_loss = 0.0


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_dataloader:
            images, labels = data
            outputs = model(images)
            # take the index of the maximum value in each row, i.e. the predicted class
            _, predicted = torch.max(outputs.data, dim=1)  # dim=0 runs down the rows, dim=1 runs across the columns
            total += labels.size(0)  # labels is a tensor of shape (N,)
            correct += (predicted == labels).sum().item()

    print('Accuracy on test set: %d %%' % (100 * correct / total))
    return correct / total


if __name__ == '__main__':
    acc_list = []
    epoch_list = []
    for epoch in range(10):
        train(epoch)
        acc = test()  # test() returns accuracy, not loss
        epoch_list.append(epoch)
        acc_list.append(acc)
    plt.plot(epoch_list, acc_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.show()

Residual Network (ResNet): its purpose is to combat vanishing gradients (the residual connection is also called a skip connection).

Why gradients vanish: if a fixed-size convolution is stacked over and over, we would ideally expect training to keep improving with depth. But in the CIFAR-10 experiments that motivated ResNet, a 20-layer plain network actually trained better than a 56-layer one; more layers did not give better results, and a likely culprit is vanishing gradients.

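A toy sketch (not from the original post) of why the identity path in a skip connection keeps gradients alive even at depth 50:

import torch

depth = 50

# plain stack: each "layer" scales by a small factor, so the gradient shrinks to ~0
x = torch.randn(10, requires_grad=True)
y = x
for _ in range(depth):
    y = 0.1 * y
y.sum().backward()
print(x.grad.abs().mean())   # ~1e-50, effectively vanished

# residual stack: y = y + f(y) keeps an identity term in every local Jacobian
x2 = torch.randn(10, requires_grad=True)
y = x2
for _ in range(depth):
    y = y + 0.1 * y          # skip connection around the same "layer"
y.sum().backward()
print(x2.grad.abs().mean())  # (1.1)**50, about 117 -- nowhere near zero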
[Figure: a plain stack of layers (left) vs. a Residual Block with a skip connection (right)]

  1. How to implement a Residual Block (first of all, the convolutions inside the block must keep the image size unchanged, so that the input can be added to the output):

Note: in the block's return statement (see the code below), the sum x + y is formed first and only then passed through the activation; this is different from the first ReLU, which is applied before the addition. In symbols the block computes H(x) = ReLU(x + F(x)); the identity term x gives gradients a path that never shrinks, which is what counters vanishing.

import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import matplotlib.pyplot as plt

batch_size = 64

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

train_datasets = datasets.MNIST(root='./data/mnist/', train=True, download=False, transform=transform)
train_dataloader = DataLoader(dataset=train_datasets, shuffle=True, batch_size=batch_size)

test_datasets = datasets.MNIST(root='./data/mnist/', train=False, download=False, transform=transform)
test_dataloader = DataLoader(dataset=test_datasets, shuffle=False, batch_size=batch_size)


# define the Residual block
class ResidualBlock(torch.nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # inside a residual block the input and output channel counts must match; and since the spatial size must also stay the same, padding is needed
        self.conv1 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)

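# Sanity check (a sketch): the block preserves shape, so x + y is well-defined, e.g.
# ResidualBlock(channels=16)(torch.randn(1, 16, 12, 12)).shape == (1, 16, 12, 12)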

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.resblock1 = ResidualBlock(channels=16)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.resblock2 = ResidualBlock(channels=32)

        self.mp = torch.nn.MaxPool2d(2)

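        # with a 28x28 MNIST input the flattened features are 32 channels * 4 * 4 spatial = 512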
        self.fc = torch.nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = self.mp(F.relu(self.conv1(x)))
        x = self.resblock1(x)

        x = self.mp(F.relu(self.conv2(x)))
        x = self.resblock2(x)

        x = x.view(in_size, -1)
        x = self.fc(x)
        return x


model = Net()

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.05)


def train(epoch):
    running_loss = 0.0
    for batch_index, data in enumerate(train_dataloader, 0):
        inputs, target = data
        outputs = model(inputs)
        loss = criterion(outputs, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_index % 300 == 299:
            print('[%d,%5d] loss: %.3f' % (epoch + 1, batch_index + 1, running_loss / 300))
            running_loss = 0.0


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_dataloader:
            images, labels = data
            outputs = model(images)
            # take the index of the maximum value in each row, i.e. the predicted class
            _, predicted = torch.max(outputs.data, dim=1)  # dim=0 runs down the rows, dim=1 runs across the columns
            total += labels.size(0)  # labels is a tensor of shape (N,)
            correct += (predicted == labels).sum().item()

    print('Accuracy on test set: %d %%' % (100 * correct / total))
    return correct / total


if __name__ == '__main__':
    acc_list = []
    epoch_list = []
    for epoch in range(10):
        train(epoch)
        acc = test()  # test() returns accuracy, not loss
        epoch_list.append(epoch)
        acc_list.append(acc)
    plt.plot(epoch_list, acc_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.show()
