Convolutional Neural Networks (Advanced)

Inception

Importing libraries

import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms

# use gpu
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

Loading the MNIST dataset

# data
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])

tra_data = datasets.MNIST(root="./datasets/mnist", train=True, download=True, transform=transform)
test_data = datasets.MNIST(root="./datasets/mnist", train=False, download=True, transform=transform)

tra_loader = DataLoader(dataset=tra_data, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=False)
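
As a quick sanity check that the data pipeline works (assuming the MNIST files are available under ./datasets/mnist), you can inspect one batch:

# images have shape [64, 1, 28, 28], labels have shape [64]
images, labels = next(iter(tra_loader))
print(images.shape, labels.shape)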

Inception block

Structure: (Inception block diagram)

# Inception
class InceptionA(torch.nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()

        self.branch_pool_pool = torch.nn.AvgPool2d(kernel_size=3, padding=1, stride=1)    # average pooling
        self.branch_pool_conv = torch.nn.Conv2d(in_channels, 24, kernel_size=3, padding=1, stride=1)

        self.branch1x1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)

        self.branch5x5_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = torch.nn.Conv2d(16, 24, kernel_size=5, padding=2, stride=1)

        self.branch3x3_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = torch.nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = torch.nn.Conv2d(24, 24, kernel_size=3, padding=1)

    def forward(self, x):

        branch_pool = self.branch_pool_pool(x)
        branch_pool = self.branch_pool_conv(branch_pool)

        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)

        branches = [branch_pool, branch1x1, branch5x5, branch3x3]

        return torch.cat(branches, dim=1)

# # Check the Inception block's output channel count, to help set the parameters when defining the network below
# x = torch.randn(1, 1, 28, 28)
# model = InceptionA(1)
# print(model(x).size(1))
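
The four branches output 24, 16, 24 and 24 channels respectively, so concatenating along dim=1 yields 24 + 16 + 24 + 24 = 88 channels. This is where the 88 input channels of conv2 in the network below come from.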

Defining the network

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()

        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(88, 20, kernel_size=5)

        self.incep1 = InceptionA(10)
        self.incep2 = InceptionA(20)

        self.mp = torch.nn.MaxPool2d(2)
        self.relu = torch.nn.ReLU()

        self.linear = torch.nn.Linear(1408, 10)


    def forward(self, x):

        batch_size = x.size(0)    # get the batch size for flattening later

        x = self.mp(self.relu(self.conv1(x)))
        x = self.incep1(x)
        x = self.mp(self.relu(self.conv2(x)))
        x = self.incep2(x)

        x = x.view(batch_size, -1)
        x = self.linear(x)

        return x

# # Check the number of features after view, to help set the Linear layer's input size
# x = torch.randn(1, 1, 28, 28)
# model = Net()
# print(model(x).size(1))
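
Working through the shapes for a 28×28 input: conv1 (5×5) gives 10×24×24, max pooling gives 10×12×12, incep1 gives 88×12×12, conv2 (5×5) gives 20×8×8, max pooling gives 20×4×4, and incep2 gives 88×4×4. Flattening therefore produces 88 · 4 · 4 = 1408 features, which is why the Linear layer takes 1408 inputs.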

model = Net()
model = model.to(device)
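
As an optional extra check, you can print the number of trainable parameters:

# total number of trainable parameters
print(sum(p.numel() for p in model.parameters() if p.requires_grad))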

loss and optimizer

# loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
criterion = criterion.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
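
Note that CrossEntropyLoss combines LogSoftmax and NLLLoss internally, which is why Net.forward returns raw logits without a final softmax.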

The train function

def train(epoch):
    running_loss = 0.0
    for i, data in enumerate(tra_loader):
        inputs, targets = data
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()

        # forward
        y_pred = model(inputs)
        l = criterion(y_pred, targets)

        # backward
        l.backward()

        # update
        optimizer.step()

        running_loss += l.item()
        # print the average loss every 300 iterations
        if i % 300 == 299:
            print("[%d %5d]\tloss: %.3f" % (epoch+1, i+1, running_loss / 300))
            running_loss = 0.0
            

The test function

def test():
    total = 0
    correct = 0
    with torch.no_grad():
        for data in test_loader:
            x, labels = data
            x, labels = x.to(device), labels.to(device)

            outputs = model(x)  # predict

            total += labels.size(0)  # total number of samples
            _, predicted = torch.max(outputs.data, dim=1)   # index of the largest score per sample, i.e. the predicted class
            correct += (predicted == labels).sum().item()   # number of correctly predicted samples

    print("Accuracy on Test is %.2f %% [%d %d]" % (100 * correct / total, correct, total))
    return 100 * correct / total

Training, testing and plotting

if __name__ == '__main__':
    acc_list = []
    for epoch in range(10):
        train(epoch)
        acc = test()
        acc_list.append(acc)

    # plot accuracy per epoch
    acc_list = np.array(acc_list)
    plt.plot(range(10), acc_list)
    plt.xlabel('epoch')
    plt.ylabel('Accuracy')
    plt.show()
    plt.close()
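
If you want to reuse the trained network later, you can save its weights after training finishes (the file name below is just illustrative):

# save the learned parameters to disk
torch.save(model.state_dict(), './inception_mnist.pth')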

Residual Net

Importing libraries

import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

Loading the MNIST dataset

# data
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

tra_data = datasets.MNIST(root='./datasets/mnist', transform=transform, train=True, download=True)
test_data = datasets.MNIST(root='./datasets/mnist', transform=transform, train=False, download=True)

tra_loader = DataLoader(dataset=tra_data, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=False)

residual block

Structure: (residual block diagram)

# residual block
class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.relu = nn.ReLU()

        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = self.relu(self.conv1(x))
        y = self.conv2(y)
        return self.relu(x + y)   # apply the activation to (x + y)
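
As with the Inception block, a quick check (left commented out) confirms that a residual block preserves the input shape:

# # Check that a residual block preserves the input shape
# x = torch.randn(1, 16, 12, 12)
# print(ResidualBlock(16)(x).shape)   # torch.Size([1, 16, 12, 12])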

Defining the network

# Net
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.relu = nn.ReLU()

        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)

        self.linear = nn.Linear(512, 10)


    def forward(self, x):
        batch_size = x.size(0)

        x = self.mp(self.relu(self.conv1(x)))
        x = self.rblock1(x)
        x = self.mp(self.relu(self.conv2(x)))
        x = self.rblock2(x)

        x = x.view(batch_size, -1)
        x = self.linear(x)
        return x

# # Check the number of features after view, to help set the Linear layer's input size
# x = torch.randn(1, 1, 28, 28)
# model = Net()
# print(model(x).size(1))
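
Working through the shapes for a 28×28 input: conv1 (5×5) gives 16×24×24, max pooling gives 16×12×12, rblock1 keeps 16×12×12, conv2 (5×5) gives 32×8×8, max pooling gives 32×4×4, and rblock2 keeps 32×4×4. Flattening therefore produces 32 · 4 · 4 = 512 features, which is why the Linear layer takes 512 inputs.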

model = Net()
model = model.to(device)

loss and optimizer

# loss and optimizer
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)

The train function

def train(epoch):
    running_loss = 0.0
    for i, data in enumerate(tra_loader):
        inputs, targets = data
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()

        y_pred = model(inputs)
        l = criterion(y_pred, targets)

        l.backward()

        optimizer.step()

        running_loss += l.item()
        if i % 300 == 299:
            print('[%d %5d]\tloss: %.3f' % (epoch+1, i+1, running_loss / 300))
            running_loss = 0.0

The test function

def test():
    total = 0
    correct = 0
    with torch.no_grad():
        for data in test_loader:
            x, labels = data
            x, labels = x.to(device), labels.to(device)

            outputs = model(x)   # predict

            total += labels.size(0)  # total number of samples
            _, predicted = torch.max(outputs.data, dim=1)   # index of the largest score per sample, i.e. the predicted class
            correct += (predicted == labels).sum().item()   # number of correctly predicted samples

    print('Accuracy on Test is %.2f %% [%d %d]' % (100 * correct / total, correct, total))
    return 100 * correct / total

Training, testing and plotting

if __name__ == '__main__':
    acc_list = []
    for epoch in range(10):
        train(epoch)
        acc = test()
        acc_list.append(acc)

    # plot accuracy per epoch
    acc_list = np.array(acc_list)
    plt.plot(range(10), acc_list)
    plt.xlabel('epoch')
    plt.ylabel('Accuracy')
    plt.show()
    plt.close()
