2 卷积神经网络CNN

本章代码均在kaggle上运行成功

LeNet-5

import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from matplotlib_inline import backend_inline

# Render figures as crisp SVG inside the notebook.
backend_inline.set_matplotlib_formats('svg')
%matplotlib inline
# Prepare the data: convert PIL images to tensors, then normalize with the
# standard MNIST mean/std (0.1307 / 0.3081).
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(0.1307, 0.3081)])

# Download MNIST into the Kaggle working directory (train and test splits).
train_dataset = datasets.MNIST(root='/kaggle/working/',
                               train=True,
                               transform=transform,
                               download=True)
test_dataset = datasets.MNIST(root='/kaggle/working/',
                              train=False,
                              transform=transform,
                              download=True)

# Batch loaders: shuffle only the training set; keep test order fixed.
train_loader = DataLoader(dataset=train_dataset, batch_size=256, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=256, shuffle=False)
# 构建网络
class CNN(nn.Module):
    """LeNet-5 style convolutional network for 28x28 single-channel images.

    Input:  (N, 1, 28, 28)
    Output: (N, 10) raw class scores (logits).
    """

    def __init__(self):
        super().__init__()
        # Build the layer list explicitly, then wrap it in nn.Sequential.
        layers = [
            # C1: 1 -> 6 channels, 5x5 kernel; padding=2 keeps 28x28.
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, padding=2),
            nn.Tanh(),
            # S2: 2x2 average pooling halves the spatial size to 14x14.
            nn.AvgPool2d(kernel_size=2, stride=2),
            # C3: 6 -> 16 channels, 5x5 kernel, no padding -> 10x10.
            nn.Conv2d(6, 16, kernel_size=5),
            nn.Tanh(),
            # S4: pool down to 5x5.
            nn.AvgPool2d(kernel_size=2, stride=2),
            # C5: 16 -> 120 channels, 5x5 kernel -> 1x1 spatial.
            nn.Conv2d(16, 120, kernel_size=5),
            nn.Tanh(),
            nn.Flatten(),
            # F6 and the 10-way output layer.
            nn.Linear(120, 84),
            nn.Tanh(),
            nn.Linear(84, 10),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Run the input batch through the LeNet-5 stack."""
        return self.net(x)
# Inspect the network structure: push a dummy batch through the model one
# layer at a time and print each layer's output shape.
X = torch.rand(size=(1, 1, 28, 28))
for layer in CNN().net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: \t', X.shape)
Conv2d output shape: 	 torch.Size([1, 6, 28, 28])
Tanh output shape: 	 torch.Size([1, 6, 28, 28])
AvgPool2d output shape: 	 torch.Size([1, 6, 14, 14])
Conv2d output shape: 	 torch.Size([1, 16, 10, 10])
Tanh output shape: 	 torch.Size([1, 16, 10, 10])
AvgPool2d output shape: 	 torch.Size([1, 16, 5, 5])
Conv2d output shape: 	 torch.Size([1, 120, 1, 1])
Tanh output shape: 	 torch.Size([1, 120, 1, 1])
Flatten output shape: 	 torch.Size([1, 120])
Linear output shape: 	 torch.Size([1, 84])
Tanh output shape: 	 torch.Size([1, 84])
Linear output shape: 	 torch.Size([1, 10])
# Use GPU acceleration when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Instantiate the model.
model = CNN()

# Split batches across multiple GPUs with DataParallel when more than one
# device is present (the Kaggle run used two GPUs).
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)

# Move the model's parameters to the chosen device.
model.to(device)

# Loss and optimizer.  CrossEntropyLoss expects raw logits, which is why the
# network has no final softmax.  lr=0.9 is unusually high for SGD; it works
# here presumably thanks to the large batch size (256) — tune with care.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.9)

# Training hyper-parameters.
epochs = 5
losses = []  # per-batch loss, for plotting the training curve

# Training loop: forward, loss, backward, parameter update per batch.
for epoch in range(epochs):
    for (x, y) in train_loader:
        x, y = x.to(device), y.to(device)
        Pred = model(x)
        loss = loss_fn(Pred, y)
        losses.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# Plot the per-batch training loss.
plt.plot(range(len(losses)), losses)
plt.show()
Let's use 2 GPUs!

(原文此处为插图:LeNet-5 训练过程中每个批次的损失曲线)

# Evaluate the trained network on the held-out test set.
correct = 0
total = 0

model.eval()  # inference mode (no-op for LeNet, but good practice)
with torch.no_grad():  # no gradients needed: faster, less memory
    for (x, y) in test_loader:
        # Reuse the `device` chosen during training instead of hard-coding
        # 'cuda:0' — the original crashed on CPU-only machines.
        x, y = x.to(device), y.to(device)
        Pred = model(x)
        # torch.max over dim=1 returns (max logit, argmax index); the index
        # is the predicted class.
        _, predicted = torch.max(Pred.data, dim=1)
        correct += torch.sum(predicted == y).item()  # .item(): plain Python int
        total += y.size(0)

print(f'测试集准确率:{100 * correct / total}%') # better than the plain MLP of the previous section
测试集准确率:98.5199966430664%

AlexNet

只是网络结构稍作改变而已。这里运行较久,建议用GPU。

import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from matplotlib_inline import backend_inline

# Render figures as crisp SVG inside the notebook.
backend_inline.set_matplotlib_formats('svg')
%matplotlib inline

# AlexNet expects large inputs, so upscale 28x28 Fashion-MNIST to 224x224.
# NOTE: the mean/std 0.1307/0.3081 are the MNIST statistics, reused here for
# Fashion-MNIST as-is from the previous section.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize(224),
    transforms.Normalize(0.1307, 0.3081)
])
# Download the Fashion-MNIST train and test splits.
train_Data = datasets.FashionMNIST(root='/kaggle/working/',
                                   train=True,
                                   download=True,
                                   transform=transform)
test_Data = datasets.FashionMNIST(root='/kaggle/working/',
                                  train=False,
                                  download=True,
                                  transform=transform)

# Batch loaders: shuffle only the training set.
train_loader = DataLoader(train_Data, shuffle=True, batch_size=128)
test_loader = DataLoader(test_Data, shuffle=False, batch_size=128)
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to /kaggle/working/FashionMNIST/raw/train-images-idx3-ubyte.gz


100%|██████████| 26421880/26421880 [00:05<00:00, 5088751.35it/s] 


Extracting /kaggle/working/FashionMNIST/raw/train-images-idx3-ubyte.gz to /kaggle/working/FashionMNIST/raw

Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz to /kaggle/working/FashionMNIST/raw/train-labels-idx1-ubyte.gz


100%|██████████| 29515/29515 [00:00<00:00, 267832.55it/s]


Extracting /kaggle/working/FashionMNIST/raw/train-labels-idx1-ubyte.gz to /kaggle/working/FashionMNIST/raw

Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz to /kaggle/working/FashionMNIST/raw/t10k-images-idx3-ubyte.gz


100%|██████████| 4422102/4422102 [00:00<00:00, 5078567.66it/s]


Extracting /kaggle/working/FashionMNIST/raw/t10k-images-idx3-ubyte.gz to /kaggle/working/FashionMNIST/raw

Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz to /kaggle/working/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz


100%|██████████| 5148/5148 [00:00<00:00, 24962169.93it/s]

Extracting /kaggle/working/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz to /kaggle/working/FashionMNIST/raw
# 构建网络
class CNN(nn.Module):
    """AlexNet adapted to 1-channel 224x224 Fashion-MNIST images, 10 classes.

    Input:  (N, 1, 224, 224)
    Output: (N, 10) class logits.
    """

    def __init__(self):
        super().__init__()
        # Convolutional feature extractor.
        conv_stage = [
            # 224 -> 54 after an 11x11/stride-4 conv, then 54 -> 26 via pool.
            nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # 26 -> 26 (padded 5x5 conv), then 26 -> 12 via pool.
            nn.Conv2d(96, 256, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Three 3x3 convolutions that keep the spatial size at 12x12.
            nn.Conv2d(256, 384, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(384, 384, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            # Final pool: 12 -> 5.
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        # Fully-connected classifier head with dropout regularization.
        fc_stage = [
            nn.Flatten(),
            # 256 channels * 5 * 5 spatial = 6400 features.
            nn.Linear(6400, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 10),
        ]
        self.net = nn.Sequential(*conv_stage, *fc_stage)

    def forward(self, x):
        """Map a batch of images to class logits."""
        return self.net(x)


# Use GPU acceleration when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Instantiate the model.
model = CNN()
# Split batches across multiple GPUs with DataParallel when available.
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)
# Move the model's parameters to the chosen device.
model.to(device)

# CrossEntropyLoss expects raw logits (no softmax in the network).
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)


# 训练函数
def train(epoch):
    """Run one training epoch, printing the average loss every 300 batches.

    Relies on the module-level train_loader, model, loss_fn, optimizer and
    device set up above.
    """
    running_loss = 0.0
    for batch_idx, (x, y) in enumerate(train_loader):
        x, y = x.to(device), y.to(device)

        # Standard step: clear gradients, forward, backward, update.
        optimizer.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report a 300-batch average of the loss, then reset the accumulator.
        if (batch_idx + 1) % 300 == 0:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


# 测试函数
def test():
    """Evaluate on the test set, append the accuracy (%) to accuracy_list.

    Relies on the module-level test_loader, model, device and accuracy_list.
    """
    correct = 0
    total = 0
    # This network contains Dropout layers: they must be disabled during
    # evaluation or predictions are randomly perturbed.  The original code
    # forgot to switch modes.
    model.eval()
    with torch.no_grad():  # no gradients needed for inference
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # torch.max over dim=1 returns (max logit, argmax index); the
            # index is the predicted class.
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += torch.sum((predicted == labels)).item()  # count hits
    model.train()  # restore training mode (dropout on) for the next epoch
    accuracy = 100 * correct / total
    accuracy_list.append(accuracy)
    print('Accuracy on test set: %d %%' % accuracy)


if __name__ == '__main__':
    accuracy_list = []
    # Evaluate on the test set after every epoch to track progress.
    for epoch in range(10):
        train(epoch)
        test()
    # Plot test accuracy versus epoch.
    plt.plot(accuracy_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.grid()
    plt.show()
Let's use 2 GPUs!
[1,   300] loss: 1.432
Accuracy on test set: 78 %
[2,   300] loss: 0.465
Accuracy on test set: 85 %
[3,   300] loss: 0.352
Accuracy on test set: 86 %
[4,   300] loss: 0.306
Accuracy on test set: 87 %
[5,   300] loss: 0.275
Accuracy on test set: 89 %
[6,   300] loss: 0.248
Accuracy on test set: 88 %
[7,   300] loss: 0.227
Accuracy on test set: 89 %
[8,   300] loss: 0.212
Accuracy on test set: 90 %
[9,   300] loss: 0.201
Accuracy on test set: 90 %
[10,   300] loss: 0.186
Accuracy on test set: 90 %

(原文此处为插图:AlexNet 每个 epoch 的测试集准确率曲线)

GoogLeNet

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
import matplotlib.pyplot as plt
%matplotlib inline

# Render figures as crisp SVG inside the notebook.
from matplotlib_inline import backend_inline
backend_inline.set_matplotlib_formats('svg')
# Prepare the dataset (28x28 inputs this time — no resize needed).
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(0.1307, 0.3081)])
# download=False: assumes the AlexNet section above already downloaded
# Fashion-MNIST into /kaggle/working/ — TODO confirm when running standalone.
train_Data = datasets.FashionMNIST(root='/kaggle/working/',
                                   train=True,
                                   download=False,
                                   transform=transform)
test_Data = datasets.FashionMNIST(root='/kaggle/working/',
                                  train=False,
                                  download=False,
                                  transform=transform)

# Batch loaders: shuffle only the training set.
train_loader = DataLoader(train_Data, shuffle=True, batch_size=128)
test_loader = DataLoader(test_Data, shuffle=False, batch_size=128)

# Simplified Inception block: four parallel branches whose outputs are
# concatenated along the channel dimension (16 + 24 + 24 + 24 = 88 channels).
# Every branch preserves the spatial size, so concatenation is valid.
class Inception(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        # Branch 1: plain 1x1 convolution.
        self.branch1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        # Branch 2: 1x1 bottleneck followed by two padded 3x3 convolutions.
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channels, 16, kernel_size=1),
            nn.Conv2d(16, 24, kernel_size=3, padding=1),
            nn.Conv2d(24, 24, kernel_size=3, padding=1),
        )
        # Branch 3: 1x1 bottleneck followed by a padded 5x5 convolution.
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_channels, 16, kernel_size=1),
            nn.Conv2d(16, 24, kernel_size=5, padding=2),
        )
        # Branch 4: plain 1x1 convolution.
        self.branch4 = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        """Apply all four branches to x and concatenate along channels."""
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return torch.cat([branch(x) for branch in branches], 1)
    
class CNN(nn.Module):
    """Small GoogLeNet-style classifier for (N, 1, 28, 28) inputs.

    Two conv+pool stages, each followed by an Inception block, then a
    linear head producing 10 class logits.
    """

    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            # 28 -> 24 (5x5 conv) -> 12 (pool); 1 -> 10 channels.
            nn.Conv2d(1, 10, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Inception widens 10 -> 88 channels at 12x12.
            Inception(in_channels=10),
            # 12 -> 8 (5x5 conv) -> 4 (pool); 88 -> 20 channels.
            nn.Conv2d(88, 20, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Second Inception: 20 -> 88 channels at 4x4.
            Inception(in_channels=20),
            nn.Flatten(),
            # 88 channels * 4 * 4 = 1408 features -> 10 logits.
            nn.Linear(1408, 10),
        )

    def forward(self, x):
        """Map a batch of images to class logits."""
        return self.net(x)
    
# Inspect the network structure: push a dummy batch through the model one
# layer at a time and print each layer's output shape.
X = torch.rand(size= (1, 1, 28, 28))
for layer in CNN().net:
    X = layer(X)
    print( layer.__class__.__name__, 'output shape: \t', X.shape )
Conv2d output shape: 	 torch.Size([1, 10, 24, 24])
ReLU output shape: 	 torch.Size([1, 10, 24, 24])
MaxPool2d output shape: 	 torch.Size([1, 10, 12, 12])
Inception output shape: 	 torch.Size([1, 88, 12, 12])
Conv2d output shape: 	 torch.Size([1, 20, 8, 8])
ReLU output shape: 	 torch.Size([1, 20, 8, 8])
MaxPool2d output shape: 	 torch.Size([1, 20, 4, 4])
Inception output shape: 	 torch.Size([1, 88, 4, 4])
Flatten output shape: 	 torch.Size([1, 1408])
Linear output shape: 	 torch.Size([1, 10])
# Use GPU acceleration when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Instantiate the model.
model = CNN()
# Split batches across multiple GPUs with DataParallel when available.
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)
# Move the model's parameters to the chosen device.
model.to(device)

# CrossEntropyLoss expects raw logits; SGD here uses a gentler lr plus momentum.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


# 训练函数
def train(epoch):
    """Run one training epoch, printing the average loss every 300 batches.

    Relies on the module-level train_loader, model, loss_fn, optimizer and
    device set up above.
    """
    running_loss = 0.0
    for batch_idx, (x, y) in enumerate(train_loader):
        x, y = x.to(device), y.to(device)

        # Standard step: clear gradients, forward, backward, update.
        optimizer.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report a 300-batch average of the loss, then reset the accumulator.
        if (batch_idx + 1) % 300 == 0:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


# 测试函数
def test():
    """Measure test-set accuracy, record it in accuracy_list and print it.

    Relies on the module-level test_loader, model, device and accuracy_list.
    """
    correct = 0
    total = 0
    with torch.no_grad():  # inference only: no gradients needed
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # argmax over the class dimension gives the predicted label
            predicted = outputs.data.max(dim=1)[1]
            total += labels.size(0)
            correct += (predicted == labels).sum().item()  # count hits
    accuracy = 100 * correct / total
    accuracy_list.append(accuracy)
    print('Accuracy on test set: %.3f %%' % accuracy)


if __name__ == '__main__':
    accuracy_list = []
    # Evaluate on the test set after every epoch to track progress.
    for epoch in range(10):
        train(epoch)
        test()
    # Plot test accuracy versus epoch.
    plt.plot(accuracy_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.grid()
    plt.show()
Let's use 2 GPUs!
[1,   300] loss: 0.937
Accuracy on test set: 79.810 %
[2,   300] loss: 0.520
Accuracy on test set: 82.550 %
[3,   300] loss: 0.437
Accuracy on test set: 85.050 %
[4,   300] loss: 0.399
Accuracy on test set: 85.750 %
[5,   300] loss: 0.371
Accuracy on test set: 86.450 %
[6,   300] loss: 0.350
Accuracy on test set: 87.140 %
[7,   300] loss: 0.337
Accuracy on test set: 87.560 %
[8,   300] loss: 0.323
Accuracy on test set: 87.580 %
[9,   300] loss: 0.312
Accuracy on test set: 87.650 %
[10,   300] loss: 0.305
Accuracy on test set: 88.240 %

(原文此处为插图:GoogLeNet 每个 epoch 的测试集准确率曲线)

ResNet

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
import matplotlib.pyplot as plt
%matplotlib inline

# Render figures as crisp SVG inside the notebook.
from matplotlib_inline import backend_inline
backend_inline.set_matplotlib_formats('svg')
# Prepare the dataset (28x28 inputs — no resize needed).
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize(0.1307, 0.3081)])
# download=False: assumes Fashion-MNIST was already downloaded by an earlier
# section into /kaggle/working/ — TODO confirm when running standalone.
train_Data = datasets.FashionMNIST(root='/kaggle/working/',
                                   train=True,
                                   download=False,
                                   transform=transform)
test_Data = datasets.FashionMNIST(root='/kaggle/working/',
                                  train=False,
                                  download=False,
                                  transform=transform)

# Batch loaders: shuffle only the training set.
train_loader = DataLoader(train_Data, shuffle=True, batch_size=128)
test_loader = DataLoader(test_Data, shuffle=False, batch_size=128)

# Basic residual block: two padded 3x3 convolutions plus a skip connection.
# Channel count and spatial size are unchanged, so x and net(x) can be summed
# directly without a projection shortcut.
class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
        )

    def forward(self, x):
        """Return relu(x + F(x)) — the residual connection is the key idea."""
        residual = self.net(x)
        # Adding x before the final ReLU lets gradients flow straight
        # through the block, which is what makes deep ResNets trainable.
        return nn.functional.relu(x + residual)
    
class CNN(nn.Module):
    """ResNet-style classifier for (N, 1, 28, 28) inputs.

    Two conv+pool stages, each followed by a residual block, then a linear
    head producing 10 class logits.
    """

    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            # 28 -> 24 (5x5 conv) -> 12 (pool); 1 -> 16 channels.
            nn.Conv2d(1, 16, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2),
            ResidualBlock(16),
            # 12 -> 8 (5x5 conv) -> 4 (pool); 16 -> 32 channels.
            nn.Conv2d(16, 32, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2),
            ResidualBlock(32),
            nn.Flatten(),
            # 32 channels * 4 * 4 = 512 features -> 10 logits.
            nn.Linear(512, 10),
        )

    def forward(self, x):
        """Map a batch of images to class logits."""
        return self.net(x)
    
# Inspect the network structure: push a dummy batch through the model one
# layer at a time and print each layer's output shape.
X = torch.rand(size= (1, 1, 28, 28))
for layer in CNN().net:
    X = layer(X)
    print( layer.__class__.__name__, 'output shape: \t', X.shape )
Conv2d output shape: 	 torch.Size([1, 16, 24, 24])
ReLU output shape: 	 torch.Size([1, 16, 24, 24])
MaxPool2d output shape: 	 torch.Size([1, 16, 12, 12])
ResidualBlock output shape: 	 torch.Size([1, 16, 12, 12])
Conv2d output shape: 	 torch.Size([1, 32, 8, 8])
ReLU output shape: 	 torch.Size([1, 32, 8, 8])
MaxPool2d output shape: 	 torch.Size([1, 32, 4, 4])
ResidualBlock output shape: 	 torch.Size([1, 32, 4, 4])
Flatten output shape: 	 torch.Size([1, 512])
Linear output shape: 	 torch.Size([1, 10])
# Use GPU acceleration when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Instantiate the model.
model = CNN()
# Split batches across multiple GPUs with DataParallel when available.
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)
# Move the model's parameters to the chosen device.
model.to(device)

# CrossEntropyLoss expects raw logits; SGD with momentum for training.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


# 训练函数
def train(epoch):
    """Run one training epoch, printing the average loss every 300 batches.

    Relies on the module-level train_loader, model, loss_fn, optimizer and
    device set up above.
    """
    running_loss = 0.0
    for batch_idx, (x, y) in enumerate(train_loader):
        x, y = x.to(device), y.to(device)

        # Standard step: clear gradients, forward, backward, update.
        optimizer.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report a 300-batch average of the loss, then reset the accumulator.
        if (batch_idx + 1) % 300 == 0:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


# 测试函数
def test():
    """Measure test-set accuracy, record it in accuracy_list and print it.

    Relies on the module-level test_loader, model, device and accuracy_list.
    """
    correct = 0
    total = 0
    with torch.no_grad():  # inference only: no gradients needed
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # argmax over the class dimension gives the predicted label
            predicted = outputs.data.max(dim=1)[1]
            total += labels.size(0)
            correct += (predicted == labels).sum().item()  # count hits
    accuracy = 100 * correct / total
    accuracy_list.append(accuracy)
    print('Accuracy on test set: %.3f %%' % accuracy)


if __name__ == '__main__':
    accuracy_list = []
    # Evaluate on the test set after every epoch to track progress.
    for epoch in range(10):
        train(epoch)
        test()
    # Plot test accuracy versus epoch.
    plt.plot(accuracy_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.grid()
    plt.show()
Let's use 2 GPUs!
[1,   300] loss: 0.595
Accuracy on test set: 84.480 %
[2,   300] loss: 0.338
Accuracy on test set: 87.170 %
[3,   300] loss: 0.288
Accuracy on test set: 88.620 %
[4,   300] loss: 0.264
Accuracy on test set: 88.880 %
[5,   300] loss: 0.245
Accuracy on test set: 89.740 %
[6,   300] loss: 0.233
Accuracy on test set: 90.020 %
[7,   300] loss: 0.217
Accuracy on test set: 89.720 %
[8,   300] loss: 0.206
Accuracy on test set: 89.990 %
[9,   300] loss: 0.194
Accuracy on test set: 90.500 %
[10,   300] loss: 0.188
Accuracy on test set: 89.310 %

(原文此处为插图:ResNet 每个 epoch 的测试集准确率曲线)

其他相关文章:
《PyTorch 深度学习实践》第10讲 卷积神经网络(基础篇)
《PyTorch 深度学习实践》第11讲 卷积神经网络(高级篇)
7.6 残差网络(ResNet)——动手学深度学习
Deep Residual Learning for Image Recognition

  • 3
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值