# PyTorch Deep Learning Practice — 11: Convolutional Neural Networks (Advanced part 2: ResNet)
#
# Based on the Bilibili course by "刘二大人" (video: 11. Convolutional Neural Networks, Advanced)

'''
Resnet残差网络(本代码不是标准的Resnet结构,只是跟随视频搭建的简化网络)
'''

import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

batch_size = 64
# Normalize with the MNIST channel mean/std (0.1307, 0.3081) after ToTensor scales pixels to [0, 1].
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ] )

# Training data is shuffled every epoch; the test loader keeps a fixed order for reproducible evaluation.
train_dataset = datasets.MNIST(root="../dataset/mnist", train=True, download=True, transform=transform)
train_loader = DataLoader(dataset=train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root="../dataset/mnist", train=False, transform=transform)
test_loader = DataLoader(dataset=test_dataset, shuffle=False, batch_size=batch_size)

# Residual unit: the output is F(x) + x, so F must preserve the tensor's
# shape exactly (channels, height, width). kernel_size=3 with padding=1
# keeps the spatial size, and in/out channel counts are equal.
class ResidualBlock(torch.nn.Module):
    """Two-convolution residual block: relu(conv2(relu(conv1(x))) + x)."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # Same channel count and "same" padding so x and F(x) line up for the skip add.
        self.conv1 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.active = torch.nn.ReLU()

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.active(out)
        out = self.conv2(out)
        # Skip connection: add the input back before the final activation.
        return self.active(out + residual)

class Model(torch.nn.Module):
    """Small ResNet-style classifier for 28x28 single-channel MNIST digits.

    Pipeline: conv(1->16, k5) -> relu -> maxpool -> residual block ->
    conv(16->32, k5) -> relu -> maxpool -> residual block ->
    flatten -> linear(512 -> 10 logits).
    """

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.active = torch.nn.ReLU()
        self.maxpool = torch.nn.MaxPool2d(2)
        self.resblock1 = ResidualBlock(16)
        self.resblock2 = ResidualBlock(32)
        # 28x28 -> conv k5 -> 24x24 -> pool -> 12x12 -> conv k5 -> 8x8 -> pool -> 4x4;
        # 32 channels * 4 * 4 = 512 features feed the classifier head.
        self.fc = torch.nn.Linear(512, 10)

    def forward(self, x):
        out = self.maxpool(self.active(self.conv1(x)))
        out = self.resblock1(out)
        out = self.maxpool(self.active(self.conv2(out)))
        out = self.resblock2(out)
        # Flatten everything except the batch dimension for the linear layer.
        out = out.view(out.size(0), -1)
        return self.fc(out)

model = Model()

# CrossEntropyLoss expects raw logits — the model's forward applies no softmax.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

loss_list = []  # per-batch training losses across all epochs, used for plotting
def train(epoch):
    """Run one training epoch; print the mean loss of every 300 mini-batches.

    Args:
        epoch: zero-based epoch index, used only in the progress printout.
    """
    loss_sum = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        outputs = model(inputs)
        loss = criterion(outputs, labels)

        loss_sum += loss.item()
        loss_list.append(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 300 == 299:
            # BUG FIX: the original printed loss_sum/i (denominator off by one:
            # 299 instead of 300) and never reset the accumulator, so later
            # printouts averaged over all earlier batches of the epoch too.
            print("[%d %5d] loss:%.3f" % (epoch + 1, i + 1, loss_sum / 300))
            loss_sum = 0.0

accuracy = []  # test-set accuracy (percent) recorded after each epoch, for plotting
def test():
    """Evaluate the model on the test set; print and record accuracy (percent)."""
    correct = 0
    total = 0
    # Inference only: no_grad skips autograd graph construction, saving memory and time.
    with torch.no_grad():
        for (images, targets) in test_loader:
            y_pred = model(images)
            # Predicted class = index of the largest logit along the class dimension.
            _, predicted = torch.max(y_pred.data, dim=1)
            total += targets.size(0)
            correct += (predicted == targets).sum().item()
    print("Accuracy on test data:%.3f %% [%d %d]" % (100 * correct / total, correct, total))
    accuracy.append(100 * correct / total)

if __name__=="__main__":
    for epoch in range(10):
        train(epoch)
        test()

plt.subplot(121)
plt.plot(range(len(loss_list)), loss_list)
plt.xlabel("step")
plt.ylabel("loss")
plt.title("train")

plt.subplot(122)
plt.plot(range(epoch+1), accuracy)
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.title("test")
plt.show()

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值