PyTorch Deep Learning Practice (Bilibili course by 刘二大人), Lecture 11: Advanced CNN, Homework

Constant scaling: instead of a plain identity shortcut, the sum of the identity branch and the residual branch is scaled by 0.5 before the final activation, i.e. the block computes relu(0.5 * (x + F(x))). This is one of the shortcut variants studied in He et al., "Identity Mappings in Deep Residual Networks". Code:

import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, transform=transform, download=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)


class ResidualBlock(nn.Module):
    # The residual block must keep the output channel count equal to the input
    # channel count so that x and the branch output can be added element-wise
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # 3x3 kernels; padding=1 keeps the spatial size unchanged
        # First convolution
        self.conv1 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)
        # Second convolution
        self.conv2 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)

    def forward(self, x):
        # First convolution followed by activation
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        # Constant scaling: sum the two branches, scale by 0.5, then activate
        z = (x + y) * 0.5
        return F.relu(z)
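

# Quick sanity check (my addition, not part of the original homework): the
# block must preserve the tensor shape, otherwise the element-wise sum in
# forward() would fail.
_block = ResidualBlock(16)
_x = torch.randn(4, 16, 12, 12)
assert _block(_x).shape == _x.shape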
        

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)

        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)


        # Flattened feature size: 28x28 input -> conv1 (5x5, no padding) -> 24x24
        # -> maxpool -> 12x12 -> conv2 -> 8x8 -> maxpool -> 4x4; the residual
        # blocks preserve size, so 32 channels * 4 * 4 = 512
        self.fc = torch.nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.rblock1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x


net = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)


def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, targets = data
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        # forward
        y_pred = net(inputs)
        # backward
        loss = criterion(y_pred, targets)
        loss.backward()
        # update
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print("[%d, %5d] loss: %.3f" % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


accuracy = []


def test():
    correct = 0
    total = 0

    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (labels == predicted).sum().item()

        print("accuracy on test set: %d %% [%d/%d]" % (100 * correct / total, correct, total))
        accuracy.append(100 * correct / total)


if __name__ == "__main__":
    for epoch in range(10):
        train(epoch)
        test()
    plt.plot(range(10), accuracy)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.grid()
    plt.show()
    print("done")
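
A handy way to confirm the in_features value of self.fc, instead of deriving 32 * 4 * 4 = 512 by hand, is to trace a dummy input through the convolutional part of the network. A minimal sketch, assuming the Net class defined above (net_probe is just an illustrative name):

net_probe = Net()
with torch.no_grad():
    dummy = torch.randn(1, 1, 28, 28)             # one MNIST-sized image
    h = F.relu(net_probe.mp(net_probe.conv1(dummy)))
    h = net_probe.rblock1(h)
    h = F.relu(net_probe.mp(net_probe.conv2(h)))
    h = net_probe.rblock2(h)
    print(h.view(1, -1).shape)                    # torch.Size([1, 512])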

Results: (accuracy plot omitted)

Conv shortcut: the identity shortcut is replaced by a 1x1 convolution on x, so the block computes relu(conv1x1(x) + F(x)). Code:

import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, transform=transform, download=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)


class ResidualBlock(nn.Module):
    # The residual block must keep the output channel count equal to the input
    # channel count so that the two branches can be added element-wise
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # 3x3 kernels; padding=1 keeps the spatial size unchanged
        # First convolution
        self.conv1 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)
        # Second convolution
        self.conv2 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)
        # 1x1 convolution on the shortcut path (the "conv shortcut")
        self.conv3 = nn.Conv2d(channels, channels, kernel_size=1)


    def forward(self, x):
        # First convolution followed by activation
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        # Conv shortcut: transform x with the 1x1 convolution, then sum and activate
        z = self.conv3(x)
        return F.relu(z + y)
        

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)

        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)


        # Flattened feature size: 28x28 input -> conv1 (5x5, no padding) -> 24x24
        # -> maxpool -> 12x12 -> conv2 -> 8x8 -> maxpool -> 4x4; the residual
        # blocks preserve size, so 32 channels * 4 * 4 = 512
        self.fc = torch.nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.rblock1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x


net = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)


def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, targets = data
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        # forward
        y_pred = net(inputs)
        # backward
        loss = criterion(y_pred, targets)
        loss.backward()
        # update
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print("[%d, %5d] loss: %.3f" % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


accuracy = []


def test():
    correct = 0
    total = 0

    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (labels == predicted).sum().item()

        print("accuracy on test set: %d %% [%d/%d]" % (100 * correct / total, correct, total))
        accuracy.append(100 * correct / total)


if __name__ == "__main__":
    for epoch in range(10):
        train(epoch)
        test()
    plt.plot(range(10), accuracy)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.grid()
    plt.show()
    print("done")

Results: (accuracy plot omitted)
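
In this homework the 1x1 shortcut convolution keeps the channel count unchanged, so it only re-weights the shortcut signal. In the original ResNet design, a 1x1 projection shortcut is used precisely when the residual branch changes the number of channels, so that the two branches can still be summed. A minimal sketch of such a block (my own illustration; ProjectionResidualBlock is a hypothetical name, not from the lecture):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ProjectionResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(ProjectionResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        # 1x1 projection so the shortcut matches the branch's channel count
        self.shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(self.shortcut(x) + y)

# Example: 16-channel input, 32-channel output, spatial size preserved
block = ProjectionResidualBlock(16, 32)
print(block(torch.randn(1, 16, 12, 12)).shape)   # torch.Size([1, 32, 12, 12])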

 
