PyTorch: Implementing CIFAR10 classification with AlexNet

Full code: https://github.com/SPECTRELWF/pytorch-cnn-study

Network overview

AlexNet is one of the most classic network architectures in computer vision. Introduced in 2012, it won a number of competitions that year. Its architecture is shown below:
[Figure: AlexNet network architecture]

The architecture is fairly simple: five convolutional layers followed by three fully connected layers. The original authors split training across multiple GPUs; the paper covers the details.
AlexNet paper: http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf
The authors used the ReLU activation function and techniques such as Dropout to combat overfitting; again, see the paper for more.
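The simplified model in this post leaves Dropout out. As a hedged sketch of what the paper describes (not the code used here), the original AlexNet classifier head looks roughly like this:

import torch.nn as nn

# Sketch of the original AlexNet head: Dropout (p = 0.5) is applied
# before each of the first two fully connected layers.
classifier = nn.Sequential(
    nn.Dropout(p=0.5),
    nn.Linear(256 * 6 * 6, 4096),  # 256 feature maps of size 6x6
    nn.ReLU(inplace=True),
    nn.Dropout(p=0.5),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Linear(4096, 1000),         # 1000 ImageNet classes
)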

Dataset

This post uses the CIFAR10 dataset. For an introduction to the dataset, see my other article:
http://liuweifeng.top:8090/archives/python%E8%AF%BB%E5%8F%96cifar10%E6%95%B0%E6%8D%AE%E9%9B%86%E5%B9%B6%E5%B0%86%E6%95%B0%E6%8D%AE%E9%9B%86%E8%BD%AC%E6%8D%A2%E4%B8%BApng%E6%A0%BC%E5%BC%8F%E5%AD%98%E5%82%A8

Defining the network

Define the layers one by one, following the architecture diagram. Convolutional layers 1, 2, and 5 are each followed by a max-pooling layer and a ReLU activation (a ReLU is also added after layers 3 and 4, since consecutive convolutions with no nonlinearity in between collapse into a single linear map). The five convolutional layers produce a feature representation of the image, which is flattened and fed into the fully connected layers for classification. A shape sanity check follows the class definition below.

#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Author:WeiFeng Liu
# @Time: 2021/11/2 下午3:25

import torch.nn as nn


class AlexNet(nn.Module):
    def __init__(self, width_mult=1):
        super(AlexNet, self).__init__()
        # Define the convolutional blocks one by one
        self.layer1 = nn.Sequential(
            # Convolution: the input is a 3x32x32 CIFAR10 image
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            # Max pooling halves the spatial resolution; the number of
            # channels is unchanged
            nn.MaxPool2d(kernel_size=2, stride=2),
            # ReLU activation
            nn.ReLU(inplace=True),
        )

        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
        )

        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            # ReLU added so that back-to-back convolutions do not reduce
            # to a single linear operation
            nn.ReLU(inplace=True),
        )

        self.layer4 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        self.layer5 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
        )

        # Fully connected layers; layer5 outputs 256 feature maps of size 3x3
        self.fc1 = nn.Linear(256 * 3 * 3, 1024)
        self.fc2 = nn.Linear(1024, 512)
        # Ten outputs, one per CIFAR10 class
        self.fc3 = nn.Linear(512, 10)

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        # Flatten the 256 x 3 x 3 feature maps for the fully connected layers
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)

        return x
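
As referenced above, a quick sanity check confirms the flattened feature size that fc1 expects (a sketch assuming the AlexNet class defined here):

import torch

# Pass a dummy CIFAR10-sized image through the convolutional stack and
# confirm that it produces 256 feature maps of size 3x3.
net = AlexNet()
x = torch.randn(1, 3, 32, 32)
for layer in (net.layer1, net.layer2, net.layer3, net.layer4, net.layer5):
    x = layer(x)
print(x.shape)  # torch.Size([1, 256, 3, 3]), so fc1 takes 256 * 3 * 3 inputs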


Training

#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Author:WeiFeng Liu
# @Time: 2021/11/4 下午12:59


import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from alexnet import AlexNet
from utils import plot_curve
from dataload.cifar10_dataload import CIFAR10_dataset
from torch.utils.data import DataLoader

# Use the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
epochs = 50
batch_size = 256
lr = 0.01

transform = transforms.Compose([
    transforms.Resize([32,32]),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    # transforms.Normalize(mean=[.5,.5,.5],std=[.5,.5,.5]),
    ])

train_dataset = CIFAR10_dataset(r'/home/lwf/code/pytorch学习/alexnet-CIFAR10/dataset/train',transform=transform)
# print(train_dataset[0])
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle = True,)
net = AlexNet().to(device)  # .to(device) also works when no GPU is present
loss_func = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),lr=lr,momentum=0.9)

train_loss = []
for epoch in range(epochs):
    sum_loss = 0
    for batch_idx,(x,y) in enumerate(train_loader):
        x = x.to(device)
        y = y.to(device)
        pred = net(x)

        optimizer.zero_grad()
        loss = loss_func(pred,y)
        loss.backward()
        optimizer.step()
        sum_loss += loss.item()
        train_loss.append(loss.item())
        print(["epoch:%d , batch:%d , loss:%.3f" %(epoch,batch_idx,loss.item())])
    torch.save(net.state_dict(), '模型地址')


plot_curve(train_loss)

The network is trained with the cross-entropy loss and the SGD optimizer; after training, the weights are saved to disk.

Test accuracy

[Figure: test-set accuracy output]
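
The screenshot is not reproducible here; below is a minimal evaluation sketch, assuming a test split readable by the same CIFAR10_dataset class (the test directory and model path are placeholders):

import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from alexnet import AlexNet
from dataload.cifar10_dataload import CIFAR10_dataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Same preprocessing as training, without the random flip
transform = transforms.Compose([
    transforms.Resize([32, 32]),
    transforms.ToTensor(),
])

test_dataset = CIFAR10_dataset('dataset/test', transform=transform)  # placeholder path
test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False)

net = AlexNet().to(device)
net.load_state_dict(torch.load('path/to/model.pth', map_location=device))
net.eval()  # disable training-only behavior during evaluation

correct = total = 0
with torch.no_grad():
    for x, y in test_loader:
        pred = net(x.to(device)).argmax(dim=1)
        correct += (pred == y.to(device)).sum().item()
        total += y.size(0)
print('test accuracy: %.2f%%' % (100.0 * correct / total))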

Predicting a single image:

[Figure: single-image prediction result]
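
For a single image, the steps are the same: load the saved weights, apply the training-time preprocessing, and take the argmax over the ten logits. A hedged sketch (the image file name and model path are placeholders; the class list is the standard CIFAR10 label order):

import torch
import torchvision.transforms as transforms
from PIL import Image
from alexnet import AlexNet

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']

transform = transforms.Compose([
    transforms.Resize([32, 32]),
    transforms.ToTensor(),
])

net = AlexNet().to(device)
net.load_state_dict(torch.load('path/to/model.pth', map_location=device))
net.eval()

# 'test.png' is a placeholder for any RGB image
img = transform(Image.open('test.png').convert('RGB')).unsqueeze(0).to(device)
with torch.no_grad():
    pred = net(img).argmax(dim=1).item()
print('predicted class:', classes[pred])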
