AlexNet模型构建

参考博客:https://blog.csdn.net/u012348774/article/details/90047539

猫狗识别是学习CNN中最有趣的一个应用,本次主要在kaggle上的dogs vs cat redux进行了一次尝试。考虑到训练速度和实验的简易性,该文使用了比较简单的AlexNet,并且还用了对应的ImageNet的预训练模型。

下文就按照数据处理、模型预处理、训练和测试结果几个部分展开。

数据处理

从Kaggle上下载完比赛数据后,首先要做两个处理:1、将train数据中不同类别的数据放到不同的文件夹下。2、完成train数据处理后,则需要进一步将其分为train数据和validation数据,本次实验时,通过随机采样的方式,从train中选取20%的数据用于验证。

模型预处理

模型预处理主要是读取 ImageNet 预训练的模型,然后对 AlexNet 网络最后一层进行修改,使得分类类别数量为 2。

import torch
import torch.nn as nn
from torch import optim
from torchvision.datasets import ImageFolder
from torchvision import transforms

#AlexNet网络结构
class MineAlexNet(nn.Module):
    """AlexNet-style CNN whose last linear layer is sized for `num_classes`.

    Layer names/ordering (`features`, `avgpool`, `classifier`) match
    torchvision's AlexNet so an ImageNet-pretrained state_dict loads directly.
    Forward output is log-probabilities (LogSoftmax over dim=1).
    """

    def __init__(self, num_classes: int = 2):
        super(MineAlexNet, self).__init__()

        # Convolutional feature extractor (same topology as torchvision AlexNet).
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # Makes the classifier input size independent of the exact input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        # Fully-connected head; was `sefl.classifier` (typo -> NameError at construction).
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )
        # Emit log-probabilities; pair with nn.NLLLoss (not CrossEntropyLoss).
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, num_classes)."""
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        x = self.logsoftmax(x)
        return x

# Load the pre-downloaded ImageNet-pretrained AlexNet weights.
state_dict = torch.load('alexnetImageNet.pth')
# 1000 classes so the checkpoint's classifier shapes match.
# NOTE: was `alexnet = ...` here but `alexNet.` below -> NameError; unified name.
alexNet = MineAlexNet(1000)
alexNet.load_state_dict(state_dict)

# Swap the final 1000-way layer for a 2-way (cat vs. dog) layer.
alexNet.classifier[6] = nn.Linear(4096, 2)

# Save the converted model (pretrained features + fresh head) as the training start point.
torch.save(alexNet.state_dict(), 'begin.pth')

训练

共训练了 30 个 epoch。

import torch
import torch.nn as nn
from torch import optim
from torchvision.datasets import ImageFolder
from torchvision import transforms

# Data preprocessing — essential: without normalization/augmentation results degrade badly.
# Mean/std are the standard ImageNet statistics (must match the pretrained weights).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    # Was `transforms.Resize(size=9227,227))` — a syntax error; AlexNet input is 227x227.
    transforms.Resize((227, 227)),
    transforms.RandomRotation(20),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),  # convert PIL image to tensor, scaled to [0, 1]
    normalize
])

# Load training data from folder; ImageFolder infers labels from subdirectory names
# (one subfolder per class — see the "data processing" section above).
train_dataset = ImageFolder(r'/home/yqs/Desktop/dogs-vs-cats-redux-kernels-edition/train',transform=transform)
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=512, shuffle=True)

# Load validation data from folder (random 20% split prepared beforehand).
# NOTE(review): shuffle=True is harmless but unnecessary for validation.
validation_dataset = ImageFolder(r'/home/yqs/Desktop/dogs-vs-cats-redux-kernels-edition/validation',transform=transform)
validationloader = torch.utils.data.DataLoader(validation_dataset, batch_size=512, shuffle=True)

# AlexNet
class MineAlexNet(nn.Module):
    """AlexNet with a configurable final layer, emitting log-probabilities.

    Submodule names and ordering (`features`, `avgpool`, `classifier`,
    `logsoftmax`) mirror torchvision's AlexNet so that the converted
    checkpoint `begin.pth` loads without key remapping.
    """

    def __init__(self, num_classes=2):
        super(MineAlexNet, self).__init__()

        # Convolutional backbone: five conv layers, three max-pools.
        backbone = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
        ]
        self.features = nn.Sequential(*backbone)

        # Pool to a fixed 6x6 map so the head works for any input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))

        # Fully-connected classification head.
        head = [
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)

        # Final log-softmax over the class dimension.
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Map an image batch to (batch, num_classes) log-probabilities."""
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = pooled.view(pooled.size(0), 256 * 6 * 6)
        logits = self.classifier(flat)
        return self.logsoftmax(logits)
    

# Load the converted AlexNet model (pretrained features + 2-way head).
state_dict = torch.load('begin.pth')
alexNet = MineAlexNet(2)
alexNet.load_state_dict(state_dict)

# Move the model to GPU.
alexNet.cuda()

# The model's forward already applies LogSoftmax, so use NLLLoss.
# (CrossEntropyLoss applies log_softmax internally and would double-apply it.)
criterion = nn.NLLLoss()
# Small learning rate: we are fine-tuning pretrained weights.
optimizer = optim.SGD(alexNet.parameters(), lr=0.00005)

epochs = 30  # was `epochw` — NameError at the loop below
train_losses, validation_losses = [], []

# Training loop: one optimization pass over trainloader per epoch,
# followed by a full validation pass.
for e in range(epochs):
    running_loss = 0
    alexNet.train()  # enable dropout for training
    for images, labels in trainloader:
        images = images.cuda()
        labels = labels.cuda()

        optimizer.zero_grad()
        output = alexNet(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    # Validation after each epoch.
    validation_loss = 0
    accuracy = 0
    alexNet.eval()  # disable dropout for a stable validation estimate

    # Turn off gradients for validation: saves memory and computation.
    with torch.no_grad():
        for images, labels in validationloader:
            images = images.cuda()
            labels = labels.cuda()

            log_ps = alexNet(images)
            # .item() avoids accumulating tensors across batches.
            validation_loss += criterion(log_ps, labels).item()

            # Model outputs log-probabilities; exp() recovers probabilities.
            ps = torch.exp(log_ps)
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            accuracy += torch.mean(equals.type(torch.FloatTensor))

    train_losses.append(running_loss / len(trainloader))
    validation_losses.append(validation_loss / len(validationloader))

    # Checkpoint every epoch; the +37 offset presumably continues numbering
    # from an earlier training run — TODO confirm.
    torch.save(alexNet.state_dict(), str(e + 1 + 37) + '.pth')

    print("Epoch: {}/{}.. ".format(e + 1, epochs),
          "Training Loss: {:.3f}.. ".format(running_loss / len(trainloader)),
          "Test Loss: {:.3f}.. ".format(validation_loss / len(validationloader)),
          "Test Accuracy: {:.3f}".format(accuracy / len(validationloader)))

# Plot the training/validation loss curves collected above.
import matplotlib.pyplot as plt
plt.plot(train_losses, label='Training loss')
plt.plot(validation_losses, label='Validation loss')
plt.legend(frameon=False)

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值