Cat-vs-Dog Prediction with PyTorch: GoogLeNet

GoogLeNet is a deep convolutional network developed at Google that won the ImageNet (ILSVRC) classification challenge in 2014. Unlike a plain stack of convolutional layers, GoogLeNet is built from modules called "Inception" blocks, which convolve the input with several kernel sizes in parallel and concatenate the resulting feature maps along the channel dimension, producing a richer multi-scale representation. GoogLeNet also replaces the usual large fully connected head with global average pooling, which keeps the parameter count small and helps prevent overfitting; during training, the original network further attached auxiliary classifiers to intermediate layers to strengthen gradient flow.

For cat-vs-dog prediction in PyTorch, you can either take the ready-made GoogLeNet model from the torchvision.models module or assemble the network from its building blocks, as the code below does. Prediction quality can be improved by tuning hyperparameters and the training procedure, such as the learning rate and the number of training epochs; GPU acceleration, the cross-entropy loss, and the Adam optimizer keep training efficient. Finally, the trained model can be applied to new images to decide whether each one shows a cat or a dog.
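As an alternative to the hand-built network below, the torchvision route is only a few lines. A minimal sketch, assuming a recent torchvision (older versions take pretrained=True instead of the weights= argument):

import torch.nn as nn
import torchvision.models as models

# Load GoogLeNet with ImageNet weights and swap the 1000-way head
# for a 2-way (cat/dog) linear classifier.
tv_model = models.googlenet(weights=models.GoogLeNet_Weights.DEFAULT)
tv_model.fc = nn.Linear(tv_model.fc.in_features, 2)

The rest of this post instead assembles GoogLeNet from its building blocks, which makes the Inception structure explicit.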

Code:

%matplotlib inline
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
from torch.utils import data

# Random crops augment the training set; the validation set gets a
# deterministic resize + center crop so that evaluation is reproducible.
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(150),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
valid_transform = transforms.Compose([
    transforms.Resize(170),
    transforms.CenterCrop(150),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
train_data = torchvision.datasets.ImageFolder('D:\\Jupyter Notebook\\Pytorch入门\\catsdogs\\train', transform=train_transform)
valid_data = torchvision.datasets.ImageFolder('D:\\Jupyter Notebook\\Pytorch入门\\catsdogs\\test', transform=valid_transform)
# Set up the data iterators
batch_size = 32
train_iter = data.DataLoader(train_data, batch_size, shuffle=True, num_workers=0)
valid_iter = data.DataLoader(valid_data, batch_size, shuffle=False, num_workers=0)
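A quick sanity check on one batch (illustrative only) confirms the shapes the network will see; RandomResizedCrop(150) yields 150x150 images:

X, y = next(iter(train_iter))
print(X.shape, y.shape)  # torch.Size([32, 3, 150, 150]) torch.Size([32])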
# The Inception block
class Inception(nn.Module):
    # c1--c4 are the output channel counts of the four paths
    def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        # Path 1: a single 1x1 convolution
        self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        # Path 2: 1x1 convolution followed by a 3x3 convolution
        self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # Path 3: 1x1 convolution followed by a 5x5 convolution
        self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # Path 4: 3x3 max-pooling followed by a 1x1 convolution
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        p1 = self.relu(self.p1_1(x))
        p2 = self.relu(self.p2_2(self.relu(self.p2_1(x))))
        p3 = self.relu(self.p3_2(self.relu(self.p3_1(x))))
        p4 = self.relu(self.p4_2(self.p4_1(x)))
        # Concatenate the outputs along the channel dimension
        return torch.cat((p1, p2, p3, p4), dim=1)
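Each path preserves the input's height and width, so the block's output channel count is simply c1 + c2[1] + c3[1] + c4. A throwaway check (illustrative only):

blk = Inception(192, 64, (96, 128), (16, 32), 32)
Y = blk(torch.rand(1, 192, 28, 28))
print(Y.shape)  # torch.Size([1, 256, 28, 28]), since 64 + 128 + 32 + 32 = 256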
b1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
                   nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),
                   nn.ReLU(),
                   nn.Conv2d(64, 192, kernel_size=3, padding=1),
                   nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                   Inception(256, 128, (128, 192), (32, 96), 64),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   nn.AdaptiveAvgPool2d((1,1)),
                   nn.Flatten())

GoogLeNet_net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 2))
GoogLeNet_net
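To see how a 150x150 input shrinks through the five stages before reaching the final Linear(1024, 2), feed a dummy batch through each block in turn (illustrative only):

X = torch.rand(1, 3, 150, 150)
for blk in GoogLeNet_net:
    X = blk(X)
    print(blk.__class__.__name__, 'output shape:', X.shape)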
lr = 1e-4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = GoogLeNet_net.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
loss_fn = nn.CrossEntropyLoss().to(device)

print(device)
def train(model, device, train_iter, optimizer, loss_fn, epochs):
    total_train_step = 0
    for epoch in range(epochs):
        print("Starting epoch {}".format(epoch + 1))
        model.train()
        for idx, (X, y) in enumerate(train_iter):
            X, y = X.to(device), y.to(device)
            pred = model(X)
            loss = loss_fn(pred, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_train_step += 1

            if total_train_step % 10 == 0:
                print("Step {}, loss: {}".format(total_train_step, loss.item()))

def test(model, device, test_iter, loss_fn):
    total_test_loss = 0
    total_correct = 0
    model.eval()
    with torch.no_grad():
        for idx, (X, y) in enumerate(test_iter):
            X, y = X.to(device), y.to(device)
            pred = model(X)
            loss = loss_fn(pred, y)
            total_test_loss += loss.item()  # accumulate the test loss
            # Count correct predictions for accuracy
            total_correct += (pred.argmax(1) == y).sum().item()
    print("Total loss on the test set: {}".format(total_test_loss))
    print("Accuracy on the test set: {}".format(total_correct / len(test_iter.dataset)))
num_epochs = 60
import time
begin_time = time.time()
print(time.ctime(begin_time))
train(model, device, train_iter, optimizer, loss_fn, num_epochs)
end_time = time.time()
print(time.ctime(end_time))
test(model, device, valid_iter, loss_fn)
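Once training finishes, the model can label a single new image, as promised above. A minimal sketch; the path cat_or_dog.jpg is a placeholder, and ImageFolder assigns class indices by sorting the class folder names, so train_data.classes maps the predicted index back to a name:

from PIL import Image

img = Image.open('cat_or_dog.jpg').convert('RGB')  # placeholder path
x = valid_transform(img).unsqueeze(0).to(device)   # add a batch dimension
model.eval()
with torch.no_grad():
    pred_idx = model(x).argmax(1).item()
print(train_data.classes[pred_idx])  # e.g. 'cats' or 'dogs', per your folder names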
