VGG16训练CIFAR10

模仿的b站小土堆的方法

import torch
import torchvision.datasets

#下载训练集和测试集
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import models

# Download the CIFAR-10 train/test splits and wrap them in DataLoaders.
to_tensor = torchvision.transforms.ToTensor()

train_data = torchvision.datasets.CIFAR10(
    "./data", train=True, transform=to_tensor, download=True)
test_data = torchvision.datasets.CIFAR10(
    "./data", train=False, transform=to_tensor, download=True)

tn_data_loader = DataLoader(train_data, batch_size=64)
tt_data_loader = DataLoader(test_data, batch_size=64)

# Report how many samples each split contains.
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练集长度为:{}\n测试集长度为:{}".format(train_data_size, test_data_size))

#修改vgg16网络模型,使其符合我们的训练集
class VGG16_NET(nn.Module):
    """VGG16 backbone adapted to classify CIFAR-10 (10 classes).

    Loads torchvision's VGG16 with pretrained ImageNet weights, strips
    its original 1000-class classifier, and appends a smaller 3-layer
    head that outputs 10 logits.
    """

    def __init__(self):
        super(VGG16_NET, self).__init__()
        # Keyword argument instead of the deprecated positional boolean.
        # NOTE(review): newer torchvision prefers `weights=...`; `pretrained=True`
        # keeps compatibility with the version this script was written for.
        backbone = models.vgg16(pretrained=True)
        backbone.classifier = nn.Sequential()  # drop the ImageNet head
        # Renamed from the original typo `futures` -> `features`.
        self.features = backbone
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(25088, 512),  # 512 * 7 * 7 = 25088 conv features
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(128, 10),     # CIFAR-10 logits
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10)."""
        x = self.features(x)
        return self.classifier(x)

# Build the model and move it (and the loss) onto the GPU when one exists.
vgg16 = VGG16_NET()
use_cuda = torch.cuda.is_available()
if use_cuda:
    vgg16 = vgg16.cuda()

# Loss function and optimizer.
loss_fn = nn.CrossEntropyLoss()
if use_cuda:
    loss_fn = loss_fn.cuda()

learning_rate = 0.01
optimizer = torch.optim.SGD(params=vgg16.parameters(), lr=learning_rate)

# TensorBoard writer for the loss / accuracy curves.
writer = SummaryWriter("./logs")

epoch = 10

train_step = 0
test_step = 0
for i in range(epoch):
    print("-----第{}轮训练开始-----".format(i+1))

    # BUG FIX: switch back to training mode every epoch. The original only
    # ever called eval() (below), so from epoch 2 onward the model stayed
    # in eval mode and Dropout was silently disabled during training.
    vgg16.train()
    for data in tn_data_loader:
        imgs, targets = data
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()

        output = vgg16(imgs)
        loss = loss_fn(output, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_step = train_step + 1
        if train_step % 100 == 0:
            # .item() extracts a plain float (logs a scalar, not a tensor repr).
            print("训练次数:{} Loss:{}".format(train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), train_step)

    # Evaluate on the full test split with gradients disabled.
    test_loss = 0.0
    total_accuracy = 0
    vgg16.eval()
    with torch.no_grad():
        for data in tt_data_loader:
            imgs, targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()

            output = vgg16(imgs)
            loss = loss_fn(output, targets)
            # Accumulate Python numbers instead of keeping CUDA tensors alive.
            test_loss = test_loss + loss.item()
            accuracy = (output.argmax(1) == targets).sum().item()
            total_accuracy = total_accuracy + accuracy

    print("整体测试集上的Loss:{}\n整体测试集的准确率:{}".format(test_loss, total_accuracy/test_data_size))
    writer.add_scalar("Test_Loss", test_loss, test_step)
    writer.add_scalar("accuracy", total_accuracy, test_step)
    # NOTE(review): saves the whole module object; vgg16.state_dict() would be
    # more portable, but kept as-is to preserve the original checkpoint format.
    torch.save(vgg16, "vgg16_{}.pth".format(i+1))
    test_step = test_step + 1
    print("模型已保存")

writer.close()

最后准确率在百分之87左右

  • 0
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值