Training and Inference with a Convolutional Neural Network (VGG-16 on the MNIST Dataset)

If there are any errors or omissions below, corrections are welcome.

The walkthrough proceeds in four steps (illustrated by a figure in the original post): prepare and load the dataset, build the network, construct the loss function and optimizer, and train and test the model.

Preparing and loading the dataset

About the MNIST dataset

Official link: https://www.tensorflow.org/datasets/catalog/mnist

Description: MNIST is a database of handwritten digit images.

Feature structure:

FeaturesDict({
    'image': Image(shape=(28, 28, 1), dtype=uint8),
    'label': ClassLabel(shape=(), dtype=int64, num_classes=10),
})

Sample images from the dataset (shown as a figure in the original post):
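
Since the sample figure is not reproduced here, a short sketch like the following (an illustrative addition, assuming matplotlib is available) displays a few digits straight from the raw dataset:

import matplotlib.pyplot as plt
import torchvision

# Load MNIST without any transform so we get raw PIL images
raw = torchvision.datasets.MNIST("./dataset", train=True, download=True)

# Show the first 8 digits with their labels
fig, axes = plt.subplots(1, 8, figsize=(12, 2))
for ax, (img, label) in zip(axes, raw):
    ax.imshow(img, cmap="gray")
    ax.set_title(label)
    ax.axis("off")
plt.show()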

Code

# MNIST images are single-channel (grayscale) handwritten digits.
# VGG-16 expects 224x224 inputs, so resize MNIST from 28x28 to 224x224.
data_tf = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])

train_dataset = torchvision.datasets.MNIST("./dataset", train=True, transform=data_tf, download=True)
test_dataset = torchvision.datasets.MNIST("./dataset", train=False, transform=data_tf, download=True)

# Load the datasets. train_dataset has 60000 samples and test_dataset has 10000,
# so we fetch them in batches of 100.
train_dataloader = DataLoader(train_dataset, batch_size=100)
test_dataloader = DataLoader(test_dataset, batch_size=100)
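
As a quick sanity check (an illustrative addition, not part of the original code), fetch one batch and confirm the shapes the network will see:

imgs, labels = next(iter(train_dataloader))
print(imgs.shape)    # torch.Size([100, 1, 224, 224]) after Resize(224)
print(labels.shape)  # torch.Size([100])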

Building the neural network

VGG-16 model architecture

# Build the VGG-16 network
class VGGNet(nn.Module):
    def __init__(self):
        super(VGGNet, self).__init__()

        # Block 1: two convolutional layers followed by one max-pooling layer
        self.layer1 = Sequential(
            # The first conv layer is modified for MNIST's grayscale format: in_channels is set to 1
            # 224*224*1 -> 224*224*64
            Conv2d(in_channels=1, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            # BatchNorm2d after the conv layer normalizes the activations, preventing large values from destabilizing the network before the ReLU
            BatchNorm2d(64),
            ReLU(),

            # 224*224*64 -> 224*224*64
            Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            # BatchNorm again before the ReLU, for the same reason
            BatchNorm2d(64),
            ReLU(),

            # 224*224*64 -> 112*112*64
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer2 = Sequential(
            # 112*112*64 -> 112*112*128
            Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(128),
            ReLU(),

            # 112*112*128 -> 112*112*128
            Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(128),
            ReLU(),

            # 112*112*128 -> 56*56*128
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer3 = Sequential(
            # 56*56*128 -> 56*56*256
            Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(256),
            ReLU(),

            # 56*56*256 -> 56*56*256
            Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(256),
            ReLU(),

            # 56*56*256 -> 56*56*256
            Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(256),
            ReLU(),

            # 56*56*256 -> 28*28*256
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer4 = Sequential(
            # 28*28*256 -> 28*28*512
            Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 28*28*512 -> 28*28*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 28*28*512 -> 28*28*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 28*28*512 -> 14*14*512
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer5 = Sequential(
            # 14*14*512 -> 14*14*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 14*14*512 -> 14*14*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 14*14*512 -> 14*14*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 14*14*512 -> 7*7*512
            MaxPool2d(kernel_size=2, stride=2)
        )

        # Dropout probability; 0 disables dropout here (the original VGG paper uses 0.5)
        dropout = 0
        self.fc = Sequential(
            # 7*7*512 -> 1*1*4096
            Linear(7 * 7 * 512, 4096),
            ReLU(),
            Dropout(dropout),

            Linear(4096, 4096),
            ReLU(),
            Dropout(dropout),

            Linear(4096, 1000),
            ReLU(),
            Dropout(dropout),

            Linear(1000, 10)
        )
        
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = x.reshape(x.size(0), -1)  # flatten; use the actual batch size instead of hard-coding it
        x = self.fc(x)
        return x

After building the network, create a dummy input to verify that it is constructed correctly.

model = VGGNet()
# Create a dummy input to check that the network is correct
input = torch.ones((10, 1, 224, 224))
output = model(input)
# output.shape: [10, 10], as expected
print(output.shape)

The output, torch.Size([10, 10]), confirms that the network is built correctly.
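
Beyond the shape check, a one-liner (also an illustrative addition) reports how large this VGG-16 variant is:

n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"{n_params:,} trainable parameters")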

Building the loss function and optimizer

# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
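
One detail worth keeping in mind (a minimal illustration, not part of the original post): nn.CrossEntropyLoss already applies log-softmax internally, so the model should feed it raw logits. The equivalence can be checked directly:

# nn.CrossEntropyLoss == log_softmax followed by NLLLoss on raw logits
logits = torch.randn(4, 10)           # a random batch of 4 score vectors
targets = torch.tensor([1, 0, 4, 9])  # arbitrary class labels
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(torch.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))  # True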

Setting training parameters

# A few bookkeeping parameters for training
# Count of training steps
total_train_step = 0
# Count of test rounds
total_test_step = 0
# Number of epochs
epoch = 5

Training

# Training loop (this runs inside the per-epoch loop shown in the complete code)
model.train()
for data in train_dataloader:
    imgs, targets = data
    imgs = imgs.to(device)
    targets = targets.to(device)
    output = model(imgs)
    loss = loss_fn(output, targets)

    # Zero the gradients
    optimizer.zero_grad()
    # Backpropagation
    loss.backward()
    # Update the parameters
    optimizer.step()

    total_train_step = total_train_step + 1
    # Print only every 100 steps to avoid flooding the console
    if total_train_step % 100 == 0:
        print("Train step: {}, Loss: {}".format(total_train_step, loss.item()))

Testing

# Evaluation loop (also inside the per-epoch loop)
model.eval()
# No tuning happens during testing; we only evaluate the current model.
# Inside `with torch.no_grad()`, every tensor computed in the block has
# requires_grad=False, so no gradients are tracked.
total_test_loss = 0   # accumulates the loss over the entire test set
total_accuracy = 0    # counts correct predictions over the entire test set
with torch.no_grad():
    for data in test_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = model(imgs)
        loss = loss_fn(outputs, targets)
        total_test_loss = total_test_loss + loss.item()
        accuracy = (outputs.argmax(1) == targets).sum()
        total_accuracy = total_accuracy + accuracy
print("Loss on the whole test set: {}".format(total_test_loss))
print("Accuracy on the whole test set: {} %".format(100 * total_accuracy / test_data_size))
total_test_step = total_test_step + 1
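
How the accuracy count works (a small illustration, not from the original post): argmax(1) picks the highest-scoring class per row, and the boolean comparison is summed to count correct predictions:

outputs = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # scores for 2 samples, 2 classes
targets = torch.tensor([1, 1])
print(outputs.argmax(1))                     # tensor([1, 0])
print((outputs.argmax(1) == targets).sum())  # tensor(1): one correct prediction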

Additional details

Visualizing the training process with TensorBoard

# Add TensorBoard logging
writer = SummaryWriter("logs")

# Log the training loss
writer.add_scalar("train_loss", loss.item(), total_train_step)

# Log the test loss and accuracy
writer.add_scalar("test_loss", total_test_loss, total_test_step)
writer.add_scalar("test_accuracy", 100 * total_accuracy / test_data_size, total_test_step)

# Save the model after each epoch
torch.save(model, "test_{}.pth".format(i))
print("Model saved")

Training on the GPU

Check whether CUDA is available and select the device accordingly, as in the snippet below (taken from the complete code):
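
# Pick the GPU if CUDA is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")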

Complete code

import torch
import torchvision
from torch import nn, softmax
from torch.nn import Conv2d, MaxPool2d, Dropout, Linear, ReLU, Sequential, BatchNorm2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms


# Select the device to run on: cpu or cuda (gpu)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cuda")

# Prepare the dataset
# We will use VGG-16 below, which takes 224x224 inputs, so resize the MNIST images to 224x224 as well
data_tf = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])

train_dataset = torchvision.datasets.MNIST("./dataset", train=True, transform=data_tf, download=True)
test_dataset = torchvision.datasets.MNIST("./dataset", train=False, transform=data_tf, download=True)

# Load the datasets. train_dataset has 60000 samples and test_dataset has 10000; here we fetch them in batches of 20.
train_dataloader = DataLoader(train_dataset, batch_size=20)
test_dataloader = DataLoader(test_dataset, batch_size=20)

# Size of the test set, used later to compute the accuracy
test_data_size = len(test_dataset)


# Build the VGG-16 network
class VGGNet(nn.Module):
    def __init__(self):
        super(VGGNet, self).__init__()

        # Block 1: two convolutional layers followed by one max-pooling layer
        self.layer1 = Sequential(
            # The first conv layer is modified for MNIST's grayscale format: in_channels is set to 1
            # 224*224*1 -> 224*224*64
            Conv2d(in_channels=1, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            # BatchNorm2d after the conv layer normalizes the activations, preventing large values from destabilizing the network before the ReLU
            BatchNorm2d(64),
            ReLU(),

            # 224*224*64 -> 224*224*64
            Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            # BatchNorm again before the ReLU, for the same reason
            BatchNorm2d(64),
            ReLU(),

            # 224*224*64 -> 112*112*64
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer2 = Sequential(
            # 112*112*64 -> 112*112*128
            Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(128),
            ReLU(),

            # 112*112*128 -> 112*112*128
            Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(128),
            ReLU(),

            # 112*112*128 -> 56*56*128
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer3 = Sequential(
            # 56*56*128 -> 56*56*256
            Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(256),
            ReLU(),

            # 56*56*256 -> 56*56*256
            Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(256),
            ReLU(),

            # 56*56*256 -> 56*56*256
            Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(256),
            ReLU(),

            # 56*56*256 -> 28*28*256
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer4 = Sequential(
            # 28*28*256 -> 28*28*512
            Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 28*28*512 -> 28*28*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 28*28*512 -> 28*28*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 28*28*512 -> 14*14*512
            MaxPool2d(kernel_size=2, stride=2)
        )

        self.layer5 = Sequential(
            # 14*14*512 -> 14*14*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 14*14*512 -> 14*14*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 14*14*512 -> 14*14*512
            Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            BatchNorm2d(512),
            ReLU(),

            # 14*14*512 -> 7*7*512
            MaxPool2d(kernel_size=2, stride=2)
        )

        # Dropout probability; 0 disables dropout here (the original VGG paper uses 0.5)
        dropout = 0
        self.fc = Sequential(
            # 7*7*512 -> 1*1*4096
            Linear(7 * 7 * 512, 4096),
            ReLU(),
            Dropout(dropout),

            Linear(4096, 4096),
            ReLU(),
            Dropout(dropout),

            Linear(4096, 1000),
            ReLU(),
            Dropout(dropout),

            Linear(1000, 10)
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = x.reshape(x.size(0), -1)  # flatten; use the actual batch size instead of hard-coding it
        x = self.fc(x)
        # Note: nn.CrossEntropyLoss already applies log-softmax internally, so this extra
        # softmax is redundant; it squeezes the logits into [0, 1] and puts a floor of
        # log(1 + 9/e) ≈ 1.4611 on the per-sample loss, which is why the training loss in
        # the run log below plateaus around that value. Returning raw logits is standard.
        x = softmax(x, dim=1)
        return x


# Create the model
model = VGGNet()
model = model.to(device)

# Loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# Optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# A few bookkeeping parameters for training
# Count of training steps
total_train_step = 0
# Count of test rounds
total_test_step = 0
# Number of epochs
epoch = 5

# Add TensorBoard logging
writer = SummaryWriter("logs")

for i in range(epoch):
    print("----------第{}轮训练开始----------".format(i + 1))

    model.train()
    # Training loop
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        output = model(imgs)
        loss = loss_fn(output, targets)

        # Zero the gradients
        optimizer.zero_grad()
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()

        total_train_step = total_train_step + 1
        # Print only every 100 steps to avoid flooding the console
        if total_train_step % 100 == 0:
            print("训练次数:{}, Loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluation loop
    model.eval()
    # No tuning happens during testing; we only evaluate the current model.
    # Inside `with torch.no_grad()`, every tensor computed in the block has
    # requires_grad=False, so no gradients are tracked.
    total_test_loss = 0   # accumulates the loss over the entire test set
    total_accuracy = 0    # counts correct predictions over the entire test set
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = model(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{:.2f} %".format(100 * total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", 100 * total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # Save the model after each epoch
    torch.save(model, "test_{}.pth".format(i))
    print("Model saved")

writer.close()

Run results

---------- Epoch 1 training begins ----------
Train step: 100, Loss: 2.2856597900390625
Train step: 200, Loss: 2.150400400161743
Train step: 300, Loss: 1.7740538120269775
Train step: 400, Loss: 1.7432806491851807
Train step: 500, Loss: 1.5415332317352295
Train step: 600, Loss: 1.6052879095077515
Train step: 700, Loss: 1.6011254787445068
Train step: 800, Loss: 1.5425055027008057
Train step: 900, Loss: 1.4809238910675049
Train step: 1000, Loss: 1.4737474918365479
Train step: 1100, Loss: 1.4968842267990112
Train step: 1200, Loss: 1.530411958694458
Train step: 1300, Loss: 1.5045621395111084
Train step: 1400, Loss: 1.467671513557434
Train step: 1500, Loss: 1.5350953340530396
Train step: 1600, Loss: 1.467505693435669
Train step: 1700, Loss: 1.4625208377838135
Train step: 1800, Loss: 1.4694852828979492
Train step: 1900, Loss: 1.498245358467102
Train step: 2000, Loss: 1.4620107412338257
Train step: 2100, Loss: 1.5054409503936768
Train step: 2200, Loss: 1.5024210214614868
Train step: 2300, Loss: 1.482898473739624
Train step: 2400, Loss: 1.4615617990493774
Train step: 2500, Loss: 1.4962882995605469
Train step: 2600, Loss: 1.5348854064941406
Train step: 2700, Loss: 1.496151089668274
Train step: 2800, Loss: 1.4617316722869873
Train step: 2900, Loss: 1.4616574048995972
Train step: 3000, Loss: 1.4644801616668701
Loss on the whole test set: 737.7400785684586
Accuracy on the whole test set: 98.87 %
Model saved
---------- Epoch 2 training begins ----------
Train step: 3100, Loss: 1.462166428565979
Train step: 3200, Loss: 1.5109279155731201
Train step: 3300, Loss: 1.4669560194015503
Train step: 3400, Loss: 1.5350016355514526
Train step: 3500, Loss: 1.4613951444625854
Train step: 3600, Loss: 1.4636991024017334
Train step: 3700, Loss: 1.4775222539901733
Train step: 3800, Loss: 1.5045650005340576
Train step: 3900, Loss: 1.4613001346588135
Train step: 4000, Loss: 1.461578130722046
Train step: 4100, Loss: 1.4632203578948975
Train step: 4200, Loss: 1.4617078304290771
Train step: 4300, Loss: 1.4656989574432373
Train step: 4400, Loss: 1.4611986875534058
Train step: 4500, Loss: 1.4943411350250244
Train step: 4600, Loss: 1.4613455533981323
Train step: 4700, Loss: 1.461228609085083
Train step: 4800, Loss: 1.469355821609497
Train step: 4900, Loss: 1.4677988290786743
Train step: 5000, Loss: 1.4618520736694336
Train step: 5100, Loss: 1.5073597431182861
Train step: 5200, Loss: 1.461596965789795
Train step: 5300, Loss: 1.462557315826416
Train step: 5400, Loss: 1.4612281322479248
Train step: 5500, Loss: 1.4627866744995117
Train step: 5600, Loss: 1.502852201461792
Train step: 5700, Loss: 1.4847246408462524
Train step: 5800, Loss: 1.4611848592758179
Train step: 5900, Loss: 1.4615039825439453
Train step: 6000, Loss: 1.4659409523010254
Loss on the whole test set: 735.6695410013199
Accuracy on the whole test set: 99.15 %
Model saved
---------- Epoch 3 training begins ----------
Train step: 6100, Loss: 1.4621410369873047
Train step: 6200, Loss: 1.5099278688430786
Train step: 6300, Loss: 1.4654113054275513
Train step: 6400, Loss: 1.5115697383880615
Train step: 6500, Loss: 1.4612452983856201
Train step: 6600, Loss: 1.4613730907440186
Train step: 6700, Loss: 1.462097406387329
Train step: 6800, Loss: 1.4689401388168335
Train step: 6900, Loss: 1.4611705541610718
Train step: 7000, Loss: 1.4613380432128906
Train step: 7100, Loss: 1.4616750478744507
Train step: 7200, Loss: 1.4615848064422607
Train step: 7300, Loss: 1.4734020233154297
Train step: 7400, Loss: 1.4611988067626953
Train step: 7500, Loss: 1.4615139961242676
Train step: 7600, Loss: 1.461174488067627
Train step: 7700, Loss: 1.4611701965332031
Train step: 7800, Loss: 1.4624950885772705
Train step: 7900, Loss: 1.4616165161132812
Train step: 8000, Loss: 1.4626996517181396
Train step: 8100, Loss: 1.499658465385437
Train step: 8200, Loss: 1.4614639282226562
Train step: 8300, Loss: 1.4635884761810303
Train step: 8400, Loss: 1.4612008333206177
Train step: 8500, Loss: 1.4622135162353516
Train step: 8600, Loss: 1.4661115407943726
Train step: 8700, Loss: 1.488004446029663
Train step: 8800, Loss: 1.4611705541610718
Train step: 8900, Loss: 1.461399793624878
Train step: 9000, Loss: 1.4626014232635498
Loss on the whole test set: 735.7861216068268
Accuracy on the whole test set: 99.10 %
Model saved
---------- Epoch 4 training begins ----------
Train step: 9100, Loss: 1.461256980895996
Train step: 9200, Loss: 1.51102876663208
Train step: 9300, Loss: 1.4616492986679077
Train step: 9400, Loss: 1.4734785556793213
Train step: 9500, Loss: 1.4613710641860962
Train step: 9600, Loss: 1.4611947536468506
Train step: 9700, Loss: 1.4661444425582886
Train step: 9800, Loss: 1.4634325504302979
Train step: 9900, Loss: 1.4611588716506958
Train step: 10000, Loss: 1.4611637592315674
Train step: 10100, Loss: 1.4613595008850098
Train step: 10200, Loss: 1.4611856937408447
Train step: 10300, Loss: 1.467907190322876
Train step: 10400, Loss: 1.461153507232666
Train step: 10500, Loss: 1.461257815361023
Train step: 10600, Loss: 1.461167573928833
Train step: 10700, Loss: 1.4611680507659912
Train step: 10800, Loss: 1.4614837169647217
Train step: 10900, Loss: 1.4613018035888672
Train step: 11000, Loss: 1.4611787796020508
Train step: 11100, Loss: 1.4650567770004272
Train step: 11200, Loss: 1.463962435722351
Train step: 11300, Loss: 1.4611883163452148
Train step: 11400, Loss: 1.4611852169036865
Train step: 11500, Loss: 1.4612447023391724
Train step: 11600, Loss: 1.4623310565948486
Train step: 11700, Loss: 1.483723521232605
Train step: 11800, Loss: 1.4611642360687256
Train step: 11900, Loss: 1.4611644744873047
Train step: 12000, Loss: 1.4640625715255737
Loss on the whole test set: 735.9970046281815
Accuracy on the whole test set: 99.02 %
Model saved
---------- Epoch 5 training begins ----------
Train step: 12100, Loss: 1.4611581563949585
Train step: 12200, Loss: 1.5111618041992188
Train step: 12300, Loss: 1.4611775875091553
Train step: 12400, Loss: 1.4799469709396362
Train step: 12500, Loss: 1.4612648487091064
Train step: 12600, Loss: 1.461230754852295
Train step: 12700, Loss: 1.4612605571746826
Train step: 12800, Loss: 1.4644612073898315
Train step: 12900, Loss: 1.461151361465454
Train step: 13000, Loss: 1.4611716270446777
Train step: 13100, Loss: 1.4614062309265137
Train step: 13200, Loss: 1.4614568948745728
Train step: 13300, Loss: 1.4655282497406006
Train step: 13400, Loss: 1.4611512422561646
Train step: 13500, Loss: 1.4613738059997559
Train step: 13600, Loss: 1.461181402206421
Train step: 13700, Loss: 1.461181879043579
Train step: 13800, Loss: 1.4614752531051636
Train step: 13900, Loss: 1.4632519483566284
Train step: 14000, Loss: 1.4611815214157104
Train step: 14100, Loss: 1.4629517793655396
Train step: 14200, Loss: 1.4618713855743408
Train step: 14300, Loss: 1.461651086807251
Train step: 14400, Loss: 1.4611736536026
Train step: 14500, Loss: 1.4613096714019775
Train step: 14600, Loss: 1.4625952243804932
Train step: 14700, Loss: 1.471457600593567
Train step: 14800, Loss: 1.4611611366271973
Train step: 14900, Loss: 1.4611570835113525
Train step: 15000, Loss: 1.4612820148468018
Loss on the whole test set: 735.1141477823257
Accuracy on the whole test set: 99.24 %
Model saved

After 5 epochs of training, this VGG-16 model reaches a classification accuracy of 99.24% on the MNIST test set.
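
The title promises inference as well; a minimal sketch along the following lines (assuming the last epoch's checkpoint test_4.pth and the VGGNet class definition are available) reloads a saved model and classifies a batch of test images:

# torch.save stored the whole model object, so torch.load restores it directly.
# (On recent PyTorch versions you may need torch.load(..., weights_only=False).)
model = torch.load("test_4.pth", map_location=device)
model.eval()

with torch.no_grad():
    imgs, targets = next(iter(test_dataloader))
    preds = model(imgs.to(device)).argmax(1)
    print("predicted:", preds[:10].tolist())
    print("actual:   ", targets[:10].tolist())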

References:

https://blog.51cto.com/u_16099289/8659193

"Deep Learning Notes: Building a VGG16 Network with PyTorch" (深度学习笔记--基于Pytorch搭建VGG16网络, CSDN blog)
