Building a VGG-Style Network in PyTorch for Prediction

Let's go straight to the example:

1. Building the network

import torch
import torch.nn as nn
import torch.nn.functional as F


class VGGbase(nn.Module):
    """
    构建类VGG网络结构
    """
    def __init__(self):
        super(VGGbase, self).__init__()

        # input: 3 * 28 * 28 (the 32 x 32 images are cropped to 28 x 28)
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.max_pooling1 = nn.MaxPool2d(kernel_size=2, stride=2)

        # 64 * 14 * 14 after max_pooling1
        self.conv2_1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )

        self.conv2_2 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )

        self.max_pooling2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # 128 * 7 * 7 after max_pooling2
        self.conv3_1 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )

        self.conv3_2 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )

        self.max_pooling3 = nn.MaxPool2d(kernel_size=2,
                                         stride=2,
                                         padding=1)

        # 256 * 4 * 4 after max_pooling3 (padding=1: (7 + 2 - 2) // 2 + 1 = 4)
        self.conv4_1 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )

        self.conv4_2 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )
        self.max_pooling4 = nn.MaxPool2d(kernel_size=2,
                                         stride=2)

        # batchsize * 512 * 2 * 2 --> batchsize * (512 * 4)
        self.fc = nn.Linear(512 * 4, 10)

    def forward(self, x):
        """
        加载在初始化中定义好的网络参数,进行前向传播
        :param x:
        :return:
        """
        batchsize = x.size(0)
        out = self.conv1(x)
        out = self.max_pooling1(out)
        out = self.conv2_1(out)
        out = self.conv2_2(out)
        out = self.max_pooling2(out)

        out = self.conv3_1(out)
        out = self.conv3_2(out)
        out = self.max_pooling3(out)

        out = self.conv4_1(out)
        out = self.conv4_2(out)
        out = self.max_pooling4(out)
        # flatten: batchsize * c * h * w --> batchsize * n
        out = out.view(batchsize, -1)

        out = self.fc(out)  # batchsize * 10
        # Note: the training script below pairs this log_softmax output with
        # nn.CrossEntropyLoss. Because log_softmax is idempotent the result is
        # the same as passing raw logits, but the idiomatic pairings are
        # log_softmax + NLLLoss or raw logits + CrossEntropyLoss.
        out = F.log_softmax(out, dim=1)

        return out


def VGGNet():
    return VGGbase()
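
Before wiring the network into a training script, a quick shape sanity check is useful. A minimal sketch (the batch size of 4 is an arbitrary choice for the check):

import torch
from vggnet import VGGNet

net = VGGNet()
x = torch.randn(4, 3, 28, 28)  # a dummy batch of 4 RGB 28 x 28 images
out = net(x)
print(out.shape)               # expected: torch.Size([4, 10])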

2. Using the network for prediction
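
The script below pulls train_loader and test_loader from a load_cifar10 module that the post does not show. For completeness, here is a minimal sketch of what such a module might contain, assuming torchvision's CIFAR-10 dataset and the 32-to-28 crop mentioned in the network comments (the exact transforms and num_workers are assumptions):

# load_cifar10.py (hypothetical sketch)
import torch
import torchvision
import torchvision.transforms as transforms

batch_size = 128

train_transform = transforms.Compose([
    transforms.RandomCrop(28),        # crop the 32 x 32 CIFAR images to 28 x 28
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
test_transform = transforms.Compose([
    transforms.CenterCrop(28),
    transforms.ToTensor(),
])

train_set = torchvision.datasets.CIFAR10(root="./data", train=True,
                                         download=True, transform=train_transform)
test_set = torchvision.datasets.CIFAR10(root="./data", train=False,
                                        download=True, transform=test_transform)

train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                           shuffle=True, num_workers=4)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                          shuffle=False, num_workers=4)

The training script itself: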

import torch
import torch.nn as nn
import torchvision
from vggnet import VGGNet
# from resnet import ResNet18
# from mobilenetv1 import mobilenetv1_small
# from inceptionMolule import InceptionNetSmall
# from base_resnet import resnet
# from resnetV1 import resnet as resnetV1
# from pre_resnet import pytorch_resnet18
from load_cifar10 import train_loader, test_loader
import os
import tensorboardX

# use the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
epoch_num = 200
lr = 0.1  # note: 0.1 is high for Adam; 1e-3 is a more common default
batch_size = 128

net = VGGNet().to(device)
# net = pytorch_resnet18().to(device)

loss_func = nn.CrossEntropyLoss()

# optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
# learning-rate decay: StepLR reduces the LR at fixed intervals
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                            step_size=10,  # decay every 10 epochs (scheduler.step() runs once per epoch)
                                            gamma=0.9  # multiply the LR by 0.9
                                            )
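# With step_size=10 and gamma=0.9, the effective learning rate after `epoch`
# calls to scheduler.step() is lr * 0.9 ** (epoch // 10): 0.1 for epochs 0-9,
# 0.09 for epochs 10-19, 0.081 for epochs 20-29, and about 0.0135 by epoch 199.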

model_path = "./models/vgg"
# model_path = "models/pytorch_resnet18"
log_path = "./logs/vgg"
# log_path = "logs/pytorch_resnet18"
if not os.path.exists(log_path):
    os.makedirs(log_path)
if not os.path.exists(model_path):
    os.makedirs(model_path)
writer = tensorboardX.SummaryWriter(log_path)
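# inspect the logged curves and images with: tensorboard --logdir ./logs/vgg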

# training loop
step_n = 0
for epoch in range(epoch_num):
    print(" epoch is ", epoch)
    # 表明当前是训练阶段
    net.train()  # train BN dropout

    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        # equivalent to net.forward(inputs): nn.Module.__call__ dispatches to forward
        outputs = net(inputs)
        # the loss module is likewise invoked through __call__ / forward
        loss = loss_func(outputs, labels)
        # clear the gradients accumulated from the previous step
        optimizer.zero_grad()
        loss.backward()
        # apply the gradient update to the parameters
        optimizer.step()

        _, pred = torch.max(outputs.data, dim=1)

        correct = pred.eq(labels.data).cpu().sum()
        # print("epoch is ", epoch)
        # print("train lr is ", optimizer.state_dict()["param_groups"][0]["lr"])
        # print("train step", i, "loss is:", loss.item(),
        #       "mini-batch correct is:", 100.0 * correct / batch_size)

        writer.add_scalar("train loss", loss.item(), global_step=step_n)
        writer.add_scalar("train correct",
                          100.0 * correct.item() / batch_size, global_step=step_n)
        # tile the batch of images into a single grid
        im = torchvision.utils.make_grid(inputs)
        writer.add_image("train im", im, global_step=step_n)

        step_n += 1

    # save a checkpoint after every epoch
    torch.save(net.state_dict(), "{}/{}.pth".format(model_path, epoch + 1))
    # advance the learning-rate scheduler
    scheduler.step()

    # evaluation phase
    net.eval()  # switch BatchNorm/Dropout to inference behaviour
    sum_loss = 0
    sum_correct = 0
    with torch.no_grad():  # gradients are not needed for evaluation
        for i, data in enumerate(test_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            loss = loss_func(outputs, labels)
            _, pred = torch.max(outputs.data, dim=1)
            correct = pred.eq(labels.data).cpu().sum()

            sum_loss += loss.item()
            sum_correct += correct.item()
            im = torchvision.utils.make_grid(inputs)
            writer.add_image("test im", im, global_step=step_n)

    test_loss = sum_loss * 1.0 / len(test_loader)
    # note: this assumes every test batch holds exactly batch_size samples
    test_correct = sum_correct * 100.0 / len(test_loader) / batch_size

    writer.add_scalar("test loss", test_loss, global_step=epoch + 1)
    writer.add_scalar("test correct",
                      test_correct, global_step=epoch + 1)

    print("epoch is", epoch + 1, "loss is:", test_loss,
          "test correct is:", test_correct)

writer.close()
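
With a checkpoint saved after every epoch, prediction on new data reduces to loading a state dict and taking the argmax of the network output. A minimal sketch, assuming the final checkpoint and a preprocessed 3 x 28 x 28 image tensor named image (the file name follows the saving pattern above; the image variable is a placeholder):

import torch
from vggnet import VGGNet

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = VGGNet().to(device)
net.load_state_dict(torch.load("./models/vgg/200.pth", map_location=device))
net.eval()

with torch.no_grad():
    # image: a 3 x 28 x 28 tensor preprocessed like the test set
    out = net(image.unsqueeze(0).to(device))  # add the batch dimension
    pred = out.argmax(dim=1).item()           # index of the predicted class
print("predicted class:", pred)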
