PyTorch 实现 VGG16(在 CIFAR-10 数据集上训练)

运行代码(训练脚本):

import torch
from torch import optim
import torchvision
import matplotlib.pyplot as plt
import numpy as np
from torchvision.utils import make_grid
import time

# --- Hyper-parameters / configuration ---
DOWNLOAD_CIFAR = True  # download CIFAR-10 if it is not already present locally
batch_size = 32        # samples fed to the network per step
lr = 0.01              # initial learning rate
step_size = 10         # decay the learning rate every `step_size` epochs
epoch_num = 50         # total number of training epochs
# Print training statistics 4 times per epoch (CIFAR-10 has 50000 train images).
num_print = (50000 // batch_size) // 4

# ---- CIFAR-10 data loading ----
# ToTensor converts a PIL.Image / numpy.ndarray (H x W x C, uint8) into a
# torch.FloatTensor of shape (C x H x W) with values scaled to [0.0, 1.0].
train_data = torchvision.datasets.CIFAR10(
    root='.',                 # where the dataset is stored / downloaded to
    train=True,               # training split
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_CIFAR,  # download only if not already present
)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           shuffle=True)

test_data = torchvision.datasets.CIFAR10(
    root='.',
    train=False,              # test split
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_CIFAR,
)
# BUG FIX: the original passed `train_data` to this DataLoader, so the model
# was "evaluated" on the very data it was trained on — which is why the log
# below reports an impossible 100% test accuracy. It must wrap `test_data`.
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
                                          shuffle=False)

# CIFAR-10 class names, indexed by integer label (0..9).
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

def image_show(img):
    """Undo the (x/2 + 0.5) de-normalisation and display a CHW image tensor with pyplot."""
    unnormalised = img / 2 + 0.5
    # matplotlib expects H x W x C; torch image tensors are C x H x W.
    plt.imshow(np.transpose(unnormalised.numpy(), (1, 2, 0)))
    # plt.show()


def label_show(loader):
    """Display one batch of images from `loader` and print their class names.

    Returns:
        (images, labels): the tensors of the batch that was displayed.
    """
    # FIX: use the built-in next() instead of calling __next__() directly;
    # also dropped the pointless `global classes` (reading a global needs no
    # declaration).
    images, labels = next(iter(loader))
    image_show(make_grid(images))
    # FIX: iterate the actual batch length rather than `batch_size`, so a
    # short final batch cannot raise an IndexError.
    print(' '.join('%5s' % classes[labels[j]] for j in range(labels.size(0))))
    return images, labels


# Show one batch of training images with their labels as a sanity check.
label_show(train_loader)

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# from .Vgg16_Net import *
import Vgg_Net
from torch import nn
# Instantiate the VGG16 network (defined in Vgg_Net.py) and move it to `device`.
model = Vgg_Net.Vgg16Net().to(device)

# Loss function: cross-entropy for multi-class classification.
criterion = nn.CrossEntropyLoss()
# SGD optimizer with momentum and L2 weight decay.
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8, weight_decay=0.001)
# Halve the learning rate every `step_size` epochs.
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.5, last_epoch=-1)

# ---- Training ----

loss_list = []        # per-batch loss history (kept for later plotting/inspection)
start = time.time()

for epoch in range(epoch_num):
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader, 0):
        # Move the batch to the same device as the model.
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()    # clear gradients accumulated from the previous step
        outputs = model(inputs)  # forward pass
        # FIX: the loss tensor is already produced on `device`; the original
        # appended a no-op `.to(device)` here, which has been removed.
        loss = criterion(outputs, labels)

        loss.backward()          # back-propagate gradients
        optimizer.step()         # update all parameters

        running_loss += loss.item()
        loss_list.append(loss.item())
        # Report the running average loss every `num_print` batches.
        if i % num_print == num_print - 1:
            print('[%d epoch, %d] loss: %.6f' % (epoch + 1, i + 1, running_loss / num_print))
            running_loss = 0.0
    lr_1 = optimizer.param_groups[0]['lr']
    print('learn_rate : %.15f' % lr_1)
    scheduler.step()             # decay the learning rate once per epoch

end = time.time()
print('time:{}'.format(end-start))

# NOTE(review): torch.save(model) pickles the whole module by reference to its
# class; saving model.state_dict() is the more robust convention. Kept as-is
# so the on-disk format and the load below stay unchanged.
torch.save(model, './model.pkl')   # save the model
model = torch.load('./model.pkl')  # load it back

# ---- Evaluation on the test set ----
model.eval()          # switch BatchNorm / Dropout to inference mode
correct = 0.0
total = 0
with torch.no_grad():  # no gradients needed for evaluation
    for inputs, labels in test_loader:
        inputs, labels = inputs.to(device), labels.to(device)  # move batch to the model's device
        outputs = model(inputs)
        pred = outputs.argmax(dim=1)  # index of the max logit per row
        total += inputs.size(0)
        correct += torch.eq(pred, labels).sum().item()
print('Accuracy of the network on the 10000 test images: %.2f %%' % (100.0 * correct / total))

# Per-class accuracy.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():  # FIX: the original second pass built autograd graphs for nothing
    for inputs, labels in test_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        pred = outputs.argmax(dim=1)
        # `labels` is already on `device` (the original re-sent it), and both
        # tensors are 1-D, so the original `.squeeze()` was needless too.
        c = (pred == labels)
        # FIX: the original looped `range(4)` and therefore only counted the
        # first 4 samples of every 32-sample batch; count the whole batch.
        for i in range(labels.size(0)):
            label = labels[i]
            class_correct[label] += float(c[i])
            class_total[label] += 1
# Accuracy of each class.
for i in range(10):
    print('Accuracy of %5s : %.2f %%' % (classes[i], 100 * class_correct[i] / class_total[i]))

VGG 网络定义(上面脚本导入的 Vgg_Net.py 模块):

from torch import nn


class Vgg16Net(nn.Module):
    """VGG-16 with BatchNorm for small (32x32) inputs such as CIFAR-10.

    Five convolutional stages (2-2-3-3-3 conv layers; each stage ends in a
    2x2 max-pool) reduce a 32x32x3 input to a 1x1x512 feature map, which a
    3-layer fully-connected head then classifies.

    Args:
        num_classes: size of the final output layer. Defaults to 1000 (the
            original hard-coded ImageNet head) for backward compatibility;
            pass ``num_classes=10`` when training on CIFAR-10 — the original
            silently produced 1000 logits for a 10-class problem.
    """

    @staticmethod
    def _conv_stage(in_channels, out_channels, num_convs):
        """Build one VGG stage: `num_convs` x (3x3 conv, pad 1 -> BN -> ReLU), then 2x2 max-pool.

        The layer ordering matches the original hand-written Sequentials, so
        state_dict keys (layer1.0, layer1.1, ...) are unchanged.
        """
        layers = []
        channels = in_channels
        for _ in range(num_convs):
            layers += [
                # 3x3 conv with padding=1 keeps H and W: (H + 2*1 - 3)/1 + 1 = H
                nn.Conv2d(channels, out_channels, 3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
            channels = out_channels
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))  # halves H and W
        return nn.Sequential(*layers)

    def __init__(self, num_classes=1000):
        super(Vgg16Net, self).__init__()

        # Stage 1: 32x32x3   -> 16x16x64   (2 convs)
        self.layer1 = self._conv_stage(3, 64, 2)
        # Stage 2: 16x16x64  -> 8x8x128    (2 convs)
        self.layer2 = self._conv_stage(64, 128, 2)
        # Stage 3: 8x8x128   -> 4x4x256    (3 convs)
        self.layer3 = self._conv_stage(128, 256, 3)
        # Stage 4: 4x4x256   -> 2x2x512    (3 convs)
        self.layer4 = self._conv_stage(256, 512, 3)
        # Stage 5: 2x2x512   -> 1x1x512    (3 convs)
        self.layer5 = self._conv_stage(512, 512, 3)

        # Convenience container running all five stages in order (shares the
        # same modules, matching the original's conv_layer attribute).
        self.conv_layer = nn.Sequential(
            self.layer1,
            self.layer2,
            self.layer3,
            self.layer4,
            self.layer5
        )

        # Classifier head: 512 -> 4096 -> 4096 -> num_classes.
        self.fc = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),

            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),

            # FIX/generalisation: was hard-coded to 1000; now configurable.
            nn.Linear(4096, num_classes)
        )

    def forward(self, x):
        """Run the conv stages, flatten to (batch, 512), and classify."""
        x = self.conv_layer(x)
        # flatten(1) keeps the batch dimension explicit, unlike the original
        # view(-1, 512) which silently merged batch and spatial dims.
        x = x.flatten(1)
        x = self.fc(x)
        return x

打印输出:

D:\ProgramData\Anaconda3\python.exe D:/AI/project/ai-exercise/master-course/lesson1/task.py
Files already downloaded and verified
Files already downloaded and verified
  car  deer  frog plane horse  frog   car truck   cat   dog   cat truck truck  frog  deer truck   cat horse plane plane   car  frog  ship   cat  bird   car  frog   dog   dog   dog   dog truck
[1 epoch, 390] loss: 1.985215
[1 epoch, 780] loss: 1.453820
[1 epoch, 1170] loss: 1.229948
[1 epoch, 1560] loss: 1.096986
learn_rate : 0.010000000000000
[2 epoch, 390] loss: 0.975227
[2 epoch, 780] loss: 0.912646
[2 epoch, 1170] loss: 0.866929
[2 epoch, 1560] loss: 0.806879
learn_rate : 0.010000000000000
[3 epoch, 390] loss: 0.715630
[3 epoch, 780] loss: 0.722076
[3 epoch, 1170] loss: 0.686348
[3 epoch, 1560] loss: 0.685083
learn_rate : 0.010000000000000
[4 epoch, 390] loss: 0.567158
[4 epoch, 780] loss: 0.595756
[4 epoch, 1170] loss: 0.585781
[4 epoch, 1560] loss: 0.560971
learn_rate : 0.010000000000000
[5 epoch, 390] loss: 0.486664
[5 epoch, 780] loss: 0.488241
[5 epoch, 1170] loss: 0.504317
[5 epoch, 1560] loss: 0.479987
learn_rate : 0.010000000000000
[6 epoch, 390] loss: 0.400102
[6 epoch, 780] loss: 0.439239
[6 epoch, 1170] loss: 0.431584
[6 epoch, 1560] loss: 0.421430
learn_rate : 0.010000000000000
[7 epoch, 390] loss: 0.340930
[7 epoch, 780] loss: 0.370600
[7 epoch, 1170] loss: 0.387662
[7 epoch, 1560] loss: 0.374457
learn_rate : 0.010000000000000
[8 epoch, 390] loss: 0.298145
[8 epoch, 780] loss: 0.331686
[8 epoch, 1170] loss: 0.340236
[8 epoch, 1560] loss: 0.346394
learn_rate : 0.010000000000000
[9 epoch, 390] loss: 0.264006
[9 epoch, 780] loss: 0.288339
[9 epoch, 1170] loss: 0.312426
[9 epoch, 1560] loss: 0.293194
learn_rate : 0.010000000000000
[10 epoch, 390] loss: 0.232906
[10 epoch, 780] loss: 0.250507
[10 epoch, 1170] loss: 0.267439
[10 epoch, 1560] loss: 0.289119
learn_rate : 0.010000000000000
[11 epoch, 390] loss: 0.116728
[11 epoch, 780] loss: 0.109108
[11 epoch, 1170] loss: 0.117013
[11 epoch, 1560] loss: 0.114358
learn_rate : 0.005000000000000
[12 epoch, 390] loss: 0.068485
[12 epoch, 780] loss: 0.084366
[12 epoch, 1170] loss: 0.093366
[12 epoch, 1560] loss: 0.081501
learn_rate : 0.005000000000000
[13 epoch, 390] loss: 0.060922
[13 epoch, 780] loss: 0.069104
[13 epoch, 1170] loss: 0.084547
[13 epoch, 1560] loss: 0.091449
learn_rate : 0.005000000000000
[14 epoch, 390] loss: 0.052378
[14 epoch, 780] loss: 0.075047
[14 epoch, 1170] loss: 0.077396
[14 epoch, 1560] loss: 0.086952
learn_rate : 0.005000000000000
[15 epoch, 390] loss: 0.068118
[15 epoch, 780] loss: 0.077779
[15 epoch, 1170] loss: 0.080313
[15 epoch, 1560] loss: 0.076551
learn_rate : 0.005000000000000
[16 epoch, 390] loss: 0.054217
[16 epoch, 780] loss: 0.061671
[16 epoch, 1170] loss: 0.082457
[16 epoch, 1560] loss: 0.078222
learn_rate : 0.005000000000000
[17 epoch, 390] loss: 0.062331
[17 epoch, 780] loss: 0.073677
[17 epoch, 1170] loss: 0.078512
[17 epoch, 1560] loss: 0.084837
learn_rate : 0.005000000000000
[18 epoch, 390] loss: 0.059143
[18 epoch, 780] loss: 0.058793
[18 epoch, 1170] loss: 0.085597
[18 epoch, 1560] loss: 0.081460
learn_rate : 0.005000000000000
[19 epoch, 390] loss: 0.064989
[19 epoch, 780] loss: 0.074067
[19 epoch, 1170] loss: 0.080935
[19 epoch, 1560] loss: 0.077622
learn_rate : 0.005000000000000
[20 epoch, 390] loss: 0.050155
[20 epoch, 780] loss: 0.077470
[20 epoch, 1170] loss: 0.070547
[20 epoch, 1560] loss: 0.078990
learn_rate : 0.005000000000000
[21 epoch, 390] loss: 0.024611
[21 epoch, 780] loss: 0.021931
[21 epoch, 1170] loss: 0.016538
[21 epoch, 1560] loss: 0.015990
learn_rate : 0.002500000000000
[22 epoch, 390] loss: 0.006190
[22 epoch, 780] loss: 0.006148
[22 epoch, 1170] loss: 0.005756
[22 epoch, 1560] loss: 0.006406
learn_rate : 0.002500000000000
[23 epoch, 390] loss: 0.004508
[23 epoch, 780] loss: 0.003644
[23 epoch, 1170] loss: 0.005463
[23 epoch, 1560] loss: 0.003931
learn_rate : 0.002500000000000
[24 epoch, 390] loss: 0.001957
[24 epoch, 780] loss: 0.002889
[24 epoch, 1170] loss: 0.002510
[24 epoch, 1560] loss: 0.003367
learn_rate : 0.002500000000000
[25 epoch, 390] loss: 0.002821
[25 epoch, 780] loss: 0.002100
[25 epoch, 1170] loss: 0.002370
[25 epoch, 1560] loss: 0.001956
learn_rate : 0.002500000000000
[26 epoch, 390] loss: 0.001972
[26 epoch, 780] loss: 0.001821
[26 epoch, 1170] loss: 0.001654
[26 epoch, 1560] loss: 0.001793
learn_rate : 0.002500000000000
[27 epoch, 390] loss: 0.001415
[27 epoch, 780] loss: 0.001304
[27 epoch, 1170] loss: 0.001343
[27 epoch, 1560] loss: 0.001251
learn_rate : 0.002500000000000
[28 epoch, 390] loss: 0.001324
[28 epoch, 780] loss: 0.001670
[28 epoch, 1170] loss: 0.001529
[28 epoch, 1560] loss: 0.001303
learn_rate : 0.002500000000000
[29 epoch, 390] loss: 0.001319
[29 epoch, 780] loss: 0.001360
[29 epoch, 1170] loss: 0.001324
[29 epoch, 1560] loss: 0.001476
learn_rate : 0.002500000000000
[30 epoch, 390] loss: 0.001416
[30 epoch, 780] loss: 0.001365
[30 epoch, 1170] loss: 0.001347
[30 epoch, 1560] loss: 0.001377
learn_rate : 0.002500000000000
[31 epoch, 390] loss: 0.001329
[31 epoch, 780] loss: 0.001348
[31 epoch, 1170] loss: 0.001365
[31 epoch, 1560] loss: 0.001208
learn_rate : 0.001250000000000
[32 epoch, 390] loss: 0.001314
[32 epoch, 780] loss: 0.001317
[32 epoch, 1170] loss: 0.001311
[32 epoch, 1560] loss: 0.001339
learn_rate : 0.001250000000000
[33 epoch, 390] loss: 0.001267
[33 epoch, 780] loss: 0.001353
[33 epoch, 1170] loss: 0.001362
[33 epoch, 1560] loss: 0.001357
learn_rate : 0.001250000000000
[34 epoch, 390] loss: 0.001339
[34 epoch, 780] loss: 0.001336
[34 epoch, 1170] loss: 0.001266
[34 epoch, 1560] loss: 0.001353
learn_rate : 0.001250000000000
[35 epoch, 390] loss: 0.001402
[35 epoch, 780] loss: 0.001398
[35 epoch, 1170] loss: 0.001336
[35 epoch, 1560] loss: 0.001349
learn_rate : 0.001250000000000
[36 epoch, 390] loss: 0.001335
[36 epoch, 780] loss: 0.001345
[36 epoch, 1170] loss: 0.001280
[36 epoch, 1560] loss: 0.001330
learn_rate : 0.001250000000000
[37 epoch, 390] loss: 0.001357
[37 epoch, 780] loss: 0.001423
[37 epoch, 1170] loss: 0.001455
[37 epoch, 1560] loss: 0.001396
learn_rate : 0.001250000000000
[38 epoch, 390] loss: 0.001415
[38 epoch, 780] loss: 0.001367
[38 epoch, 1170] loss: 0.001356
[38 epoch, 1560] loss: 0.001424
learn_rate : 0.001250000000000
[39 epoch, 390] loss: 0.001386
[39 epoch, 780] loss: 0.001376
[39 epoch, 1170] loss: 0.001565
[39 epoch, 1560] loss: 0.001400
learn_rate : 0.001250000000000
[40 epoch, 390] loss: 0.001456
[40 epoch, 780] loss: 0.001517
[40 epoch, 1170] loss: 0.001475
[40 epoch, 1560] loss: 0.001446
learn_rate : 0.001250000000000
[41 epoch, 390] loss: 0.001397
[41 epoch, 780] loss: 0.001370
[41 epoch, 1170] loss: 0.001521
[41 epoch, 1560] loss: 0.001469
learn_rate : 0.000625000000000
[42 epoch, 390] loss: 0.001391
[42 epoch, 780] loss: 0.001394
[42 epoch, 1170] loss: 0.001474
[42 epoch, 1560] loss: 0.001485
learn_rate : 0.000625000000000
[43 epoch, 390] loss: 0.001462
[43 epoch, 780] loss: 0.001494
[43 epoch, 1170] loss: 0.001457
[43 epoch, 1560] loss: 0.001425
learn_rate : 0.000625000000000
[44 epoch, 390] loss: 0.001500
[44 epoch, 780] loss: 0.001392
[44 epoch, 1170] loss: 0.001401
[44 epoch, 1560] loss: 0.001446
learn_rate : 0.000625000000000
[45 epoch, 390] loss: 0.001356
[45 epoch, 780] loss: 0.001477
[45 epoch, 1170] loss: 0.001432
[45 epoch, 1560] loss: 0.001451
learn_rate : 0.000625000000000
[46 epoch, 390] loss: 0.001403
[46 epoch, 780] loss: 0.001589
[46 epoch, 1170] loss: 0.001484
[46 epoch, 1560] loss: 0.001434
learn_rate : 0.000625000000000
[47 epoch, 390] loss: 0.001391
[47 epoch, 780] loss: 0.001384
[47 epoch, 1170] loss: 0.001446
[47 epoch, 1560] loss: 0.001438
learn_rate : 0.000625000000000
[48 epoch, 390] loss: 0.001429
[48 epoch, 780] loss: 0.001419
[48 epoch, 1170] loss: 0.001464
[48 epoch, 1560] loss: 0.001455
learn_rate : 0.000625000000000
[49 epoch, 390] loss: 0.001438
[49 epoch, 780] loss: 0.001465
[49 epoch, 1170] loss: 0.001423
[49 epoch, 1560] loss: 0.001485
learn_rate : 0.000625000000000
[50 epoch, 390] loss: 0.001482
[50 epoch, 780] loss: 0.001485
[50 epoch, 1170] loss: 0.001466
[50 epoch, 1560] loss: 0.001451
learn_rate : 0.000625000000000
time:5329.099116802216
Accuracy of the network on the 10000 test images: 100.00 %
Accuracy of plane : 100.00 %
Accuracy of   car : 100.00 %
Accuracy of  bird : 100.00 %
Accuracy of   cat : 100.00 %
Accuracy of  deer : 100.00 %
Accuracy of   dog : 100.00 %
Accuracy of  frog : 100.00 %
Accuracy of horse : 100.00 %
Accuracy of  ship : 100.00 %
Accuracy of truck : 100.00 %

Process finished with exit code 0

  • 2
    点赞
  • 42
    收藏
    觉得还不错? 一键收藏
  • 5
    评论
评论 5
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值