PyTorch Learning 3: Training and Testing a Model

Dataset Preparation

This post uses the FashionMNIST dataset: 60,000 training and 10,000 test 28x28 grayscale images of clothing, spread over 10 classes.

'''
Dataset preparation
'''
batch_size = 4    # number of samples in each training batch
DOWNLOAD_MNIST = False    # whether to download the data from the internet

# Data preparation: the images are grayscale, so the input has 1 channel.
# FashionMNIST is stored under the ./data folder.
if not (os.path.exists('./data/FashionMNIST/')) or not os.listdir('./data/FashionMNIST/'):  # check whether the dataset has already been downloaded
    # the FashionMNIST directory does not exist or is empty
    DOWNLOAD_MNIST = True

train_dataset = datasets.FashionMNIST(
    root='./data',
    train=True,         # load the training split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)
test_dataset = datasets.FashionMNIST(
    root='./data',
    train=False,        # train=False loads the test split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)

# DataLoader wraps a Dataset (built-in or custom) and yields tensors batched by
# batch_size, which can be fed directly into the model.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)    # shuffle: whether to reshuffle the data each epoch
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

Printing DataLoader information and displaying images

'''
Output of the loop below (last iteration shown):
14999
torch.Size([4, 1, 28, 28])    4 = batch size, 1 = channels, 28*28 = H*W
torch.Size([4])               batch size
tensor([5, 3, 8, 9])          labels of the images in this batch
'''

'''
# Print the contents of the DataLoader
for i, (img, target) in enumerate(train_loader):
    print(i)
    print(img.shape)
    print(target.shape)
    print(target)
'''




'''
A DataLoader is essentially an iterable object;
iter() turns it into an iterator.
Display one batch of images:
'''

'''
def imshow(img):
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))


# Display the images; make_grid stitches the four images of the batch into one grid
plt.figure()
dataiter = iter(train_loader)
images, labels = next(dataiter)   # dataiter.next() was removed in newer PyTorch versions

imshow(torchvision.utils.make_grid(images))

plt.show()

'''
print(train_loader)
labels_map = {
    0: "T-Shirt",
    1: "Trouser",
    2: "Pullover",
    3: "Dress",
    4: "Coat",
    5: "Sandal",
    6: "Shirt",
    7: "Sneaker",
    8: "Bag",
    9: "Ankle Boot",
}

# Display the images together with their labels

dataiter = iter(train_loader)
images, labels = next(dataiter)

for i in range(batch_size):
    # Show the image
    npimg = images[i].numpy()
    plt.subplot(1,batch_size,i+1)
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    label = labels_map[int(labels[i].numpy())]
    plt.title(label)

plt.show()

Training the Neural Network

Iteration: one iteration processes a single mini-batch, i.e. one forward pass plus one backward pass.

Epoch: one complete pass over all of the training data.
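
With the loader defined above, one epoch therefore consists of len(train_loader) iterations. A quick sanity check (a minimal sketch that reuses train_dataset, train_loader and batch_size from the code above):

import math

iters_per_epoch = len(train_loader)   # number of mini-batches (iterations) per epoch
print(len(train_dataset))             # 60000 training images in FashionMNIST
print(iters_per_epoch)                # 15000 when batch_size = 4
assert iters_per_epoch == math.ceil(len(train_dataset) / batch_size)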

'''
Training loop
'''

# Start training
EPOCH = 20  # number of epochs

for epoch in range(EPOCH):
    sum_loss = 0
    # Iterate over the training data
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)  # move the data to the GPU if one is available

        # Zero the gradients
        optimizer.zero_grad()

        # Forward pass, compute the loss, backpropagate, and update the parameters
        output = net(inputs)
        loss = loss_fuc(output, labels)
        loss.backward()
        optimizer.step()

        # Print the average loss every 100 batches
        sum_loss += loss.item()
        if i % 100 == 99:
            print('[Epoch:%d, batch:%d] train loss: %.03f' % (epoch + 1, i + 1, sum_loss / 100))
            sum_loss = 0.0

    correct = 0
    total = 0

    # Evaluate on the test set after each epoch
    for data in test_loader:
        test_inputs, labels = data
        test_inputs, labels = test_inputs.to(device), labels.to(device)
        outputs_test = net(test_inputs)
        _, predicted = torch.max(outputs_test.data, 1)  # class with the highest score
        total += labels.size(0)  # accumulate the total number of test images
        correct += (predicted == labels).sum()  # accumulate the number of correctly classified images

    print('Accuracy on the test set after epoch {}: {}%'.format(epoch + 1, 100 * correct.item() / total))



# --------Save the model-----------
os.makedirs('./model', exist_ok=True)   # make sure the output directory exists
torch.save(net, './model/LeNet.pth')    # saves the entire model, which produces a relatively large file
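
Saving the whole model pickles the network class together with its weights, so the class definition must be importable when the file is loaded later. A common lighter-weight alternative (a minimal sketch; the file name LeNet_state.pth is only an illustration) is to save just the state_dict and rebuild the network before loading:

# Save only the learned parameters (smaller file, more robust to code changes)
torch.save(net.state_dict(), './model/LeNet_state.pth')

# Later: rebuild the architecture, then load the weights into it
model = LeNet().to(device)
model.load_state_dict(torch.load('./model/LeNet_state.pth', map_location=device))
model.eval()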

Complete training code

import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import os
import torchvision
from torch import optim
import matplotlib.pyplot as plt
import cv2 as cv

import numpy as np
'''
Dataset preparation
'''
batch_size = 64   # number of samples in each training batch
DOWNLOAD_MNIST = False    # whether to download the data from the internet

# Data preparation: the images are grayscale, so the input has 1 channel.
# FashionMNIST is stored under the ./data folder.
if not (os.path.exists('./data/FashionMNIST/')) or not os.listdir('./data/FashionMNIST/'):  # check whether the dataset has already been downloaded
    # the FashionMNIST directory does not exist or is empty
    DOWNLOAD_MNIST = True

train_dataset = datasets.FashionMNIST(
    root='./data',
    train=True,         # load the training split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)
test_dataset = datasets.FashionMNIST(
    root='./data',
    train=False,        # train=False loads the test split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)

# DataLoader wraps a Dataset (built-in or custom) and yields tensors batched by
# batch_size, which can be fed directly into the model.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)    # shuffle: whether to reshuffle the data each epoch
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


'''
Network design and modification
LeNet-5 is used as the starting point, with some changes.
The inputs here are grayscale images, so the input channel count is 1;
for color images (channel = 3) the first convolution's input channels would need to change.
'''


class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        # Convolution layer C1 and pooling layer S2
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        # Convolution layer C3 and pooling layer S4
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        # Fully connected layers C5 and F6, and the output layer
        self.fc = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10)
        )

    # Forward pass, applied layer by layer in order
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # the fully connected layers (nn.Linear) expect 1-D feature vectors, so flatten the feature maps
        x = self.fc(x)

        return x


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use the GPU if one is available, otherwise the CPU
net = LeNet().to(device)  # instantiate the network and move it to the chosen device

'''
Loss and optimizer
'''
loss_fuc = nn.CrossEntropyLoss()  # multi-class classification, so use cross-entropy loss
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)  # SGD with momentum, learning rate 0.001

'''
Training loop
'''

# Start training
EPOCH = 2  # number of epochs

for epoch in range(EPOCH):
    sum_loss = 0
    # Iterate over the training data
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)  # move the data to the GPU if one is available

        # Zero the gradients
        optimizer.zero_grad()

        # Forward pass, compute the loss, backpropagate, and update the parameters
        output = net(inputs)
        loss = loss_fuc(output, labels)
        loss.backward()
        optimizer.step()

        # Print the average loss every 100 batches
        sum_loss += loss.item()
        if i % 100 == 99:
            print('[Epoch:%d, batch:%d] train loss: %.03f' % (epoch + 1, i + 1, sum_loss / 100))
            sum_loss = 0.0

    correct = 0
    total = 0

    # Evaluate on the test set after each epoch
    for data in test_loader:
        test_inputs, labels = data
        test_inputs, labels = test_inputs.to(device), labels.to(device)
        outputs_test = net(test_inputs)
        _, predicted = torch.max(outputs_test.data, 1)  # class with the highest score
        total += labels.size(0)  # accumulate the total number of test images
        correct += (predicted == labels).sum()  # accumulate the number of correctly classified images

    print('Accuracy on the test set after epoch {}: {}%'.format(epoch + 1, 100 * correct.item() / total))



# --------Save the model-----------
os.makedirs('./model', exist_ok=True)   # make sure the output directory exists
torch.save(net, './model/LeNet.pth')    # saves the entire model, which produces a relatively large file

Testing the Neural Network

Test results

Complete test code

import numpy as np
import torch
import cv2 as cv
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from pytorchlearn import net   # note: importing the training script also executes its top-level code



'''
Randomly draw four samples from the dataset and run them through the trained model
'''
print('start predict')

training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor()
)

test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor()
)

'''
We can index Datasets manually like a list: training_data[index].
We use matplotlib to visualize some samples in our training data.
'''
labels_map = {
    0: "T-Shirt",
    1: "Trouser",
    2: "Pullover",
    3: "Dress",
    4: "Coat",
    5: "Sandal",
    6: "Shirt",
    7: "Sneaker",
    8: "Bag",
    9: "Ankle Boot",
}


test_num = 4
test_imgs = []      # images used for model testing
test_labels = []    # their ground-truth class names
# Randomly draw a few images from the training set

for i in range(test_num):
    sample_idx = torch.randint(len(training_data), size=(1,)).item()
    img, label = training_data[sample_idx]
    test_imgs.append(img)
    test_labels.append(labels_map[label])


# Convert an image to a tensor with the shape the network expects and run inference
def model_test(img):
    img_np = np.array(img)
    img_tensor = torch.from_numpy(img_np)

    img_tensor = img_tensor.view(1, 1, 28, 28)  # batch * channel * h * w
    img_tensor = img_tensor.to(device)

    out = model(img_tensor)
    _, pred = torch.max(out, 1)

    print('Predicted class index: {}'.format(pred.item()))
    return pred


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.load('./model/LeNet.pth')  # load the saved model
model = model.to(device)
model.eval()    # switch the model to evaluation (test) mode




test_res = []
for i in range(test_num):
    img = test_imgs[i]
    n_cpu = model_test(img).cpu()
    test_res.append(labels_map[int(n_cpu.numpy())])  # map the predicted index to its class name

print('Predictions:  ', test_res)
print('Ground truth: ', test_labels)

 
