Machine Learning Study Log 5: Training a neural network with two hidden layers on the Fashion-MNIST dataset, reporting accuracy, and visualizing the confusion matrix

This post walks through a PyTorch model that classifies the Fashion-MNIST dataset. The model is a fully connected network that uses Dropout to reduce overfitting. It records the data preprocessing, model training, and evaluation steps, and plots the training loss and accuracy curves.

The task (as stated in the title): train a network with two hidden layers on Fashion-MNIST, report the test accuracy, and visualize the confusion matrix.

References:

Plotting a confusion matrix in Python with confusion_matrix (赫法格米's blog, CSDN)

Building a neural network in PyTorch to classify the Fashion-MNIST dataset (hxxjxw's blog, CSDN)

Recognizing Fashion-MNIST with a PyTorch convolutional neural network, heavily commented (xingS1992's blog, CSDN)

It took a few hours of modification to get everything working.

The code:

# -*- coding:utf-8 -*-
"""
作者:zz123123
日期:2022年11月17日
"""

import torch  # PyTorch
from torch import nn  # neural-network building blocks
import torch.nn.functional as F
import torch.utils.data as Data
from torchvision import transforms  # data preprocessing
from torchvision.datasets import FashionMNIST
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import seaborn as sns
import copy
import time

# Load Fashion-MNIST and prepare the training dataset
train_data = FashionMNIST(
    root='../data/FashionMNIST',  # data path
    train=True,  # use the training split
    transform=transforms.ToTensor(),
    download=True
)
# Define a data loader for the training set
train_loader = Data.DataLoader(
    dataset=train_data,  # dataset to load from
    batch_size=64,  # mini-batch size
    shuffle=False,  # no shuffling, so the batch-index train/val split below stays fixed across epochs
    num_workers=0,  # load data in the main process
)
# Count the number of batches
# print("Number of batches in train_loader:", len(train_loader))
# Fetch one batch of data
for step, (b_x, b_y) in enumerate(train_loader):
    if step > 0:
        break
    print(b_x.size(0))
# print(b_y.shape)
print(b_x.shape)
# Visualize one batch of images
# batch_x = b_x.squeeze().numpy()
# batch_y = b_y.numpy()
class_label = train_data.classes
class_label[0] = "T-shirt"  # shorten the first class name ("T-shirt/top")
# plt.figure(figsize=(12, 5))
# for ii in np.arange(len(batch_y)):
#     plt.subplot(4, 16, ii + 1)
#     plt.imshow(batch_x[ii, :, :], cmap=plt.cm.gray)
#     plt.title(class_label[batch_y[ii]], size=9)
#     plt.axis("off")
#     plt.subplots_adjust(wspace=0.05)
# Prepare the test set
test_data = FashionMNIST(
    root='../data/FashionMNIST',  # data path
    train=False,  # use the test split, not the training split
    download=False
)
# Add a channel dimension and rescale pixel values to [0, 1]
test_data_x = test_data.data.type(torch.FloatTensor) / 255.0
test_data_x = torch.unsqueeze(test_data_x, dim=1)
test_data_y = test_data.targets  # test-set labels
# print("test_data_x.shape:", test_data_x.shape)
# print("test_data_y.shape:", test_data_y.shape)


# Build the network
class Classifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 300)
        self.fc2 = nn.Linear(300, 100)
        self.fc3 = nn.Linear(100, 10)

        # Dropout randomly zeroes 20% of the activations during training to reduce overfitting
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        # Flatten each image: the channel, height, and width dimensions are collapsed into one vector
        x = x.view(x.shape[0], -1)

        # Apply Dropout to the hidden-layer activations during the forward pass
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))

        # No Dropout on the output layer
        x = F.log_softmax(self.fc3(x), dim=1)

        return x


# Instantiate the network (uncomment the print below to inspect its structure)
myconvnet = Classifier()
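# Optional smoke test (my own addition): run one dummy batch through the untrained
# network to confirm the forward pass works and yields one log-probability per class.
with torch.no_grad():
    _out = myconvnet(torch.randn(64, 1, 28, 28))
    assert _out.shape == (64, 10)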


# print(myconvnet)

# Define the training loop
def train_model(model, traindataloader, train_rate, criterion, optimizer, num_epoch=0):
    """
    :param model: the network to train
    :param traindataloader: training data loader; its batches are split into training and validation portions
    :param train_rate: proportion of the batches used for training (the rest are used for validation)
    :param criterion: loss function
    :param optimizer: optimization method
    :param num_epoch: number of training epochs
    :return: the trained model and a DataFrame of per-epoch metrics
    """
    # Count the batches used for training
    batch_num = len(traindataloader)
    train_batch_num = round(batch_num * train_rate)  # round() returns the nearest integer
    # Keep a copy of the model parameters (updated whenever validation accuracy improves)
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    train_loss_all = []
    train_acc_all = []
    val_loss_all = []
    val_acc_all = []
    since = time.time()
    for epoch in range(num_epoch):
        print('Epoch {}/{}'.format(epoch, num_epoch - 1))
        print('-' * 10)
        # Each epoch has a training phase and a validation phase
        train_loss = 0.0
        train_correct = 0
        train_num = 0
        val_loss = 0.0
        val_correct = 0
        val_num = 0
        for step, (b_x, b_y) in enumerate(traindataloader):
            if step < train_batch_num:
                model.train()  # switch to training mode
                output = model(b_x)
                pre_lab = torch.argmax(output, 1)  # index of the maximum value along the class dimension
                loss = criterion(output, b_y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_loss += loss.item() * b_x.size(0)  # b_x.size(0) is the batch size (64 here); .item() extracts the scalar loss
                train_correct += torch.sum(pre_lab == b_y.data)  # count predictions that match the labels
                train_num += b_x.size(0)
            else:
                model.eval()  # switch to evaluation mode
                output = model(b_x)
                pre_lab = torch.argmax(output, 1)
                loss = criterion(output, b_y)
                val_loss += loss.item() * b_x.size(0)
                val_correct += torch.sum(pre_lab == b_y.data)
                val_num += b_x.size(0)
        # Compute this epoch's loss and accuracy on the training and validation batches
        train_loss_all.append(train_loss / train_num)
        train_acc_all.append(train_correct.double().item() / train_num)
        val_loss_all.append(val_loss / val_num)
        val_acc_all.append(val_correct.double().item() / val_num)
        print('{} Train Loss:{:.4f} Train Acc:{:.4f}'.format(epoch, train_loss_all[-1], train_acc_all[-1]))  # [-1] is the most recent entry
        print('{} Val Loss:{:.4f} Val Acc:{:.4f}'.format(epoch, val_loss_all[-1], val_acc_all[-1]))
        # Save the parameters with the highest validation accuracy so far
        if val_acc_all[-1] > best_acc:
            best_acc = val_acc_all[-1]
            best_model_wts = copy.deepcopy(model.state_dict())
        time_use = time.time() - since
        print("Train and val complete in {:.0f}m {:.0f}s".format(time_use // 60, time_use % 60))
    # Restore the parameters of the best-performing model
    model.load_state_dict(best_model_wts)
    train_process = pd.DataFrame(
        data={"epoch": range(num_epoch),
              "train_loss_all": train_loss_all,
              "val_loss_all": val_loss_all,
              "train_acc_all": train_acc_all,
              "val_acc_all": val_acc_all})
    return model, train_process
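# Worked example of the split above (assuming the full 60000-image training set and
# batch_size=64): len(train_loader) == 938 batches (937 full batches plus one of 32),
# and round(938 * 0.8) == 750 batches go to training, leaving 188 for validation.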


# Train the model
optimizer = torch.optim.Adam(myconvnet.parameters(), lr=0.0003)
criterion = nn.CrossEntropyLoss()  # loss function
myconvnet, train_pross = train_model(myconvnet, train_loader, 0.8, criterion, optimizer, num_epoch=2)  # num_epoch sets the number of training epochs (printed as 0 .. num_epoch-1)
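# Side note (my own observation, not from the referenced posts): the network already
# returns log_softmax outputs, so nn.NLLLoss would be the natural pairing. Passing
# log-probabilities to nn.CrossEntropyLoss still works, because log_softmax is
# idempotent: the logsumexp of log-probabilities is log(1) = 0. A quick check:
# lp = F.log_softmax(torch.randn(3, 10), dim=1)
# print(torch.allclose(lp, F.log_softmax(lp, dim=1)))  # True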

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(train_pross.epoch, train_pross.train_loss_all, "ro-", label="Train loss")
plt.plot(train_pross.epoch, train_pross.val_loss_all, "bs-", label="Val loss")
plt.legend()
plt.xlabel("epoch")
plt.ylabel("Loss")
plt.subplot(1, 2, 2)
plt.plot(train_pross.epoch, train_pross.train_acc_all, "ro-", label="Train acc")
plt.plot(train_pross.epoch, train_pross.val_acc_all, "bs-", label="Val acc")
plt.xlabel("epoch")
plt.ylabel("acc")
plt.legend()
plt.savefig("loss_acc.png")
plt.show()


# Predict on the test set and evaluate
myconvnet.eval()
output = myconvnet(test_data_x)
pre_lab = torch.argmax(output, 1)  # predicted class index for each of the 10000 test images
acc = accuracy_score(test_data_y, pre_lab)  # fraction of test samples classified correctly
print("Prediction accuracy on the test set:", acc)

# Compute the confusion matrix and visualize it
def plot_confusion_matrix(y_true, y_pred, labels):
    cmap = plt.cm.binary
    cm = confusion_matrix(y_true, y_pred)
    tick_marks = np.array(range(len(labels))) + 0.5
    np.set_printoptions(precision=2)
    cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]  # normalize each row by its true-class count
    plt.figure(figsize=(10, 8), dpi=120)
    ind_array = np.arange(len(labels))
    x, y = np.meshgrid(ind_array, ind_array)
    intFlag = 0  # 1: annotate cells with raw integer counts; 0: annotate with normalized floats
    for x_val, y_val in zip(x.flatten(), y.flatten()):
        if (intFlag):
            c = cm[y_val][x_val]
            plt.text(x_val, y_val, "%d" % (c,), color='red', fontsize=8, va='center', ha='center')
        else:
            c = cm_normalized[y_val][x_val]
            if (c > 0.01):
                # Draw the cell value; font size and color can be adjusted here
                plt.text(x_val, y_val, "%0.2f" % (c,), color='red', fontsize=7, va='center', ha='center')
            else:
                plt.text(x_val, y_val, "%d" % (0,), color='red', fontsize=7, va='center', ha='center')
    if(intFlag):
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
    else:
        plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)
    plt.gca().set_xticks(tick_marks, minor=True)
    plt.gca().set_yticks(tick_marks, minor=True)
    plt.gca().xaxis.set_ticks_position('none')
    plt.gca().yaxis.set_ticks_position('none')
    plt.grid(True, which='minor', linestyle='-')
    plt.gcf().subplots_adjust(bottom=0.15)
    plt.title('')
    plt.colorbar()
    xlocations = np.array(range(len(labels)))
    plt.xticks(xlocations, labels, rotation=90)
    plt.yticks(xlocations, labels)
    plt.ylabel('Index of True Classes')
    plt.xlabel('Index of Predict Classes')
    plt.savefig('confusion_matrix.jpg', dpi=300)
    plt.show()

plot_confusion_matrix(test_data_y, pre_lab, class_label)
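# seaborn was also imported but unused; as a simpler alternative (my own sketch,
# using seaborn's heatmap defaults), the same confusion matrix can be drawn with:
cm = confusion_matrix(test_data_y, pre_lab)
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues",
            xticklabels=class_label, yticklabels=class_label)
plt.xlabel("Predicted class")
plt.ylabel("True class")
plt.show()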

Results:

[Figures: the training/validation loss and accuracy curves (loss_acc.png) and the normalized confusion matrix on the test set (confusion_matrix.jpg)]