PyTorch Fashion-MNIST 数据集卷积神经网络实例代码,训练预测准确率 94%

img
img

既有适合小白学习的零基础资料,也有适合3年以上经验的小伙伴深入学习提升的进阶课程,涵盖了95%以上物联网嵌入式知识点,真正体系化!

由于文件比较多,这里只是将部分目录截图出来,全套包含大厂面经、学习笔记、源码讲义、实战项目、大纲路线、电子书籍、讲解视频,并且后续会持续更新

需要这些体系化资料的朋友,可以加我V获取:vip1024c (备注嵌入式)

如果你需要这些资料,可以戳这里获取

import os
from tqdm import tqdm
from time import time
# Allow duplicate OpenMP runtimes (common torch + matplotlib clash on Windows).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from IPython import display

import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters in plots
from sklearn.metrics import confusion_matrix

import torchvision
import torch.utils.data.dataloader as loader
import torch.utils.data as Dataset
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
from torchvision import models


2、数据集下载以及数据增强



对训练集进行数据增强并通过ToTensor实例将图像数据从PIL类型变换tensor类型

# Training pipeline: random augmentation, then PIL -> tensor, then
# normalise the single grey channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomGrayscale(),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# Test pipeline: no augmentation, only tensor conversion + normalisation.
transform1 = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

mnist_train = torchvision.datasets.FashionMNIST(
    root="data", train=True, transform=transform, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
    root="data", train=False, transform=transform1, download=True)

BATCH_SIZE = 100
# Shuffle only the training set; keep test order stable for evaluation.
trainloader = loader.DataLoader(mnist_train, batch_size=BATCH_SIZE, shuffle=True)
testloader = loader.DataLoader(mnist_test, batch_size=BATCH_SIZE, shuffle=False)


二、观察数据集图片:



# Text labels for the 10 Fashion-MNIST classes, indexed by target id.
labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
          'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']

def show_images(imgs, num_rows, num_cols, targets, labels=None, scale=1.5):
    """Plot a grid of images, optionally titled with their class labels.

    Args:
        imgs: iterable of images (torch tensors or PIL images).
        num_rows, num_cols: grid layout.
        targets: class index for each image.
        labels: optional list mapping class index -> text label.
        scale: size multiplier per subplot.

    Returns:
        Flattened array of the matplotlib axes used.
    """
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for ax, img, target in zip(axes, imgs, targets):
        if torch.is_tensor(img):
            ax.imshow(img.numpy())
        else:
            ax.imshow(img)  # PIL image
        # Hide ticks: each cell shows only the image (and its title).
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if labels:
            ax.set_title('{}-'.format(target) + labels[target])
    # Hoisted out of the loop -- one global spacing adjustment is enough.
    plt.subplots_adjust(hspace=0.35)
    return axes

将dataloader转换成迭代器才可以使用next方法

# A DataLoader must be wrapped in iter() before next() can pull one batch.
X, y = next(iter(data.DataLoader(mnist_train, batch_size=24,shuffle = True)))
# Drop the channel dim: (24, 1, 28, 28) -> (24, 28, 28) so imshow gets 2-D arrays.
show_images(X.reshape(24, 28, 28), 3, 8, labels=labels, targets = y)


![](https://img-blog.csdnimg.cn/8c7a67de9395494da4009f7792983c39.png?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBA5bCP5paH5aSn5pWw5o2u,size_20,color_FFFFFF,t_70,g_se,x_16)


三、建立模型


1、模型一:三层卷积加两层全连接,使用dropout层



class Net(nn.Module):
    """Model 1: three conv layers + two fully connected layers, with dropout.

    Expects (N, 1, 28, 28) Fashion-MNIST batches; returns (N, 10) logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Spatial sizes: 28 -> 27 -> 26 -> 25 -> 24 -> 24 -> 8, 160 channels out.
        self.conv = nn.Sequential(
            nn.Conv2d(1, 40, 2),
            nn.ReLU(),
            nn.MaxPool2d(2, 1),
            nn.Conv2d(40, 80, 2),
            nn.ReLU(),
            nn.MaxPool2d(2, 1),
            nn.Conv2d(80, 160, 3, padding=1),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.MaxPool2d(3, 3),
        )

        # Two FC layers, per the "两层全连接" description. The stray
        # Linear(120,84)/(84,42) lines in the original paste had mismatched
        # dimensions (200 -> 120 cannot chain) and were removed.
        self.classifier = nn.Sequential(
            nn.Linear(160 * 8 * 8, 200),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(200, 10),
        )

    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size(0), -1)  # flatten per sample: (N, 160*8*8)
        x = self.classifier(x)
        return x

2、模型二:参考vgg模型使用两个vgg块和两个全连接,使用批标准化



训练30个epochs后测试集准确率高达93.8%

class Net(nn.Module):
    """Model 2: two VGG-style blocks (conv-conv-pool-BN-ReLU) + two FC layers.

    Expects (N, 1, 28, 28) inputs; returns (N, 10) logits.
    The article reports ~93.8% test accuracy after 30 epochs.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Block 1: 28 -> 30 (padded 1x1 conv) -> 30 -> 15 after pooling.
        self.conv1 = nn.Conv2d(1, 128, 1, padding=1)
        self.conv2 = nn.Conv2d(128, 128, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu1 = nn.ReLU()

        # Block 2: 15 -> 15 -> 8 after padded pooling.
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv4 = nn.Conv2d(256, 256, 3, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2, padding=1)
        self.bn2 = nn.BatchNorm2d(256)
        self.relu2 = nn.ReLU()

        self.fc5 = nn.Linear(256 * 8 * 8, 512)
        # nn.Dropout (was Dropout2d): the input here is a flattened
        # (N, features) tensor; Dropout2d is meant for (N, C, H, W) maps.
        self.drop1 = nn.Dropout()
        self.fc6 = nn.Linear(512, 10)

    def forward(self, x):
        x = self.relu1(self.bn1(self.pool1(self.conv2(self.conv1(x)))))
        x = self.relu2(self.bn2(self.pool2(self.conv4(self.conv3(x)))))
        #print(" x shape ",x.size())
        x = x.view(-1, 256 * 8 * 8)  # flatten; batch dim inferred
        x = F.relu(self.fc5(x))
        x = self.drop1(x)
        x = self.fc6(x)
        return x

四、训练前准备:


1、模型函数初始化



net =Net() # instantiate the model
loss = nn.CrossEntropyLoss() # cross-entropy loss for 10-class classification
optimizer = optim.Adam(net.parameters(),lr = 0.001) # Adam optimizer (original comment said SGD, but this is Adam)



xavier初始化

def init_xavier(model):
    """Apply Xavier (Glorot) uniform init, with ReLU gain, to every
    Conv2d/Linear weight in *model*. Biases are left untouched."""
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))

凯明初始化

def init_kaiming(model):
    """Apply Kaiming (He) normal init (fan_out mode, ReLU nonlinearity) to
    every Conv2d/Linear weight in *model*. Biases are left untouched."""
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

# Kaiming init suits the ReLU networks defined above.
init_kaiming(net)


2、使用GPU(无则自动使用CPU)



“”“使用GPU”“”
def use_gpu(net):
device = torch.device(“cuda:0” if torch.cuda.is_available() else “cpu”)
net.to(device)
gpu_nums = torch.cuda.device_count()
if gpu_nums > 1:
print(“Let’s use”, gpu_nums, “GPUs”)
net = nn.DataParallel(net)
elif gpu_nums == 1:
print(“Let’s use GPU”)
else:
print(“Let’s use CPU”)
return device


3、编写模型训练程序辅助函数


(1)可视化训练效果动画函数



class Animator:  #@save
    """Incrementally redraw curves in place -- a live-updating training plot."""

    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(5, 3.5)):
        # Draw multiple lines incrementally.
        if legend is None:
            legend = []

        # Use vector (SVG) output for crisp inline figures.
        display.set_matplotlib_formats('svg')
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Capture the axis configuration in a lambda so it can be re-applied
        # after every redraw (cla() wipes it).
        self.config_axes = lambda: self.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, *y):
        # Append one x value paired with len(y) curve values.
        n = len(y)
        x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)

        self.axes[0].cla()  # clear the active axes before redrawing
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt, linewidth=2)

        self.axes[0].set_yticks(ticks=np.linspace(0, 1, 11))
        self.config_axes()
        display.display(self.fig)
        # Clearing the output redraws the figure in place -> animation effect.
        display.clear_output(wait=True)

    def set_axes(self, axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
        # Configure the matplotlib axes (grid, labels, scales, limits, legend).
        axes.grid(True)
        axes.set_title("gaojianwen")
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)
        axes.set_xscale(xscale)
        axes.set_yscale(yscale)
        axes.set_xlim(xlim)
        axes.set_ylim(ylim)
        if legend:
            axes.legend(legend)

(2)累加器(存储中间数据,如准确率等)



class Accumulator:
    """Accumulate n running sums (e.g. total loss, correct count, samples)."""

    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        # Element-wise add *args* onto the stored sums.
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        # Zero out every sum, keeping the same length.
        self.data = [0.0] * len(self.data)

    def __getitem__(self, index):
        return self.data[index]



**收集整理了一份《2024年最新物联网嵌入式全套学习资料》,初衷也很简单,就是希望能够帮助到想自学提升的朋友。**
![img](https://img-blog.csdnimg.cn/img_convert/6aca8991caf2a969e5387ae6d42b3ffb.png)
![img](https://img-blog.csdnimg.cn/img_convert/21b799bb216862c20b96d13b54a813c4.png)

**[如果你需要这些资料,可以戳这里获取](https://bbs.csdn.net/topics/618679757)**

**需要这些体系化资料的朋友,可以加我V获取:vip1024c (备注嵌入式)**

**一个人可以走的很快,但一群人才能走的更远!不论你是正从事IT行业的老鸟或是对IT行业感兴趣的新人**

**都欢迎加入我们的的圈子(技术交流、学习资源、职场吐槽、大厂内推、面试辅导),让我们一起学习成长!**

网嵌入式全套学习资料》,初衷也很简单,就是希望能够帮助到想自学提升的朋友。**
[外链图片转存中...(img-vBMRsb6a-1715894981327)]
[外链图片转存中...(img-xPRJXvAC-1715894981327)]

**[如果你需要这些资料,可以戳这里获取](https://bbs.csdn.net/topics/618679757)**

**需要这些体系化资料的朋友,可以加我V获取:vip1024c (备注嵌入式)**

**一个人可以走的很快,但一群人才能走的更远!不论你是正从事IT行业的老鸟或是对IT行业感兴趣的新人**

**都欢迎加入我们的的圈子(技术交流、学习资源、职场吐槽、大厂内推、面试辅导),让我们一起学习成长!**

  • 24
    点赞
  • 17
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
好的,我可以为您提供一份基于PyTorch实现LeNet-5模型的代码,并使用FashionMNIST数据集进行训练。 首先,您需要安装PyTorch和torchvision库,以便使用LeNet-5模型和FashionMNIST数据集。您可以使用以下命令在终端中安装它们: ``` pip install torch pip install torchvision ``` 然后,您可以使用以下代码来定义LeNet-5模型: ```python import torch.nn as nn import torch.nn.functional as F class LeNet5(nn.Module): def __init__(self): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(1, 6, kernel_size=5, stride=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.fc1 = nn.Linear(16 * 4 * 4, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool1(F.relu(self.conv1(x))) x = self.pool2(F.relu(self.conv2(x))) x = x.view(-1, 16 * 4 * 4) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x ``` 这个LeNet-5模型定义了两个卷积层和三个全连接层。在forward()函数中,我们使用ReLU激活函数和max pooling操作来提取特征,并将特征传递到全连接层中进行分类。 接下来,您可以使用以下代码来加载FashionMNIST数据集并进行训练: ```python import torch import torch.nn as nn import torch.optim as optim import torchvision.transforms as transforms import torchvision.datasets as datasets # 定义一些超参数 batch_size = 64 learning_rate = 0.01 num_epochs = 10 # 加载FashionMNIST数据集 transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]) train_dataset = datasets.FashionMNIST(root='./data', train=True, transform=transform, download=True) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True) # 实例化LeNet-5模型和损失函数 model = LeNet5() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=learning_rate) # 训练模型 for epoch in range(num_epochs): for i, (images, labels) in enumerate(train_loader): # 前向传播和反向传播 outputs = model(images) loss = criterion(outputs, labels) optimizer.zero_grad() loss.backward() optimizer.step() # 每100步打印一次日志 if (i + 1) % 100 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 
1, len(train_loader), loss.item())) ``` 在这个训练循环中,我们首先使用SGD优化器和交叉熵损失函数实例化了LeNet-5模型。然后,我们将FashionMNIST数据集加载到train_loader中,并使用train_loader在每个epoch中进行训练。对于每个batch,我们首先执行前向传播,计算输出和损失,然后执行反向传播并更新模型参数。最后,我们在每个epoch的日志中记录损失值。 希望这个代码对您有所帮助!
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值