LeNet-5 implemented in PyTorch and TensorFlow

Model architecture
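
The original architecture diagram is omitted here; the layer-by-layer shapes below are derived from the PyTorch code that follows, for a 1x28x28 grayscale input (the TensorFlow version pads its second conv with 'same', so its flattened size is 16*7*7 = 784 instead):

Input                             1 x 28 x 28
Conv 6 @ 5x5, stride 1, pad 2  -> 6 x 28 x 28
AvgPool 2x2, stride 2          -> 6 x 14 x 14
Conv 16 @ 5x5, stride 1        -> 16 x 10 x 10
AvgPool 2x2, stride 2          -> 16 x 5 x 5
Flatten                        -> 400
Dense 400 -> 200 -> 84 -> 10 logits (one per digit class)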

#readdata.py

import numpy as np
import os
import cv2
import random

def readdata_all(path, train_num, test_num, use_num):
    # Read MNIST digit images from `path` and write balanced train/test/use
    # splits to .npy files. Filenames are assumed to look like
    # '<label>.<index>.<ext>', so the label is the part before the first dot.

    img_all = []
    label_all = []
    single_num = int((train_num + test_num + use_num) / 10)  # images needed per class
    jishu = np.zeros(10)  # per-class counter
    for filename in os.listdir(path):
        img = cv2.imread(os.path.join(path, filename), 0)  # flag 0: grayscale, 1: 3-channel BGR
        xx = filename.split(".")
        if sum(jishu) < 10 * single_num:
            if jishu[int(xx[0])] < single_num:
                jishu[int(xx[0])] += 1
                img_all.append(img)
                label_all.append(int(xx[0].strip()))  # each img has shape (28, 28)

    num = list(range(train_num + test_num + use_num))
    random.shuffle(num)  # shuffle the indices
    train_imag = []
    train_label = []
    test_imag = []
    test_label = []
    use_imag = []
    use_label = []
    jishu = np.zeros(10)  # per-class counter, reused for the split
    single_num_train = int(train_num / 10)              # per-class cutoff for the train split
    single_num_test = int((train_num + test_num) / 10)  # cumulative per-class cutoff for train+test
    for n in range(train_num+test_num+use_num):
        jishu[label_all[num[n]]] += 1
        if jishu[label_all[num[n]]] <= single_num_train:
            train_imag.append(img_all[num[n]])
            train_label.append(label_all[num[n]])
        elif jishu[label_all[num[n]]] <= single_num_test:
            test_imag.append(img_all[num[n]])
            test_label.append(label_all[num[n]])
        else:
            use_imag.append(img_all[num[n]])
            use_label.append(label_all[num[n]])

    np.save('train_imag.npy', train_imag)
    np.save('train_label.npy', train_label)

    np.save('test_imag.npy', test_imag)
    np.save('test_label.npy', test_label)

    np.save('use_imag.npy', use_imag)
    np.save('use_label.npy', use_label)

    return (train_imag,train_label,test_imag,test_label,use_imag,use_label)

def readdata_train():
    train_imag = (np.load('train_imag.npy', allow_pickle=True)).tolist()
    train_label = (np.load('train_label.npy', allow_pickle=True)).tolist()
    return (train_imag,train_label)

def readdata_test():
    test_imag = (np.load('test_imag.npy', allow_pickle=True)).tolist()
    test_label = (np.load('test_label.npy', allow_pickle=True)).tolist()
    return (test_imag,test_label)

def readdata_use():
    use_imag = (np.load('use_imag.npy', allow_pickle=True)).tolist()
    use_label = (np.load('use_label.npy', allow_pickle=True)).tolist()
    return (use_imag,use_label)





if __name__ == '__main__':
    path = 'G:\\csdn\\1\\minist_pictures\\mnist_data'
    # Note: train.py and use.py below assume splits of 10000/10000/20, so
    # regenerate with matching sizes (or adjust their train_num/test_num).
    train_num = 1000
    test_num = 1000
    use_num = 20
    readdata_all(path, train_num, test_num, use_num)
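
A quick way to confirm the saved split is balanced; a minimal sanity check (a hypothetical helper, assuming readdata_all has already written the .npy files into the working directory):

import numpy as np

train_label = np.load('train_label.npy', allow_pickle=True).astype(int)
print(len(train_label))                        # expected: train_num
print(np.bincount(train_label, minlength=10))  # expected: train_num / 10 per class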

【PyTorch】

#model.py

import torch
import torch.nn as nn


# Model definition (a LeNet-5 style convolutional network)
class lenet5Net(nn.Module):
    def __init__(self):
        # lenet5Net subclasses nn.Module, so the parent constructor must run
        # first: super().__init__() sets up parameter and submodule
        # registration before any layers are assigned below.
        super(lenet5Net, self).__init__()
        # Input: (N, C_in, H_in, W_in) or (C_in, H_in, W_in).
        # kernel_size / stride / padding must use matching forms:
        # either all ints or all 2-tuples.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=(5, 5), stride=(1, 1),
                               padding=(2, 2))  # 1x28x28 -> 6x28x28
        self.AvgPool2d1 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2),
                               padding=(0, 0))  # -> 6x14x14
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(5, 5), stride=(1, 1),
                               padding=(0, 0))  # -> 16x10x10
        self.AvgPool2d2 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2),
                               padding=(0, 0))  # -> 16x5x5
        self.Flatten1 = nn.Flatten()            # -> 400
        self.Ln1 = nn.Linear(5 * 5 * 16, 200)
        self.Ln2 = nn.Linear(200, 84)
        self.Ln3 = nn.Linear(84, 10)            # one output per digit class

    def forward(self, x):  # forward pass
        out = self.conv1(x)
        out = torch.sigmoid(out)  # torch.sigmoid (F.sigmoid is deprecated)
        out = self.AvgPool2d1(out)
        out = self.conv2(out)
        out = torch.sigmoid(out)
        out = self.AvgPool2d2(out)
        out = self.Flatten1(out)
        out = self.Ln1(out)
        out = torch.sigmoid(out)
        out = self.Ln2(out)
        out = torch.sigmoid(out)
        out = self.Ln3(out)  # no activation: CrossEntropyLoss expects raw logits

        return out

if __name__ == '__main__':
   print(lenet5Net())
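
To verify that the layer dimensions line up (in particular the 5 * 5 * 16 input to Ln1), push a dummy batch through the network; a minimal check, assuming it is appended to model.py:

import torch

net = lenet5Net()
x = torch.zeros(1, 1, 28, 28)  # one dummy grayscale image
print(net(x).shape)            # expected: torch.Size([1, 10])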

#train.py

import torch.nn as nn
from torch import optim
from model import lenet5Net
from readdata import readdata_all
from readdata import readdata_train
from readdata import readdata_test
import numpy as np
import torch
import cv2
import matplotlib.pyplot as plt


def train():
   # 1. Hyperparameters
   learning_rate = 0.001
   path = 'G:\\csdn\\1\\minist_pictures\\mnist_data'
   # train_num/test_num must match the split sizes saved by readdata_all.
   train_num = 10000
   test_num = 10000
   use_num = 20

   epochs = 50
   batch_size = 50

   # 2. Build the model
   net = lenet5Net()
   # 3. Load the datasets
   # train_imag,train_label,test_imag,test_label,use_imag,use_label = readdata_all(path, train_num, test_num, use_num)
   train_imag, train_label = readdata_train()
   test_imag, test_label = readdata_test()

   # 4. Loss function and optimizer
   # Alternatives (mean squared error + plain SGD):
   # criterion = nn.MSELoss()
   # optimizer = optim.SGD(net.parameters(), lr=learning_rate)

   # Cross entropy for multi-class classification: takes raw logits and
   # integer class labels.
   criterion = nn.CrossEntropyLoss()
   # Adam combines momentum with per-parameter adaptive learning rates and
   # typically converges faster than plain SGD.
   optimizer = optim.Adam(net.parameters(), lr=learning_rate)

   # 5. Training loop
   my_list_train = [i for i in range(train_num)]
   my_list_test = [i for i in range(test_num)]
   for i in range(epochs):
      net.train()
      for j in my_list_train[(batch_size-1)::batch_size]:
         # Slice out one batch
         images = train_imag[j-batch_size+1:j+1]               # (batch_size, 28, 28)
         labels = train_label[j-batch_size+1:j+1]              # (batch_size,)
         images = torch.tensor(np.array(images)).float()
         labels = torch.tensor(labels)
         images = images.view(batch_size, 1, 28, 28)           # (batch_size, 1, 28, 28)
         # Forward pass (pixel values scaled to [0, 1])
         outputs = net(images/255.0)
         # Compute the loss
         loss = criterion(outputs, labels)
         _, predicted = torch.max(outputs.data, 1)
         correct = (predicted == labels).sum().item()
         total = labels.size(0)
         # Zero the gradients before backward so they don't accumulate
         optimizer.zero_grad()
         loss.backward()   # backpropagate
         optimizer.step()  # update the parameters
         if (j+1) % (batch_size*30) == 0:
            print(
               f'Epoch: {i + 1}, Step: {j + 1}, Loss: {loss.item():.4f}, trainAccuracy: {correct / total * 100:.2f}%')

      # 6. Test after each epoch. (Author's note: an earlier test split had
      # images and labels misaligned, which took a long time to track down.)
      correct = 0
      total = 0
      with torch.no_grad():
         net.eval()
         for j in my_list_test[(batch_size - 1)::batch_size]:
            images = test_imag[j - batch_size + 1:j + 1]       # (batch_size, 28, 28)
            labels = test_label[j - batch_size + 1:j + 1]      # (batch_size,)

            images = torch.tensor(np.array(images)).float()
            labels = torch.tensor(labels)
            images = images.view(batch_size, 1, 28, 28)        # (batch_size, 1, 28, 28)

            # # Debug: visualize the first image of the batch
            # images2 = images[0, 0, :, :]
            # if 0:
            #    cv2.imwrite("text.png", images2.numpy())
            # else:
            #    plt.imshow(images2.numpy(), cmap="gray")
            #    plt.show()

            # Forward pass (pixel values scaled to [0, 1])
            outputs = net(images/255.0)

            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            # print(predicted)
            # print(labels)

      print(f'testAccuracy: {correct / total * 100:.2f}%')
   torch.save(net, 'lenet.pkl')  # pickle the whole module; use.py loads it with torch.load
   return 0


if __name__ == '__main__':
   train()
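
The manual index slicing above works, but torch.utils.data can do the batching and per-epoch shuffling in one place. A minimal sketch of that alternative, built on the same readdata helpers (the loss/backward/step code would stay exactly as in train()):

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
from model import lenet5Net
from readdata import readdata_train

train_imag, train_label = readdata_train()
images = torch.tensor(np.array(train_imag)).float().view(-1, 1, 28, 28) / 255.0
labels = torch.tensor(train_label)
loader = DataLoader(TensorDataset(images, labels), batch_size=50, shuffle=True)

net = lenet5Net()
for batch_images, batch_labels in loader:
    outputs = net(batch_images)  # then loss / backward / step exactly as in train()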

#use.py

import numpy as np
import torch
from model import lenet5Net  # the class definition must be importable for torch.load to unpickle the model
from readdata import readdata_use
import cv2
import matplotlib.pyplot as plt


def use():
    # 1. Parameters
    use_num = 20
    batch_size = 1

    # 2. Load the trained model
    # (recent PyTorch versions may need torch.load("lenet.pkl", weights_only=False)
    # to unpickle a full module)
    net = torch.load("lenet.pkl")

    # 3. Load the inference set
    use_imag, use_label = readdata_use()

    # No loss function or optimizer is needed for inference.

    # 4. Run inference
    correct = 0
    total = 0
    my_list_use = [i for i in range(use_num)]
    with torch.no_grad():
        net.eval()
        for j in my_list_use[(batch_size - 1)::batch_size]:
            images = use_imag[j - batch_size + 1:j + 1]        # (batch_size, 28, 28)
            labels = use_label[j - batch_size + 1:j + 1]       # (batch_size,)
            images = torch.tensor(np.array(images)).float()
            labels = torch.tensor(labels)
            images = images.view(batch_size, 1, 28, 28)        # (batch_size, 1, 28, 28)
            # Visualize the digit being classified
            images2 = images[0, 0, :, :]                       # (28, 28); plt.imshow needs a 2-D array for grayscale
            if 0:
               cv2.imwrite("text.png", images2.numpy())
            else:
               plt.imshow(images2.numpy(), cmap="gray")
               plt.show()
            # Forward pass (pixel values scaled to [0, 1])
            outputs = net(images / 255.0)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            print(predicted)
            print(labels)
    print(f'useAccuracy: {correct / total * 100:.2f}%')
    return 0


if __name__ == '__main__':
    use()
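
train.py pickles the whole module with torch.save(net, ...), which ties the checkpoint to the exact class definition and module path. The more portable PyTorch pattern is to save only the parameters; a sketch of that alternative (the file name lenet_state.pkl is made up here, and net is assumed to be the trained model from train()):

import torch
from model import lenet5Net

# Save only the parameters...
torch.save(net.state_dict(), 'lenet_state.pkl')
# ...and later rebuild the architecture and restore them.
net2 = lenet5Net()
net2.load_state_dict(torch.load('lenet_state.pkl'))
net2.eval()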

【TensorFlow】

#model.py

import tensorflow as tf
import tensorflow.keras.layers as layers


class lenet5Net(tf.keras.Model):
    def __init__(self):
        super(lenet5Net, self).__init__()
        self.conv1 = layers.Conv2D(filters=6, strides=(1, 1), padding='same', kernel_size=(5, 5),
                                   input_shape=(28, 28, 1), activation='relu')
        self.AvgPool2d1 = layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')
        # self.bn1 = layers.BatchNormalization(axis=3)
        # Note: padding='same' here, unlike the PyTorch version's unpadded second
        # conv, so the flattened size is 16*7*7 = 784; the first Dense layer infers it.
        self.conv2 = layers.Conv2D(filters=16, strides=(1, 1), padding='same', kernel_size=(5, 5), activation='relu')
        self.AvgPool2d2 = layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')
        # self.bn2 = layers.BatchNormalization(axis=3)
        self.Flatten1 = layers.Flatten()
        self.Ln1 = layers.Dense(200, activation='relu')  # activation=tf.nn.sigmoid (function form) should behave the same as the string form
        self.Ln2 = layers.Dense(84, activation='relu')   # tried: sigmoid / relu / None
        # The final Dense layer also needs an activation here, otherwise training
        # converged poorly; SparseCategoricalCrossentropy(from_logits=False) expects
        # probabilities, for which a softmax would be the conventional choice.
        self.Ln3 = layers.Dense(10, activation='sigmoid')

    def call(self, x, training=True):  # forward pass
        out = self.conv1(x)
        out = self.AvgPool2d1(out)
        # out = self.bn1(out)
        out = self.conv2(out)
        out = self.AvgPool2d2(out)
        # out = self.bn2(out)
        out = self.Flatten1(out)
        out = self.Ln1(out)
        out = self.Ln2(out)
        out = self.Ln3(out)

        return out
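
As with the PyTorch model, a dummy forward pass confirms the output shape; note TensorFlow uses channels-last (NHWC) layout. A minimal check, assuming it is appended to the same model.py:

if __name__ == '__main__':
    net = lenet5Net()
    x = tf.zeros((1, 28, 28, 1))         # one dummy grayscale image, NHWC
    print(net(x, training=False).shape)  # expected: (1, 10)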

#train.py

import tensorflow as tf
from model import lenet5Net
from readdata import readdata_all
from readdata import readdata_train
from readdata import readdata_test
import numpy as np
import cv2
import matplotlib.pyplot as plt


def train():
   # 1. Hyperparameters
   learning_rate = 0.001
   path = 'G:\\csdn\\1\\minist_pictures\\mnist_data'
   # train_num/test_num must match the split sizes saved by readdata_all.
   train_num = 10000
   test_num = 10000
   use_num = 20

   epochs = 50
   batch_size = 50

   # 2. Build the model
   net = lenet5Net()
   # 3. Load the datasets and scale pixels to [0, 1]
   # train_imag,train_label,test_imag,test_label,use_imag,use_label = readdata_all(path, train_num, test_num, use_num)
   train_imag, train_label = readdata_train()
   test_imag, test_label = readdata_test()
   train_imag = ((np.array(train_imag)) / 255.0).tolist()
   test_imag = ((np.array(test_imag)) / 255.0).tolist()

   # 4. Loss function and optimizer
   # Alternatives (mean squared error + plain SGD):
   # criterion = tf.keras.losses.MeanSquaredError()
   # optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

   # Sparse categorical cross entropy: integer labels, probability outputs
   # (from_logits=False by default). CategoricalCrossentropy would need
   # one-hot labels instead.
   criterion = tf.keras.losses.SparseCategoricalCrossentropy()
   # Adam combines momentum with per-parameter adaptive learning rates and
   # typically converges faster than plain SGD.
   optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

   # 5. Training loop
   my_list_train = [i for i in range(train_num)]
   my_list_test = [i for i in range(test_num)]
   for i in range(epochs):
      for j in my_list_train[(batch_size-1)::batch_size]:
         # Slice out one batch
         images = train_imag[j-batch_size+1:j+1]               # (batch_size, 28, 28)
         labels = train_label[j-batch_size+1:j+1]              # (batch_size,)
         images = tf.reshape(images, (batch_size, 28, 28, 1))  # (batch_size, 28, 28, 1), NHWC
         with tf.GradientTape() as tape:
             # Forward pass
             outputs = net(images, training=True)
             # Loss: ground truth first, predictions second
             loss = criterion(labels, outputs)
         predicted = tf.argmax(outputs, axis=1)
         correct = int(tf.reduce_sum(tf.cast(predicted == labels, tf.int32)))
         total = len(labels)
         grads = tape.gradient(loss, net.trainable_variables)            # backpropagate
         optimizer.apply_gradients(zip(grads, net.trainable_variables))  # update the parameters
         if (j+1) % (batch_size*10) == 0:
            print(
               f'Epoch: {i + 1}, Step: {j + 1}, Loss: {float(loss):.4f}, trainAccuracy: {correct / total * 100:.2f}%')

      # 6. Test after each epoch. (Author's note: an earlier test split had
      # images and labels misaligned, which took a long time to track down.)
      correct = 0
      total = 0
      for j in my_list_test[(batch_size - 1)::batch_size]:
        images = test_imag[j - batch_size + 1:j + 1]            # (batch_size, 28, 28)
        labels = test_label[j - batch_size + 1:j + 1]           # (batch_size,)
        images = tf.reshape(images, (batch_size, 28, 28, 1))    # (batch_size, 28, 28, 1)

        outputs = net(images, training=False)

        predicted = tf.argmax(outputs, axis=1)
        # accumulate over all test batches
        correct += int(tf.reduce_sum(tf.cast(predicted == labels, tf.int32)))
        total += len(labels)

      print(f'testAccuracy: {correct / total * 100:.2f}%')
   net.save_weights('model_weight')  # saves in the TensorFlow checkpoint format
   return 0


if __name__ == '__main__':
   train()
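
Here too the index slicing can be replaced with tf.data, which handles shuffling and batching in one pipeline. A minimal sketch built on the same readdata helpers (the loss and gradient step would stay exactly as in train()):

import numpy as np
import tensorflow as tf
from model import lenet5Net
from readdata import readdata_train

train_imag, train_label = readdata_train()
images = np.array(train_imag, dtype=np.float32).reshape(-1, 28, 28, 1) / 255.0
labels = np.array(train_label)
dataset = tf.data.Dataset.from_tensor_slices((images, labels)).shuffle(len(labels)).batch(50)

net = lenet5Net()
for batch_images, batch_labels in dataset:
    with tf.GradientTape() as tape:
        outputs = net(batch_images, training=True)  # forward pass inside the tape
        # compute the loss and apply gradients exactly as in train() above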

#use.py

import tensorflow as tf
from model import lenet5Net
from readdata import readdata_use
import numpy as np
import cv2
import matplotlib.pyplot as plt


def use():
    # 1. Parameters
    use_num = 20
    batch_size = 1

    # 2. Rebuild the model and restore the trained weights
    # (for a subclassed model the variables are restored lazily, on the first call)
    net = lenet5Net()
    net.load_weights('model_weight')

    # 3. Load the inference set and scale pixels to [0, 1]
    use_imag, use_label = readdata_use()
    use_imag = ((np.array(use_imag)) / 255.0).tolist()

    # No loss function or optimizer is needed for inference.


    # 4. Run inference
    correct = 0
    total = 0
    my_list_use = [i for i in range(use_num)]
    for j in my_list_use[(batch_size - 1)::batch_size]:
        images = use_imag[j - batch_size + 1:j + 1]            # (batch_size, 28, 28)
        labels = use_label[j - batch_size + 1:j + 1]           # (batch_size,)
        images = tf.reshape(images, (batch_size, 28, 28, 1))   # (batch_size, 28, 28, 1)
        # Visualize the digit being classified
        images2 = images[0, :, :, 0]                           # (28, 28); plt.imshow needs a 2-D array for grayscale
        if 0:
           # pixels were already scaled to [0, 1], so scale back up before writing
           cv2.imwrite("text.png", (images2.numpy() * 255).astype(np.uint8))
        else:
           plt.imshow(images2.numpy(), cmap="gray")
           plt.show()
        # Forward pass
        outputs = net(images, training=False)

        predicted = tf.argmax(outputs, axis=1)
        correct += int(tf.reduce_sum(tf.cast(predicted == labels, tf.int32)))
        total += len(labels)
        print(predicted)
        print(labels)
    print(f'useAccuracy: {correct / total * 100:.2f}%')
    return 0


if __name__ == '__main__':
    use()
