本文主要实现CNN卷积神经网络实现MNIST手写数字识别中的前向传播,其中的参数(权重与偏置)由上一篇文章“CNN卷积神经网络实现MNIST手写数字识别(一)”训练得到
本文参考:https://blog.csdn.net/Yang8465/article/details/90412314
其源码如下:
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 11:12:09 2021
@author: ZZJin
"""
# 本代码参考网上教程,完成卷积神经网络的前向传播,以与自己写的前向传播对比数据
# 参考网址:https://blog.csdn.net/Yang8465/article/details/90412314
# 测试可用 20210413
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os
import numpy as np
import sys
import math#导入math模块
import time
sys.path.append(r"D:\Anaconda_project\CNNInFPGA\tf2.0_CNN_keras")
from Function import conv2d_33
from Function import maxpol_22
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress TensorFlow's informational console logging
IMAGE_SIZE = 28         # input images are 28x28 pixels
NUM_CHANNELS = 1        # greyscale, single channel
CONV1_SIZE = 3          # kernel size of the first convolution layer
CONV1_KERNEL_NUM = 32   # number of kernels in the first convolution layer
CONV2_SIZE = 3          # kernel size of the second convolution layer
CONV2_KERNEL_NUM = 64   # number of kernels in the second convolution layer
OUTPUT_NODE = 10        # 10-way classification output
FC_SIZE = 512           # hidden fully-connected layer width
#
# Load the MNIST dataset (downloads on first use).
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
# Scale pixel values from [0, 255] down to [0.0, 1.0].
x1 = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255.
y1 = tf.convert_to_tensor(y_test, dtype=tf.int32)
# Load the pre-trained network parameters saved by the training script.
# FIX: the original monkey-patched np.load.__defaults__ to force
# allow_pickle=True (a fragile, NumPy-version-dependent hack, and the saved
# reference `old = np.load` was never used to restore it). Pass the supported
# keyword argument instead; the .npy file holds an object array of weights.
loadData = np.load('CNN_weight.npy', allow_pickle=True)
# def get_weight(shape,regularizer):
# #正态分布生成,去掉最大偏离点的
# w = tf.Variable(tf.truncated_normal(shape,stddev=0.1))
# if regularizer!=None:
# tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(regularizer)(w))
# return w
# 偏置B生成函数,初始值为0
# def get_bias(shape):
# b = tf.Variable(tf.zeros(shape))
# return b
def conv2d(x, w):
    """Convolve input x with kernel w: unit stride, 'VALID' (no) padding."""
    unit_strides = [1, 1, 1, 1]  # [batch, height, width, channel] strides
    return tf.nn.conv2d(x, w, strides=unit_strides, padding='VALID')
def max_pool_2x2(x):
    """Downsample x with 2x2 max pooling at stride 2, 'VALID' (no) padding."""
    window = [1, 2, 2, 1]  # pool window and stride: 2x2 over the spatial dims
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='VALID')
# def forward(x,train,regularizer):
# Forward pass of the pre-trained CNN, run eagerly at module level on one image.
# Layer 1: convolution weights and bias come from the trained parameter file.
x = tf.reshape(x1[2], [1, 28, 28, 1])  # test image #2, reshaped to NHWC with batch=1
conv1_w = loadData[0]  # conv1 kernel; presumably (3, 3, 1, 32) — TODO confirm against the training script
conv1_b = loadData[1]  # conv1 bias, one value per output channel
conv1 = conv2d(x,conv1_w)
# Add the bias to conv1, then apply the ReLU activation.
bias1 = tf.nn.bias_add(conv1,conv1_b)
relu1 = tf.nn.relu(bias1)
# 2x2 max pooling, stride 2 (halves the spatial dimensions).
pool1 = max_pool_2x2(relu1)
conv2_w = loadData[2]  # conv2 kernel
conv2_b = loadData[3]  # conv2 bias
conv2 = conv2d(pool1, conv2_w)
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
pool2= max_pool_2x2(relu2)# output of the second convolution/pooling stage
pool_shape = pool2.get_shape().as_list()# static shape of pool2 as a list: [batch, h, w, channels]
# Features per example: height * width * depth of the pooled output.
nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]
# pool_shape[0] is the batch size (1 here).
# Flatten pool2 into pool_shape[0] rows of nodes columns for the dense layers.
reshaped = tf.reshape(pool2,[pool_shape[0],nodes])
# Fully connected network
# Dense layer 1 with ReLU activation.
fc1_w = loadData[4]
fc1_b = loadData[5]
fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_w)+fc1_b)
# if train:fc1 = tf.nn.dropout(fc1,0.5)
# Dense layer 2: 10 raw logits, then softmax for class probabilities.
fc2_w = loadData[6]
fc2_b = loadData[7]
y = tf.matmul(fc1,fc2_w)+fc2_b
y_softmax = tf.nn.softmax(y)
# return y