1. Forward propagation: mnist_cnn_forward.py
import tensorflow as tf
IMAGE_SIZE = 28        # input images are 28*28
NUM_CHANNELS = 1       # number of image channels
CONV1_SIZE = 5         # kernel size of the first convolutional layer
CONV1_KERNEL_NUM = 32  # the first layer uses 32 kernels
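# second convolutional layer: same 5x5 kernel size, 64 kernels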
CONV2_SIZE = 5
CONV2_KERNEL_NUM = 64
OUTPUT_NODE = 10       # 10-class output (digits 0-9)
FC_SIZE = 512          # number of nodes in the fully connected hidden layer
# weight generation function
def get_weight(shape, regularizer):
    # truncated normal init: samples falling too far from the mean are discarded and re-drawn
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
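# note: the L2 terms collected under 'losses' are typically summed with the
# cross-entropy loss when the total loss is assembled in the training script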
# bias generation function, initialized to zeros
def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b
# convolution (stride 1, SAME padding)
def conv2d(x, w):  # x: input tensor, w: convolution kernel
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
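# 2x2 max pooling with stride 2: halves the spatial dimensions (e.g. 28x28 -> 14x14)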
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
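# forward pass: x is expected to have shape [batch, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS]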
def forward(x, train, regularizer):
    # first convolutional layer: initialize kernels w and biases b
    conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer)
    conv1_b = get_bias([CONV1_KERNEL_NUM])
    conv1 = conv2d(x, conv1_w)
    # add the bias to conv1 and apply the ReLU activation
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))
    # pooling
    pool1 = max_pool_2x2(relu1)
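    # shape after the first block: 28x28x1 -> conv (SAME) -> 28x28x32 -> 2x2 pool -> 14x14x32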
    conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM], regularizer)