Before implementing, let's look at the overall CNN architecture:
Convolution layers: each uses 3*3 filters, described by height h, width w, and depth c (number of channels); the input images here are 28*28.
Each layer's input depth must match the depth of the previous layer's output. The fully connected layers take the feature maps produced after convolution and pooling and flatten them into a single vector.
The output size after a convolution is h_out = (h_in - F + 2P)/S + 1, where F is the filter size, P the padding, and S the stride. With this formula we can compute the size after each layer (verified by the small helper below):
After the first convolution: (28 - 3 + 2*1)/1 + 1 = 28
After the first pooling layer: 14*14
After the second convolution: (14 - 3 + 2*1)/1 + 1 = 14
After the second pooling layer: 7*7
fc2 produces the scores for our classification.
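As a quick arithmetic check (this helper is an addition, not part of the original code), the formula can be applied layer by layer:

def conv_out_size(h_in, f, p, s):
    # output size of a convolution: (h_in - f + 2p) / s + 1
    return (h_in - f + 2 * p) // s + 1

print(conv_out_size(28, 3, 1, 1))  # 28 -- conv1: 3*3 filter, padding 1, stride 1
print(28 // 2)                     # 14 -- pool1: 2*2 window, stride 2
print(conv_out_size(14, 3, 1, 1))  # 14 -- conv2
print(14 // 2)                     # 7  -- pool2; 7*7*128 values feed fc1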
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print ("MNIST ready")
print(np.shape(trainimg))  # (55000, 784)
n_input = 784
n_output = 10
# filter parameters: height, width, input depth, number of filters
stddev = 0.1
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),  # [filter height, filter width, input depth, number of filters]: 3*3 filters over the 1-channel input, producing 64 feature maps
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1)),  # input depth is 64 because the previous layer produced 64 feature maps
    'wd1': tf.Variable(tf.random_normal([7*7*128, 1024], stddev=0.1)),  # fully connected layer 1: flattened 7*7*128 feature maps -> 1024 units
    'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))  # fully connected layer 2: 1024 units -> class scores
}
biases = {
'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
'bc2': tf.Variable(tf.random_normal([128], stddev=0.1)),
'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
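As a quick sanity check (an addition, not part of the original walkthrough), we can count the trainable parameters these two dictionaries define; the shapes are static, so no session is needed:

total = 0
for name, var in list(weights.items()) + list(biases.items()):
    shape = var.get_shape().as_list()
    count = int(np.prod(shape))
    print(name, shape, count)
    total += count
print("total trainable parameters:", total)  # the 7*7*128 -> 1024 layer dominates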
def conv_basic(_input, _w, _b, _keepratio):
    # INPUT: TensorFlow expects a 4-D tensor [n, h, w, c], where n is the batch
    # size, h the height, w the width, and c the number of channels (1 here,
    # since MNIST images are grayscale). The -1 lets TensorFlow infer the batch
    # dimension, because the other three dimensions are known.
    _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])
    # CONV LAYER 1: tf.nn.conv2d requires strides in a 4-D format
    # [batch, height, width, channels]. The first and last entries are almost
    # always 1; for a stride of 2 you would set the h and w entries to 2.
    _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
    #_mean, _var = tf.nn.moments(_conv1, [0, 1, 2])
    #_conv1 = tf.nn.batch_normalization(_conv1, _mean, _var, 0, 1, 0.0001)
    _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))
    # ksize is the pooling window, also [batch, height, width, channels]
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # _keepratio is the keep probability for dropout; the second conv/pool
    # block below follows the same pattern
    _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)
    # CONV LAYER 2
    _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
    #_mean, _var = tf.nn.moments(_conv2, [0, 1, 2])
    #_conv2 = tf.nn.batch_normalization(_conv2, _mean, _var, 0, 1, 0.0001)
    _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
    # VECTORIZE: get_shape().as_list() gives the weight's input dimension (7*7*128)
    _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
    # FULLY CONNECTED LAYER 1
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    # FULLY CONNECTED LAYER 2
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    # RETURN
    out = {'input_r': _input_r, 'conv1': _conv1, 'pool1': _pool1, 'pool_dr1': _pool_dr1,
           'conv2': _conv2, 'pool2': _pool2, 'pool_dr2': _pool_dr2, 'dense1': _dense1,
           'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out}
    return out
print ("CNN READY")
a = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1))
print (a)
a = tf.Print(a, [a], "a: ")
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
help(tf.nn.conv2d)    # check the docs; note the two choices for padding
help(tf.nn.max_pool)
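A tiny illustration of those two padding modes (img_demo and k_demo are names introduced for this sketch; only static shapes are inspected, so nothing needs to run in a session):

img_demo = tf.placeholder(tf.float32, [None, 28, 28, 1])
k_demo = tf.Variable(tf.random_normal([3, 3, 1, 1], stddev=0.1))
same_out = tf.nn.conv2d(img_demo, k_demo, strides=[1, 1, 1, 1], padding='SAME')
valid_out = tf.nn.conv2d(img_demo, k_demo, strides=[1, 1, 1, 1], padding='VALID')
print(same_out.get_shape().as_list())   # [None, 28, 28, 1] -- zero-padding keeps the size
print(valid_out.get_shape().as_list())  # [None, 26, 26, 1] -- no padding: 28 - 3 + 1 = 26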
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
keepratio = tf.placeholder(tf.float32)
# FUNCTIONS
_pred = conv_basic(x, weights, biases, keepratio)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1))
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))
init = tf.global_variables_initializer()
# SAVER
print ("GRAPH READY")
sess = tf.Session()
sess.run(init)
training_epochs = 15
batch_size = 16
display_step = 1
for epoch in range(training_epochs):
    avg_cost = 0.
    #total_batch = int(mnist.train.num_examples/batch_size)
    total_batch = 10  # running the full network over all batches is slow, so only ten batches are used per epoch
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Fit training using batch data
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio: 0.7})
        # Compute average loss
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.}) / total_batch
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.})
        print(" Training accuracy: %.3f" % (train_acc))
        #test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel, keepratio:1.})
        #print (" Test accuracy: %.3f" % (test_acc))
print("OPTIMIZATION FINISHED")