TensorFlow (3): Recognizing the MNIST Handwritten Digit Dataset with a Convolutional Neural Network

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
mnist = input_data.read_data_sets('MNIST_data/',one_hot = True)
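Optionally, the split sizes can be checked right after loading (illustrative only; the standard split used by this loader is 55,000 training, 5,000 validation and 10,000 test images):

print('train:', mnist.train.num_examples,
      'validation:', mnist.validation.num_examples,
      'test:', mnist.test.num_examples)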

# Weight initialization: truncated_normal samples from a normal distribution truncated at two standard deviations; stddev is the standard deviation
def weight(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1),name='W')

# Bias initialization
def bias(shape):
    return tf.Variable(tf.constant(0.1, shape=shape),name='b')

# Convolution: strides has the form [1, X, Y, 1], where X and Y are the step sizes along the height and width of the input; padding controls how the border is handled ('SAME' pads so the output keeps the input's spatial size)
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')

# Max pooling: ksize has the form [1, X, Y, 1] and is the size of the pooling window; strides has the same form and gives the step size in the X and Y directions
def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],
                         strides=[1,2,2,1],
                         padding='SAME')
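With 'SAME' padding and stride 1 the convolution keeps the 28x28 spatial size, while each 2x2 max pool with stride 2 halves it (28 -> 14 -> 7). A quick sanity check of the helpers above (illustrative only; _tmp is a throwaway placeholder, not used later):

_tmp = tf.placeholder('float', shape=[None, 28, 28, 1])
print(conv2d(_tmp, weight([5,5,1,16])).shape)                # (?, 28, 28, 16)
print(max_pool_2x2(conv2d(_tmp, weight([5,5,1,16]))).shape)  # (?, 14, 14, 16)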

# Build the CNN -------------------------------------------------------------------------
# Input layer
with tf.name_scope('Input_Layer'):
    x = tf.placeholder('float', shape=[None, 784], name='x')
    x_image = tf.reshape(x,[-1,28,28,1])  # -1 lets TensorFlow infer the batch size; each 784-vector becomes a 28x28x1 image

# Convolution layer 1: 16 filters of size 5x5 over 1 input channel, output 28x28x16
with tf.name_scope('C1_Conv'):
    W1 = weight([5,5,1,16])
    b1 = bias([16])
    Conv1 = conv2d(x_image,W1)+b1
    C1_Conv = tf.nn.relu(Conv1)

# Pooling layer 1: output 14x14x16
with tf.name_scope('C1_pool'):
    C1_Pool = max_pool_2x2(C1_Conv)

# Convolution layer 2: 36 filters of size 5x5 over 16 input channels, output 14x14x36
with tf.name_scope('C2_Conv'):
    W2 = weight([5,5,16,36])
    b2 = bias([36])
    Conv2 = conv2d(C1_Pool,W2)+b2
    C2_Conv = tf.nn.relu(Conv2)

# Pooling layer 2: output 7x7x36
with tf.name_scope('C2_Pool'):
    C2_Pool = max_pool_2x2(C2_Conv)

# Flatten layer: reshape the 7x7x36 feature maps into a vector of length 7*7*36 = 1764
with tf.name_scope('D_Flat'):
    D_Flat = tf.reshape(C2_Pool,[-1,1764])

# Fully connected hidden layer (128 units) with dropout
with tf.name_scope('D_Hidden_Layer'):
    W3 = weight([1764,128])
    b3 = bias([128])
    D_Hidden = tf.nn.relu(tf.matmul(D_Flat,W3)+b3)
    D_Hidden_Dropout = tf.nn.dropout(D_Hidden,keep_prob=0.8)
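Note that with keep_prob hard-coded to 0.8, dropout also stays active when the validation and test sets are evaluated. A common variant (a sketch, not part of the original script) feeds keep_prob through a placeholder so it can be set to 0.8 during training and 1.0 during evaluation:

# Sketch of the placeholder variant (keep_prob here is a new, illustrative placeholder):
#   keep_prob = tf.placeholder('float', name='keep_prob')
#   D_Hidden_Dropout = tf.nn.dropout(D_Hidden, keep_prob=keep_prob)
# ...then add keep_prob:0.8 to the training feed_dict and keep_prob:1.0 when evaluating.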

# Output layer (10 classes)
with tf.name_scope('Output_Layer'):
    W4 = weight([128,10])
    b4 = bias([10])
    logits = tf.matmul(D_Hidden_Dropout,W4)+b4
    y_predict = tf.nn.softmax(logits)

# Optimizer: define the loss function (cross entropy) and the optimization method (Adam)
with tf.name_scope('optimizer'):
    y_label = tf.placeholder('float',shape=[None,10],name='y_label')
    loss_function = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_label))  # expects raw logits: the op applies softmax internally
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss_function)

# Model evaluation
with tf.name_scope('evaluate_model'):
    correct_prediction = tf.equal(tf.argmax(y_predict,1), tf.argmax(y_label,1))  # element-wise comparison of predicted and true class indices
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

# Training -------------------------------------------------------------------------------
trainEpochs = 30
batchSize = 100
totalBatchs = int(mnist.train.num_examples/batchSize)
epoch_list=[];accuracy_list=[];loss_list=[];
from time import time
startTime = time()
sess = tf.Session()
sess.run(tf.global_variables_initializer())

for epoch in range(trainEpochs):
    for i in range(totalBatchs):
        batch_x,batch_y = mnist.train.next_batch(batchSize)  # fetch the next mini-batch
        sess.run(optimizer, feed_dict={x:batch_x, y_label:batch_y})  # run one optimization step

    # After each epoch, evaluate loss and accuracy on the validation set
    loss,acc = sess.run([loss_function, accuracy],
                        feed_dict={x: mnist.validation.images,
                                   y_label: mnist.validation.labels})

    epoch_list.append(epoch)
    loss_list.append(loss)
    accuracy_list.append(acc)
    print('Train Epoch:','%02d'%(epoch+1),'loss=',\
          '{:.9f}'.format(loss),'Accuracy=',acc)
duration = time()-startTime
print('Training finished, elapsed time:', duration, 'seconds')

# Plot the loss per epoch
import matplotlib.pyplot as plt
%matplotlib inline
figure = plt.gcf()
plt.plot(epoch_list, loss_list, label='loss')
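The per-epoch validation accuracy recorded in accuracy_list can be plotted the same way (optional):

plt.plot(epoch_list, accuracy_list, label='accuracy')
plt.legend()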


# Accuracy on the test set
print(sess.run(accuracy,feed_dict={x:mnist.test.images, y_label:mnist.test.labels}))

# Predicted labels for the test set
prediction_result = sess.run(tf.argmax(y_predict,1),feed_dict={x:mnist.test.images})
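To spot-check the result, the first few predicted labels can be compared against the ground truth (illustrative only):

import numpy as np
true_labels = np.argmax(mnist.test.labels, axis=1)   # convert one-hot labels back to digit classes
print('predicted:', prediction_result[:10])
print('actual:   ', true_labels[:10])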

# Save the computation graph so it can be viewed with TensorBoard
merged = tf.summary.merge_all()  # returns None here unless tf.summary.* ops (e.g. tf.summary.scalar) were registered earlier
train_writer = tf.summary.FileWriter('log/CNN',sess.graph)
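After the script has run, the saved graph can be inspected by launching TensorBoard from a terminal and opening the URL it prints (typically http://localhost:6006):

# In a terminal:
#   tensorboard --logdir=log/CNN

To also log the loss and accuracy curves, tf.summary.scalar ops would have to be registered before merge_all() and the merged summary written with train_writer.add_summary during training.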

 
