TensorFlow: Deep MNIST

The program (a few parts I don't fully understand yet; to be revised later):

# _*_coding:utf-8_*_

import inputdata
mnist = inputdata.read_data_sets('MNIST_data', one_hot=True)    # mnist is a lightweight class that stores the training, validation, and test sets as NumPy arrays

import tensorflow as tf
sess = tf.InteractiveSession()


x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

sess.run(tf.initialize_all_variables())

y = tf.nn.softmax(tf.matmul(x, W) + b)    # nn: neural network
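# (Note) softmax turns the 10 scores per image into a probability distribution:
# softmax(z)_i = exp(z_i) / sum_j exp(z_j), so all outputs are positive and sum to 1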

# Cost function
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
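# (Note) this is H(y', y) = -sum_i y'_i * log(y_i), summed over the whole batch.
# log(y) can underflow when y approaches 0; TensorFlow's fused
# tf.nn.softmax_cross_entropy_with_logits is the numerically stabler alternative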

# Optimization algorithm
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)    # updates the weights on each run

for i in range(1000):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})    # feed_dict can substitute for any tensor in the graph, not just placeholders

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

print accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})
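# The simple softmax regression model above should reach roughly 91% test
# accuracy (the 0.9092 in the output below)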

# Build the multilayer convolutional network

# Functions to initialize W and b
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)    # truncated_normal samples from a truncated normal distribution
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
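# (Note) the small positive bias (0.1) reduces the chance of "dead" ReLU
# neurons that never activate; the truncated normal keeps the initial weights
# small and breaks the symmetry that all-zero weights would have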

# Convolution and pooling
def conv2d(x, W):    # plain convolution: stride 1, zero-padded so the output size equals the input size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):    # traditional max pooling over 2x2 blocks
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
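# (Note) shape bookkeeping: with SAME padding and stride 1 the convolution
# keeps the spatial size, and each 2x2 max pool halves it:
# 28x28 -> conv -> 28x28 -> pool -> 14x14 -> conv -> 14x14 -> pool -> 7x7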

# First convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

x_image = tf.reshape(x, [-1, 28, 28, 1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Densely connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
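# (Note) the 7*7*64 input size follows from the shape bookkeeping above: two
# rounds of 2x2 pooling shrink 28x28 to 7x7, and the second conv layer
# produces 64 feature maps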

h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
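# (Note) keep_prob is the probability of keeping a unit: 0.5 while training
# (regularization), 1.0 at evaluation time so dropout is effectively disabled.
# tf.nn.dropout also scales the kept activations by 1/keep_prob so the
# expected sum stays the same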

# Output layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# Train and evaluate the model
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print "step %d, training accuracy %g" % (i, train_accuracy)
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print "test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
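
Note: the listing targets Python 2 and a pre-1.0 TensorFlow, which is why print is a statement and tf.initialize_all_variables() appears. A minimal sketch of the TensorFlow 1.x equivalents (assuming the rest of the graph code stays unchanged; print additionally needs parentheses under Python 3):

# Sketch: TF 1.x replacements for the deprecated calls above
sess.run(tf.global_variables_initializer())    # replaces tf.initialize_all_variables()

# A numerically stabler loss: feed the pre-softmax logits instead of y_conv
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))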

Output:

0.9092
step 0, training accuracy 0.08
step 100, training accuracy 0.9
step 200, training accuracy 0.94
step 300, training accuracy 0.98
step 400, training accuracy 0.98
step 500, training accuracy 0.9
step 600, training accuracy 0.96
step 700, training accuracy 0.96
step 800, training accuracy 0.96
step 900, training accuracy 0.94
step 1000, training accuracy 0.98
step 1100, training accuracy 1
step 1200, training accuracy 0.92
step 1300, training accuracy 0.96
step 1400, training accuracy 0.92
step 1500, training accuracy 0.98
...will paste the rest after the run finishes tomorrow morning

 

Reposted from: https://www.cnblogs.com/DianeSoHungry/p/7158426.html
