# -*- coding:utf-8 _*-
"""
@author:zhangxianke
@file: test.py
@time: 2018/11/09
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Load MNIST; images come flattened as 784-vectors, labels one-hot over 10 classes.
data = input_data.read_data_sets('./MNIST_data', one_hot= True)
train_x, train_y = data.train.images, data.train.labels
test_x, test_y = data.test.images, data.test.labels
sequence_length = 28 # number of time steps fed to the RNN (rows of the 28x28 image)
frame_size = 28 # elements per time step (pixels in one image row)
n_hidden = 100 # hidden-layer (RNN cell) size
n_class = 10  # number of digit classes
# Output projection (last hidden state -> class scores).
w = tf.Variable(tf.random_normal([n_hidden, n_class]), dtype= tf.float32)
b = tf.Variable(tf.zeros([1,n_class]), dtype= tf.float32)
# Placeholders: x holds flattened images, y the one-hot labels.
x = tf.placeholder(tf.float32, [None, sequence_length * frame_size])
y = tf.placeholder(tf.float32, [None, n_class])
def RNN(_x, _w, _b):
    """Build the RNN classifier graph over a batch of flattened images.

    Args:
        _x: float32 tensor [batch, sequence_length * frame_size], flattened images.
        _w: output-projection weights [n_hidden, n_class].
        _b: output-projection bias [1, n_class].

    Returns:
        Unnormalized class logits [batch, n_class].  Softmax is intentionally
        NOT applied here: the training loss uses
        softmax_cross_entropy_with_logits, which expects raw logits (applying
        softmax twice distorts the gradients), and the argmax used for
        accuracy is unchanged by a softmax.
    """
    # Reshape each flat image into a sequence of rows: one frame per time step.
    _x = tf.reshape(_x, shape= [-1, sequence_length, frame_size])
    cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)  # vanilla RNN cell
    output, h = tf.nn.dynamic_rnn(cell, _x, dtype= tf.float32)
    # Bug fix: use the passed-in _w/_b (the originals silently used the
    # module-level globals w/b, making the parameters dead).
    # Project the last time step's hidden state onto the classes.
    return tf.matmul(output[:, -1, :], _w) + _b
iters = 1000        # number of passes over the training set
batch = 1280        # mini-batch size
learn_rate = 0.001  # Adam learning rate
y_predict = RNN(x, w, b)
# NOTE(review): RNN() as defined above applies softmax to its output, but
# softmax_cross_entropy_with_logits expects *raw logits* — feeding it
# probabilities applies softmax twice and skews the gradients.  The proper
# fix is for RNN() to return unnormalized logits.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_predict, labels= y))
optm = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy: fraction of samples whose predicted class matches the label.
result = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y, 1))
# tf.cast replaces the deprecated tf.to_float — identical behavior.
acc = tf.reduce_mean(tf.cast(result, tf.float32))
# Initialize variables and run the training loop.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(iters):
    cost = count = 0.0
    # Sweep the training set in contiguous mini-batches.
    for batch_index in range(0, train_y.shape[0], batch):
        count += 1
        feed = {x: train_x[batch_index: batch_index + batch], y: train_y[batch_index: batch_index + batch]}
        sess.run(optm, feed_dict= feed)
        cost += sess.run(loss, feed_dict=feed)
    cost /= count  # mean mini-batch loss over the epoch
    # Evaluate accuracy on the full test set once per epoch.
    feed_test = {x: test_x, y: test_y}
    accuracy = sess.run(acc, feed_dict= feed_test)
    print ('第 %d 次迭代, 损失函数值为 %.6f, 准确率为 %.6f' % (i + 1, cost, accuracy))