DL9 - Recurrent Neural Network (RNN) Implementation

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib import rnn
mnist = input_data.read_data_sets('./data/MNIST/', one_hot=True)
trainimgs, trainlabels, testimgs, testlabels \
 = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
ntrain, ntest, dim, nclasses \
 = trainimgs.shape[0], testimgs.shape[0], trainimgs.shape[1], trainlabels.shape[1]
diminput = 28
dimhidden = 128
dimoutput = nclasses
nsteps = 28  # each 28x28 image is split into 28 rows, one row per time step
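Each flattened 784-pixel MNIST image is read as a sequence: 28 time steps (the image rows), each a 28-dimensional input vector. A minimal NumPy sketch of that view, on a dummy array (illustrative only):

import numpy as np
img = np.arange(784, dtype=np.float32)   # stand-in for one flattened MNIST image
seq = img.reshape(28, 28)                # nsteps rows, diminput features per row
assert seq.shape == (28, 28)             # row i is the input at time step i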

weights = {
    'hidden': tf.Variable(tf.random_normal([diminput, dimhidden])),
    'out': tf.Variable(tf.random_normal([dimhidden, dimoutput]))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([dimhidden])),
    'out': tf.Variable(tf.random_normal([dimoutput]))
}
def _RNN(_X, _W, _b, _nsteps, _name):
    # 1. Permute the input from [batchsize, nsteps, diminput]
    #    to [nsteps, batchsize, diminput] -- the layout static_rnn expects
    _X = tf.transpose(_X, [1, 0, 2])
    # 2. Reshape input to [nsteps*batchsize, diminput] 
    _X = tf.reshape(_X, [-1, diminput])
    # 3. Input layer => Hidden layer
    _H = tf.matmul(_X, _W['hidden']) + _b['hidden']
    # 4. Split the data into 'nsteps' chunks; the i-th chunk holds the
    #    whole batch at time step i
    _Hsplit = tf.split(_H, _nsteps, 0)
    # 5. Run the LSTM: _LSTM_O is a list of 'nsteps' outputs, each of shape
    #    [batchsize, dimhidden]; _LSTM_S is the final cell state.
    #    Only the last element of _LSTM_O is used for the prediction.
    lstm_cell = rnn.BasicLSTMCell(dimhidden, reuse=tf.get_variable_scope().reuse)
    _LSTM_O, _LSTM_S = rnn.static_rnn(lstm_cell, _Hsplit, dtype=tf.float32)
    # 6. Output
    _O = tf.matmul(_LSTM_O[-1], _W['out']) + _b['out']    
    # Return! 
    return {
        'X': _X, 'H': _H, 'Hsplit': _Hsplit,
        'LSTM_O': _LSTM_O, 'LSTM_S': _LSTM_S, 'O': _O 
    }
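Steps 1-4 above turn a [batchsize, nsteps, diminput] tensor into a Python list of nsteps tensors, each [batchsize, dimhidden], which is the format rnn.static_rnn consumes. The same reshuffling in NumPy, shapes only (the sizes are arbitrary stand-ins):

import numpy as np
batch, steps, feats = 4, 28, 28         # stand-in sizes
X = np.zeros((batch, steps, feats))     # [batchsize, nsteps, diminput]
X = X.transpose(1, 0, 2)                # step 1: [nsteps, batchsize, diminput]
X = X.reshape(-1, feats)                # step 2: [nsteps*batchsize, diminput]
chunks = np.split(X, steps, axis=0)     # step 4: one [batchsize, diminput] array per time step
assert len(chunks) == steps and chunks[0].shape == (batch, feats)

(Step 3, the matmul with _W['hidden'], only changes the last dimension from diminput to dimhidden.)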
learning_rate = 0.001
x      = tf.placeholder("float", [None, nsteps, diminput])
y      = tf.placeholder("float", [None, dimoutput])
myrnn  = _RNN(x, weights, biases, nsteps, 'basic')
pred   = myrnn['O']
cost   = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y)) 
optm   = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # plain SGD; swap in AdamOptimizer for faster convergence
accr   = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred,1), tf.argmax(y,1)), tf.float32))
init   = tf.global_variables_initializer()
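The accr op above counts a prediction as correct when the index of its largest logit matches the hot index of the one-hot label. The same computation in NumPy, on made-up logits (illustrative only):

import numpy as np
logits = np.array([[0.1, 2.0, -1.0],
                   [3.0, 0.2,  0.1]])   # fake scores for 3 classes
labels = np.array([[0, 1, 0],
                   [0, 0, 1]])          # one-hot ground truth
acc = np.mean(np.argmax(logits, 1) == np.argmax(labels, 1))  # -> 0.5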
training_epochs = 20
batch_size      = 100   # choose these three values sensibly, or training will take a long time
display_step    = 2
sess = tf.Session()
sess.run(init)
print ("Start optimization")
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(mnist.train.num_examples/batch_size)
    #total_batch = 100  # uncomment to cap the batches per epoch for a quick test
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        batch_xs = batch_xs.reshape((batch_size, nsteps, diminput))
        # Fit training using batch data
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        # Compute average loss
        avg_cost += sess.run(cost, feed_dict=feeds)/total_batch
    # Display logs per epoch step
    if epoch % display_step == 0: 
        print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}  # reuse the last training batch
        train_acc = sess.run(accr, feed_dict=feeds)
        print (" Training accuracy: %.3f" % (train_acc))
        test_xs = testimgs.reshape((ntest, nsteps, diminput))
        feeds = {x: test_xs, y: testlabels}
        test_acc = sess.run(accr, feed_dict=feeds)
        print (" Test accuracy: %.3f" % (test_acc))
print ("Optimization Finished.")
Sample output:

Start optimization
Epoch: 000/020 cost: 1.300222228
 Training accuracy: 0.780
 Test accuracy: 0.715
Epoch: 002/020 cost: 0.559806970
 Training accuracy: 0.880
 Test accuracy: 0.828
Epoch: 004/020 cost: 0.413306093
 Training accuracy: 0.870
 Test accuracy: 0.861
Epoch: 006/020 cost: 0.332932736
 Training accuracy: 0.920
 Test accuracy: 0.874
Epoch: 008/020 cost: 0.273848717
 Training accuracy: 0.900
 Test accuracy: 0.893
Epoch: 010/020 cost: 0.239274396
 Training accuracy: 0.940
 Test accuracy: 0.915
Epoch: 012/020 cost: 0.210719406
 Training accuracy: 0.950
 Test accuracy: 0.919
Epoch: 014/020 cost: 0.187901423
 Training accuracy: 0.950
 Test accuracy: 0.927
Epoch: 016/020 cost: 0.170967947
 Training accuracy: 0.960
 Test accuracy: 0.934
Epoch: 018/020 cost: 0.155789046
 Training accuracy: 0.920
 Test accuracy: 0.929
Optimization Finished.
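A final note: tf.contrib, and with it rnn.BasicLSTMCell and rnn.static_rnn, was removed in TensorFlow 2.x. For readers on a modern install, here is a rough tf.keras sketch of the same architecture; it is my approximation of the equivalent, not a drop-in port of the code above:

import tensorflow as tf  # TF 2.x

model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, input_shape=(28, 28)),  # per-step input -> hidden projection
    tf.keras.layers.LSTM(128),                         # keeps only the last step's output
    tf.keras.layers.Dense(10),                         # logits for the 10 digit classes
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# model.fit(x_train.reshape(-1, 28, 28), y_train, epochs=20, batch_size=100)
# (x_train / y_train are placeholders for your own MNIST arrays, labels one-hot.)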
