Deep Learning - LSTM

    • My understanding and usage of LSTMs used to be fuzzy; writing this up made the ideas much clearer.
    • LSTM in Keras: each 28×28 MNIST image is fed as a sequence of 28 time steps, one 28-dimensional row vector per step. A prediction sketch follows the script.

      from __future__ import print_function  # must be the first statement in the file
      import numpy as np
      np.random.seed(2017)  # fix the seed for reproducibility
      from keras.datasets import mnist
      from keras.utils import np_utils
      from keras.models import Sequential
      from keras.layers import LSTM, Activation, Dense
      from keras.optimizers import Adam
      
      (X_train, y_train), (X_test, y_test) = mnist.load_data()
      
      # hyperparameters
      learning_rate = 0.001  # learning rate
      epochs = 2             # number of passes over the training set
      batch_size = 128       # samples per training batch
      n_input = 28           # input size: pixels per image row
      n_step = 28            # time steps: rows per image
      n_hidden = 128         # units in the LSTM cell
      n_classes = 10         # number of output classes (digits 0-9)
      
      # Scale x to [0, 1] and one-hot encode y. Each image is reshaped into an
      # n_step x n_input matrix: 28 rows, each used as one 28-dim input vector.
      X_train = X_train.reshape(-1, n_step, n_input)/255.
      X_test = X_test.reshape(-1, n_step, n_input)/255.
      
      y_train = np_utils.to_categorical(y_train, num_classes=n_classes)
      y_test = np_utils.to_categorical(y_test, num_classes=n_classes)
      
      model = Sequential()
      model.add(LSTM(n_hidden,
                     batch_input_shape=(None, n_step, n_input),
                     unroll=True))
      
      model.add(Dense(n_classes))
      model.add(Activation('softmax'))
      
      adam = Adam(lr=learning_rate)
      # print a summary of the model architecture
      model.summary()
      model.compile(optimizer=adam,
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
      
      model.fit(X_train, y_train,
                batch_size=batch_size,
                epochs=epochs,
                verbose=1,  # 0 = silent, 1 = progress bar
                validation_data=(X_test, y_test))
      
      scores = model.evaluate(X_test, y_test, verbose=0)
      print('LSTM test score:', scores[0]) #loss
      print('LSTM test accuracy:', scores[1])
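
      After training, the model can be used for inference in the usual Keras way.
      A minimal sketch reusing the names from the script above (predicting on a
      single test image is my illustration, not part of the original post):

      # predict the digit for the first test image
      probs = model.predict(X_test[:1])  # shape (1, 10): softmax probabilities
      print('predicted digit:', np.argmax(probs, axis=1)[0])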

    • LSTM in TensorFlow: the same task written against the low-level TF 1.x API. A test-accuracy sketch follows the script.

      import tensorflow as tf
      from tensorflow.examples.tutorials.mnist import input_data
      
      # set a random seed so results are reproducible across runs
      tf.set_random_seed(1)
      
      # load the MNIST data
      mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
      
      # hyperparameters
      lr = 0.001               # learning rate
      training_iters = 100000  # total number of training samples to process
      batch_size = 128
      
      n_inputs = 28   # MNIST data input (img shape: 28*28)
      n_steps = 28    # time steps
      n_hidden_units = 128   # neurons in the hidden layer
      n_classes = 10      # MNIST classes (0-9 digits)
      
      # tf Graph input
      x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
      y = tf.placeholder(tf.float32, [None, n_classes])
      
      # Define weights
      weights = {
          # (28, 128)
          'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
          # (128, 10)
          'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
      }
      biases = {
          # (128, )
          'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
          # (10, )
          'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
      }
      
      
      def RNN(X, weights, biases):
          # hidden layer for input to cell
          ########################################
      
          # reshape the input from (128 batch, 28 steps, 28 inputs)
          # into X ==> (128 batch * 28 steps, 28 inputs)
          X = tf.reshape(X, [-1, n_inputs])
      
          # into hidden
          # X_in = (128 batch * 28 steps, 128 hidden)
          X_in = tf.matmul(X, weights['in']) + biases['in']
          # X_in ==> (128 batch, 28 steps, 128 hidden)
          X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])
      
          # cell
          ##########################################
      
          # basic LSTM Cell.
          cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units)
          # the LSTM cell state has two parts: (c_state, h_state)
          # note: zero_state fixes the batch dimension, so every batch fed to
          # the graph must contain exactly batch_size samples
          init_state = cell.zero_state(batch_size, dtype=tf.float32)
      
          # You have 2 options for following step.
          # 1: tf.nn.rnn(cell, inputs);
          # 2: tf.nn.dynamic_rnn(cell, inputs).
          # If you use option 1, you have to modify the shape of X_in; see:
          # https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/recurrent_network.py
          # In here, we go for option 2.
          # dynamic_rnn receives a Tensor of shape (batch, steps, inputs) or
          # (steps, batch, inputs) as X_in; set time_major accordingly.
          outputs, final_state = tf.nn.dynamic_rnn(cell, X_in, initial_state=init_state, time_major=False)
      
          # hidden layer for output as the final results
          #############################################
          # option A: use the final hidden state (h_state) directly
          # results = tf.matmul(final_state[1], weights['out']) + biases['out']

          # option B: swap the batch and time dimensions, unpack into a list
          # [(batch, outputs)] * steps, and take the output of the last step
          outputs = tf.unstack(tf.transpose(outputs, [1, 0, 2]))
          results = tf.matmul(outputs[-1], weights['out']) + biases['out']    # shape = (128, 10)
      
          return results
      
      
      pred = RNN(x, weights, biases)
      cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
      train_op = tf.train.AdamOptimizer(lr).minimize(cost)
      
      correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
      accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
      
      with tf.Session() as sess:
          init = tf.global_variables_initializer()
          sess.run(init)
          step = 0
          while step * batch_size < training_iters:
              batch_xs, batch_ys = mnist.train.next_batch(batch_size)
              batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
              sess.run([train_op], feed_dict={
                  x: batch_xs,
                  y: batch_ys,
              })
              if step % 20 == 0:
                  print(sess.run(accuracy, feed_dict={
                      x: batch_xs,
                      y: batch_ys,
                  }))
              step += 1
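
      The loop above only reports accuracy on the current training batch. Because
      init_state was built with a fixed batch_size, an evaluation batch must also
      contain exactly batch_size samples. A minimal sketch of checking accuracy on
      one held-out batch (my addition, not in the original post; it belongs inside
      the same `with tf.Session() as sess:` block, after the training loop):

      # evaluate on a single batch of test images
      test_xs, test_ys = mnist.test.next_batch(batch_size)
      test_xs = test_xs.reshape([batch_size, n_steps, n_inputs])
      print('test accuracy:', sess.run(accuracy, feed_dict={x: test_xs, y: test_ys}))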


