Recurrent Neural Network

A personal learning record, based on:
https://github.com/machinelearningmindset/TensorFlow-Course#why-use-tensorflow

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse

######################
# Optimization Flags #
######################

learning_rate = 0.001 # initial learning rate
seed = 111 # random seed for reproducibility

##################
# Training Flags #
##################
batch_size = 128 # Batch size for training
num_epoch = 10 # Number of training epochs (full passes over the training set)

###############
# Model Flags #
###############
hidden_size = 128 # Number of neurons in the RNN hidden layer

# Reset the graph and fix the random seeds so results are reproducible
tf.reset_default_graph()
tf.set_random_seed(seed)   # graph-level seed
np.random.seed(seed)       # NumPy-level seed

# Divide each 28x28 image into rows of pixels that are fed to the RNN as sequential information
step_size = 28
input_size = 28
output_size = 10
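
# A minimal illustration (added here, not in the original post): one flattened 784-pixel
# MNIST image is reshaped into step_size rows of input_size pixels, so the RNN reads the
# image one row per time step. The demo_* names are placeholders for illustration only.
demo_flat_image = np.zeros(step_size * input_size, dtype=np.float32)   # a dummy flattened image
demo_sequence = demo_flat_image.reshape(step_size, input_size)         # shape (28, 28): 28 steps x 28 features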

# Input tensors
X = tf.placeholder(tf.float32, [None, step_size, input_size])
y = tf.placeholder(tf.int32, [None])

# RNN
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=hidden_size)   # set up a basic (vanilla) RNN cell
# output has shape [batch_size, max_time, cell.output_size]
# state  has shape [batch_size, cell.output_size]
# batch_size is the number of examples in the batch, max_time is the longest sequence length
# in the batch, and cell.output_size equals the number of units (neurons) in the RNN cell
output, state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)  
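
# Added sanity check (not in the original post): with 28 time steps and hidden_size = 128,
# output has static shape (?, 28, 128) -- one hidden vector per time step -- and
# state has static shape (?, 128) -- the hidden vector after the final step.
print(output.get_shape())   # (?, 28, 128)
print(state.get_shape())    # (?, 128)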

# Forward pass and loss calculation
logits = tf.layers.dense(state, output_size)   # fully connected layer: final state -> 10 class logits
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)   # softmax + cross-entropy in one op
loss = tf.reduce_mean(cross_entropy)
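
# Illustrative sketch (added, not in the original post): the "sparse" variant of the loss
# takes integer class labels directly, which is why y above is tf.int32 and why MNIST is
# loaded below without one-hot encoding. The demo_* names are placeholders.
demo_logits = tf.constant([[2.0, 0.5, 0.1, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])   # one example, 10 classes
demo_labels = tf.constant([3])                                                    # integer label, not one-hot
demo_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=demo_labels, logits=demo_logits)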

# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

# Prediction
prediction = tf.nn.in_top_k(logits, y, 1)   # True where the top-1 prediction matches the label
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
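
# Quick illustration (added, not in the original post): in_top_k yields one boolean per
# example, so casting to float32 and averaging gives the accuracy,
# e.g. [True, False, True, True] -> 0.75.
demo_hits = np.array([True, False, True, True])
print('demo accuracy:', demo_hits.astype(np.float32).mean())   # prints 0.75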

# Input data: load MNIST (labels are returned as integer class indices by default)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")

# Process MNIST
X_test = mnist.test.images # X_test shape: [num_test, 28*28]
X_test = X_test.reshape([-1, step_size, input_size])
y_test = mnist.test.labels

# initialize the variables
init = tf.global_variables_initializer()

# Empty list for tracking
loss_train_list = []
acc_train_list = []

# train the model
with tf.Session() as sess:
    sess.run(init)
    n_batches = mnist.train.num_examples // batch_size   # integer division: number of batches per epoch
    for epoch in range(num_epoch):
        for batch in range(n_batches):
            X_train, y_train = mnist.train.next_batch(batch_size)
            X_train = X_train.reshape([-1, step_size, input_size])
            sess.run(optimizer, feed_dict={X: X_train, y: y_train})
        # Evaluate loss and accuracy on the last mini-batch of this epoch
        loss_train, acc_train = sess.run(
            [loss, accuracy], feed_dict={X: X_train, y: y_train})
        loss_train_list.append(loss_train)
        acc_train_list.append(acc_train)
        print('Epoch: {}, Train Loss: {:.3f}, Train Acc: {:.3f}'.format(
            epoch + 1, loss_train, acc_train))
    loss_test, acc_test = sess.run(
        [loss, accuracy], feed_dict={X: X_test, y: y_test})
    print('Test Loss: {:.3f}, Test Acc: {:.3f}'.format(loss_test, acc_test))
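
# One possible use of the tracked lists (added sketch, not in the original post):
# plot the per-epoch training loss and accuracy with the matplotlib import above.
plt.figure()
plt.plot(range(1, num_epoch + 1), loss_train_list, label='train loss')
plt.plot(range(1, num_epoch + 1), acc_train_list, label='train accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()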