import tensorflow as tf
"""
tf.nn.rnn_cell # 定义rnn 细胞核相关的信息的
tf.nn.rnn_cell_impl # 定义rnn细胞核具体是如何实现的
tf.nn.dynamic_rnn() # 单向动态rnn。
tf.nn.bidirectional_dynamic_rnn() # 双向动态rnn
tf.nn.static_rnn() # 单向静态rnn
tf.nn.static_bidirectional_rnn() # 双向静态rnn
"""
"""
tf.nn.dynamic_rnn() # 单向动态rnn。
特点 1、在每个批次执行之间构建rnn的执行结构。 允许每个批量数据的 时间步不一致。 效率慢。
2、输入要求:3-D tensor[N, n_steps, num_classes]
3、输出形状: 3-D tensor[N, n_steps, lstm_size]
tf.nn.static_rnn() # 单向静态rnn
1、静态rnn在执行之前,执行结构已经构建好, 每个批量数据的时间步必须一致。 效果快
2、输入的要求是一个列表(每一个值对应一个时刻的输入):
[[N, num_classes], [N, num_classes],[N, num_classes] ....]
3、返回的输出,也是一个列表(每一个值对应一个时刻的输出)
[[N, lstm_size], [N, lstm_size],[N, lstm_size] ....]
"""
# 1. About the RNN cells
"""
tf.nn.rnn_cell.BasicLSTMCell # 基础的LSTM cell
tf.nn.rnn_cell.LSTMCell() # 带peephole的 LSTM cell
tf.nn.rnn_cell.BasicRNNCell() # 基础的rnn vanilla rnn
tf.nn.rnn_cell.GRUCell() # GRU实现
tf.nn.rnn_cell.MultiRNNCell() # 堆栈多层隐藏层的
tf.nn.rnn_cell.DropoutWrapper() # rnn的dropout
tf.nn.rnn_cell.RNNCell # 所有cell实现的父类
"""
def BasicRNN():
    """
    Learn how to use tf.nn.rnn_cell.BasicRNNCell()
    :return:
    """
    cell = tf.nn.rnn_cell.BasicRNNCell(num_units=64, activation=tf.nn.tanh)
    # For BasicRNNCell, state_size and output_size both equal num_units: 64 64
    print(cell.state_size, cell.output_size)
def BasicRNN_n_steps():
    # Define the inputs: 2 samples per time step (batch_size), each sample has
    # 3 dimensions (one-hot size)
    batch_size = tf.placeholder_with_default(2, shape=[], name='batch')
    inputs1 = tf.placeholder(tf.float32, shape=[2, 3])
    inputs2 = tf.placeholder(tf.float32, shape=[2, 3])
    inputs = [inputs1, inputs2]
    # Instantiate a cell
    cell = tf.nn.rnn_cell.BasicRNNCell(num_units=4)
    # Initialize the state to zeros
    state0 = cell.zero_state(batch_size, tf.float32)
    # Output at t=1 (pass the input at t=1 and the previous state)
    # output1, state1 = cell.__call__(inputs[0], state0)
    output1, state1 = cell(inputs[0], state0)
    print(output1, state1)
    # Output at t=2 (pass the input at t=2 and the previous state state1)
    output2, state2 = cell(inputs[1], state1)
    print(output2, state2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        data1 = [
            [1.2, 2.3, 3.4],
            [2.3, 3.5, 3.8]
        ]
        data2 = [
            [1.32, 2.33, 3.34],
            [2.33, 3.35, 3.38]
        ]
        feed = {inputs1: data1, inputs2: data2}
        output1_, state1_, output2_, state2_ = sess.run(
            [output1, state1, output2, state2], feed)
        print(output1_, state1_)
        print('**' * 56)
        print(output2_, state2_)
def BasicLSTM():
    """
    Learn how to use the LSTM cell with a static RNN
    :return:
    """
    cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=3)
    # Define the inputs: 2 time steps in total, 4 samples per step (batch_size),
    # each sample has 2 dimensions (one-hot size)
    old_inputs = tf.placeholder(tf.float32, [8, 2])
    inputs = tf.split(old_inputs, num_or_size_splits=2, axis=0)
    print(inputs)
    # Initialize the state to zeros
    s0 = cell.zero_state(batch_size=4, dtype=tf.float32)
    # Feed the input list directly into the static RNN
    # (for the bidirectional variant, see the StaticBiRNN sketch below)
    rnn_outputs, final_state = tf.nn.static_rnn(cell=cell, inputs=inputs, initial_state=s0)
    # rnn_outputs is a list: [[N, lstm_size], [N, lstm_size]]
    print(rnn_outputs)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        data = [
            [2.3, 2.5],
            [2.33, 2.5],
            [2.34, 2.45],
            [2.3, 23.5],
            [2.33, 2.45],
            [2.3, 2.5],
            [2.33, 2.65],
            [2.3, 2.5]
        ]
        print(sess.run([rnn_outputs, final_state], feed_dict={old_inputs: data}))
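# A minimal sketch (sizes are illustrative assumptions) of
# tf.nn.static_bidirectional_rnn, the bidirectional static variant referenced
# above: it takes the same list-of-[N, input_dim] inputs as static_rnn and
# returns per-step outputs with forward and backward activations concatenated.
def StaticBiRNN():
    fw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=3)
    bw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=3)
    # 2 time steps, 4 samples per step, 2 input dimensions
    inputs = [tf.placeholder(tf.float32, [4, 2]) for _ in range(2)]
    outputs, state_fw, state_bw = tf.nn.static_bidirectional_rnn(
        cell_fw=fw_cell, cell_bw=bw_cell, inputs=inputs, dtype=tf.float32)
    print(outputs)  # list of 2 tensors, each [4, 6] (3 forward + 3 backward)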
if __name__ == '__main__':
    # BasicRNN()
    # BasicRNN_n_steps()
    BasicLSTM()