Exploring the RNN model structure in TensorFlow

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
n_steps = 2
n_inputs = 3
n_neurons = 5
X = tf.placeholder(tf.float32,[None, n_steps, n_inputs])
basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)
seq_length = tf.placeholder(tf.int32,[None])
outputs,states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32,sequence_length=seq_length)
init = tf.global_variables_initializer()
X_batch = np.array([
    [[0,1,2],[9,8,7]], # this sample has two steps
    [[3,4,5],[0,0,0]], # this sample also has two steps
    [[6,7,8],[6,5,4]], # again two steps; inside each step, the 3 inputs are combined with the 5 neurons to give 5 outputs: (1×3) * (3×5) = (1×5)
    [[9,0,1],[3,2,1]], # again two steps, 3 inputs each
])
print(X_batch.shape)
print(X_batch)

(4, 2, 3)
[[[0 1 2]
  [9 8 7]]

 [[3 4 5]
  [0 0 0]]

 [[6 7 8]
  [6 5 4]]

 [[9 0 1]
  [3 2 1]]]

seq_length_batch = np.array([2,1,2,2])
print(seq_length_batch)
print(seq_length_batch.shape)

[2 1 2 2]
(4,)
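One note on the weight math in the comments above: it is a simplification. BasicRNNCell actually stores a single kernel that concatenates the input with the recurrent state, so its shape is (n_inputs + n_neurons, n_neurons) = (8, 5), not (3, 5). A minimal sketch to verify, assuming the graph built above (exact variable names can differ across TF versions):

# Sketch: list the variables created by BasicRNNCell / dynamic_rnn above.
# Expect something like rnn/basic_rnn_cell/kernel with shape (8, 5)
# and rnn/basic_rnn_cell/bias with shape (5,).
for v in tf.global_variables():
    print(v.name, v.shape)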

with tf.Session() as sess:
    sess.run(init)
    # states is the output at the last (valid) step of each sample
    outputs_val, states_val = sess.run(
        [outputs,states],feed_dict = {X:X_batch,seq_length:seq_length_batch}
    )
    print('outputs_val.shape:',outputs_val.shape,'\n','states_val.shape:',states_val.shape)
    print('outputs_val:',outputs_val,'\n','states_val:',states_val)

outputs_val.shape: (4, 2, 5)
states_val.shape: (4, 5)
outputs_val: [[[ 0.81034565 -0.82188725 -0.43817806 0.21998554 0.12257475]
[-0.1270539 -0.9999956 -0.9981977 0.9930948 -0.34095988]]

[[ 0.93452823 -0.99842227 -0.93783927 0.8417908 -0.04894711]
[ 0. 0. 0. 0. 0. ]]

[[ 0.97836965 -0.99998724 -0.9947459 0.9771818 -0.21762982]
[-0.8643079 -0.99986285 -0.9826787 0.97771955 0.01242264]]

[[-0.99816316 -0.96931624 0.8047482 0.505479 -0.92204905]
[ 0.41773438 -0.9614967 0.07887521 0.98976755 0.33363777]]]
states_val: [[-0.1270539 -0.9999956 -0.9981977 0.9930948 -0.34095988]
[ 0.93452823 -0.99842227 -0.93783927 0.8417908 -0.04894711]
[-0.8643079 -0.99986285 -0.9826787 0.97771955 0.01242264]
[ 0.41773438 -0.9614967 0.07887521 0.98976755 0.33363777]]
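To confirm the comment above: states_val is exactly the output at each sample's last valid step. Note that sample 2 has seq_length 1, so its step-2 output is zeroed and its state is taken from step 1. A quick numpy check (a sketch, reusing the arrays from the session above):

import numpy as np
# Pick, for each sample, the output at its last valid step (seq_length - 1).
batch_idx = np.arange(X_batch.shape[0])
picked = outputs_val[batch_idx, seq_length_batch - 1]  # shape (4, 5)
print(np.allclose(picked, states_val))                 # True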

Multiple hidden layers (BasicRNN)

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
n_steps = 2
n_inputs = 3
n_neurons = 5
n_layers = 3
X = tf.placeholder(tf.float32,[None,n_steps,n_inputs])
seq_length = tf.placeholder(tf.int32,[None])

layers = [tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons,activation = tf.nn.relu) for layer in range(n_layers)]
multi_layer_cell = tf.nn.rnn_cell.MultiRNNCell(layers)
# sequence_length (one entry per sample in the batch) tells dynamic_rnn how many valid steps each sample has
outputs,states = tf.nn.dynamic_rnn(multi_layer_cell,X,dtype=tf.float32,sequence_length=seq_length)
init = tf.global_variables_initializer()

X_batch = np.array([
    [[0,1,2],[9,8,7]], # this sample has two steps
    [[3,4,5],[0,0,0]], # this sample also has two steps
    [[6,7,8],[6,5,4]], # again two steps; inside each step, the 3 inputs are combined with the 5 neurons to give 5 outputs: (1×3) * (3×5) = (1×5)
    [[9,0,1],[3,2,1]], # again two steps, 3 inputs each
])

seq_length_batch = np.array([2,1,2,2])
# From the output we can see that outputs is the last layer's output, i.e. [batch_size, n_steps, n_neurons] = [4, 2, 5],
# while states is the final-step output of every layer (note: every layer), i.e. three tensors of shape [batch_size, n_neurons] = [4, 5]
with tf.Session() as sess:
    sess.run(init)
    outputs_val,states_val = sess.run(
        [outputs,states],feed_dict={X:X_batch,seq_length:seq_length_batch}
    )
    print('outputs_val.shape:',outputs_val.shape,'\nlen(states_val):',len(states_val),'\nstates_val[0].shape',states_val[0].shape)
    print('outputs_val--------------------------')
    print('outputs_val:',outputs_val,'\nstates_val---------------@@@@@@@\nstates_val:',states_val)

outputs_val.shape: (4, 2, 5)
len(states_val): 3
states_val[0].shape (4, 5)
outputs_val--------------------------
outputs_val: [[[0. 0.07646854 0.24991421 0.1089821 0.14738367]
[0. 0.47950923 3.0206172 0.88744605 1.1424427 ]]

[[0. 0.28368935 1.100303 0.39404172 0.5585399 ]
[0. 0. 0. 0. 0. ]]

[[0. 0.5129226 2.095811 0.7896844 1.0234959 ]
[0. 0.7222415 2.5808253 0.3015337 0. ]]

[[0.08379822 0.48727143 0. 0.85559225 0. ]
[0. 0. 2.771887 0. 1.0182933 ]]]
states_val---------------@@@@@@@
states_val: (array([[0. , 4.4480453 , 0. , 0. , 3.842202 ],
[0. , 2.8573442 , 0.35427135, 0. , 0.4013871 ],
[0. , 0.84323883, 0. , 0. , 1.862844 ],
[0. , 2.7314935 , 0. , 0. , 2.25468 ]],
dtype=float32), array([[0. , 4.885102 , 3.2651732 , 1.9114573 , 2.0921707 ],
[0. , 1.6109308 , 1.6505947 , 0.83961606, 1.249507 ],
[1.9023452 , 4.456662 , 0. , 0.06245628, 0.5274455 ],
[0. , 4.088084 , 0. , 0. , 1.1591175 ]],
dtype=float32), array([[0. , 0.47950923, 3.0206172 , 0.88744605, 1.1424427 ],
[0. , 0.28368935, 1.100303 , 0.39404172, 0.5585399 ],
[0. , 0.7222415 , 2.5808253 , 0.3015337 , 0. ],
[0. , 0. , 2.771887 , 0. , 1.0182933 ]],
dtype=float32))
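The same check works in the multi-layer case: states is a tuple with one (4, 5) tensor per layer, and its last entry states_val[-1] (the top layer) matches the last valid step of outputs_val. A sketch reusing the values above:

import numpy as np
batch_idx = np.arange(X_batch.shape[0])
picked = outputs_val[batch_idx, seq_length_batch - 1]  # top layer, last valid step
print(np.allclose(picked, states_val[-1]))             # True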

Multiple hidden layers (LSTM)

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
n_steps = 2
n_inputs = 3
n_neurons = 5
n_layers = 3
X = tf.placeholder(tf.float32,[None,n_steps,n_inputs])
seq_length = tf.placeholder(tf.int32,[None])

layers = [tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons,activation = tf.nn.relu) for layer in range(n_layers)]
multi_layer_cell = tf.nn.rnn_cell.MultiRNNCell(layers)
# sequence_length (one entry per sample in the batch) tells dynamic_rnn how many valid steps each sample has
outputs,states = tf.nn.dynamic_rnn(multi_layer_cell,X,dtype=tf.float32,sequence_length=seq_length)
init = tf.global_variables_initializer()

X_batch = np.array([
    [[0,1,2],[9,8,7]], # this sample has two steps
    [[3,4,5],[0,0,0]], # this sample also has two steps
    [[6,7,8],[6,5,4]], # again two steps; inside each step, the 3 inputs are combined with the 5 neurons to give 5 outputs: (1×3) * (3×5) = (1×5)
    [[9,0,1],[3,2,1]], # again two steps, 3 inputs each
])

seq_length_batch = np.array([2,1,2,2])
# From the output we can see that outputs is still the last layer's output, i.e. [batch_size, n_steps, n_neurons] = [4, 2, 5],
# while states holds the final-step state of every layer; for LSTM cells each layer's state is an LSTMStateTuple of two [batch_size, n_neurons] = [4, 5] tensors (c and h)
with tf.Session() as sess:
    sess.run(init)
    outputs_val,states_val = sess.run(
        [outputs,states],feed_dict={X:X_batch,seq_length:seq_length_batch}
    )
    #print('outputs_val.shape:',outputs_val.shape,'\nlen(states_val):',len(states_val),'\nstates_val[0].shape',states_val[0].shape)
    print('outputs_val--------------------------')
    print('outputs_val:',outputs_val,'\nstates_val---------------@@@@@@@\nstates_val:',states_val)

outputs_val--------------------------
outputs_val: [[[1.2189428e-04 0.0000000e+00 9.1467059e-04 5.8743550e-04 7.9261739e-04]
[9.0805421e-05 0.0000000e+00 6.3243252e-03 3.9998074e-03 1.2471758e-02]]

[[4.6948360e-05 0.0000000e+00 3.5201167e-04 2.2613116e-04 3.0527476e-04]
[0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00]]

[[0.0000000e+00 0.0000000e+00 1.0970234e-03 3.7570429e-04 2.0589796e-03]
[0.0000000e+00 0.0000000e+00 1.2815776e-03 3.2290791e-03 1.0550662e-02]]

[[0.0000000e+00 0.0000000e+00 0.0000000e+00 1.2635149e-02 1.8557845e-02]
[0.0000000e+00 0.0000000e+00 0.0000000e+00 2.4842786e-02 3.1909555e-02]]]
states_val---------------@@@@@@@
states_val: (LSTMStateTuple(c=array([[1.3327965 , 0. , 0.00894412, 0.22311509, 0.03028038],
[0. , 0. , 0.01238959, 0. , 0.03147274],
[1.1183366 , 0. , 0. , 0.5472436 , 0.02206855],
[2.0191607 , 0. , 0. , 2.6519086 , 0.59182495]],
dtype=float32), h=array([[1.2576088e+00, 0.0000000e+00, 2.2009923e-04, 2.0464467e-01,
3.0277649e-02],
[0.0000000e+00, 0.0000000e+00, 2.3497457e-03, 0.0000000e+00,
3.1161346e-02],
[9.1592205e-01, 0.0000000e+00, 0.0000000e+00, 4.4004232e-01,
2.2003504e-02],
[9.3828839e-01, 0.0000000e+00, 0.0000000e+00, 1.7480042e+00,
5.6287253e-01]], dtype=float32)), LSTMStateTuple(c=array([[0.1035694 , 0.03449963, 0. , 0.14750442, 0. ],
[0. , 0. , 0. , 0.00652156, 0. ],
[0.0870434 , 0.01877719, 0.03016192, 0.09903809, 0.02479297],
[0.04195413, 0. , 0.25288635, 0.07827503, 0.6962559 ]],
dtype=float32), h=array([[0.05529634, 0.01781071, 0. , 0.08374746, 0. ],
[0. , 0. , 0. , 0.00326549, 0. ],
[0.04538089, 0.00983556, 0.0137444 , 0.05214344, 0.01144662],
[0.02290848, 0. , 0.12686448, 0.03508626, 0.37408575]],
dtype=float32)), LSTMStateTuple(c=array([[1.7770827e-04, 0.0000000e+00, 1.2407922e-02, 7.8595774e-03,
2.4938188e-02],
[9.3860253e-05, 0.0000000e+00, 7.0359174e-04, 4.5201261e-04,
6.1053125e-04],
[0.0000000e+00, 0.0000000e+00, 2.5325045e-03, 6.3966960e-03,
2.1059981e-02],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 5.1615380e-02,
6.4897582e-02]], dtype=float32), h=array([[9.0805421e-05, 0.0000000e+00, 6.3243252e-03, 3.9998074e-03,
1.2471758e-02],
[4.6948360e-05, 0.0000000e+00, 3.5201167e-04, 2.2613116e-04,
3.0527476e-04],
[0.0000000e+00, 0.0000000e+00, 1.2815776e-03, 3.2290791e-03,
1.0550662e-02],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 2.4842786e-02,
3.1909555e-02]], dtype=float32)))
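With LSTM cells each layer's state is an LSTMStateTuple(c=..., h=...): c is the internal cell state, and h is the hidden state that the layer also emits as its output. So the top layer's h should equal the last valid step of outputs_val. A sketch reusing the values above:

import numpy as np
batch_idx = np.arange(X_batch.shape[0])
picked = outputs_val[batch_idx, seq_length_batch - 1]
print(np.allclose(picked, states_val[-1].h))  # True: top layer's h == last valid output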

