To understand the algorithm behind torch.nn.RNN, I wrote a simple Python implementation of a single-layer, unidirectional RNN.
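Per the PyTorch documentation, with the default tanh nonlinearity the RNN computes, at each time step t,

h_t = tanh(x_t · W_ih^T + b_ih + h_(t-1) · W_hh^T + b_hh)

where the initial hidden state defaults to zeros. The implementation below follows this recurrence step by step: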
import numpy as np

# input: input data, shape sequence_length * feature_size
# hidden_size: number of features in the hidden state
# wih: input-to-hidden weights, shape hidden_size * feature_size
# bih: input-to-hidden bias, shape 1 * hidden_size
# whh: h[t-1]-to-h[t] weights, shape hidden_size * hidden_size
# bhh: h[t-1]-to-h[t] bias, shape 1 * hidden_size
def tanh_RNN(input, hidden_size, wih, bih, whh, bhh):
    h = []
    sequence_length = len(input)
    for t in range(sequence_length):
        # project the current input; note the transpose on wih
        x_proj = np.dot(input[t], wih.T) + bih + bhh
        if t == 0:
            # the initial hidden state defaults to zeros, so there is
            # no recurrent term at the first step
            h.append([np.tanh(x) for x in x_proj])
        else:
            # recurrent contribution; note the transpose on whh as well
            ht_1 = np.dot(h[t - 1], whh.T)
            h.append([np.tanh(x_proj[j] + ht_1[j])
                      for j in range(hidden_size)])
    return h
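tanh_RNN returns a plain Python list of per-step hidden states; stacking it with np.array gives the (sequence_length, hidden_size) layout that torch.nn.RNN's output has for a single sequence. A minimal sketch with random data (shapes chosen to match the example further below, all values hypothetical):

rng = np.random.default_rng(0)
fs, hs, sl = 5, 3, 2  # feature_size, hidden_size, sequence_length
h = tanh_RNN(rng.standard_normal((sl, fs)), hs,
             rng.standard_normal((hs, fs)),   # wih
             rng.standard_normal(hs),         # bih
             rng.standard_normal((hs, hs)),   # whh
             rng.standard_normal(hs))         # bhh
print(np.array(h).shape)  # (2, 3)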
The result can be checked against the output of torch.nn.RNN:
import torch
import torch.nn as nn
input_size=5
hidden_size=3
num_layers=1
single_rnn=nn.RNN(input_size,hidden_size,num_layers,batch_first=True)
input=torch.randn(1,2,input_size)
print(single_rnn)
print(input)
# inspect the initialized weights and biases
print("input2hidden,weights=",single_rnn.weight_ih_l0)
print("single_rnn.bias_ih_l0=",single_rnn.bias_ih_l0)
print("single_rnn.weight_hh_l0=",single_rnn.weight_hh_l0)
print("single_rnn.bias_hh_l0=",single_rnn.bias_hh_l0)
output,h_n=single_rnn(input)
print("output=",output)
Comparing the two printouts, the hidden states produced by tanh_RNN are essentially identical to the output of torch.nn.RNN.
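To check the agreement programmatically rather than by eye, one can copy the initialized weights out of single_rnn and feed them to tanh_RNN. A sketch reusing the variables defined above (the tolerance is a guess for float32 round-off):

import numpy as np
wih = single_rnn.weight_ih_l0.detach().numpy()
bih = single_rnn.bias_ih_l0.detach().numpy()
whh = single_rnn.weight_hh_l0.detach().numpy()
bhh = single_rnn.bias_hh_l0.detach().numpy()
h = np.array(tanh_RNN(input[0].numpy(), hidden_size, wih, bih, whh, bhh))
# output has shape (batch, seq, hidden) because batch_first=True
print(np.allclose(h, output.detach().numpy()[0], atol=1e-6))  # expect True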
Below is a complete, self-contained invocation example:
import numpy as np

hidden_size = 3

# sequence_length * feature_size
input = np.array([[-0.7692,  1.7660, -0.1400, -0.1676,  0.5469],
                  [-0.1650,  0.8947, -0.9094,  0.4510, -0.8920]])
# hidden_size * feature_size
wih = np.array([[ 0.4630,  0.4379,  0.4532,  0.5622,  0.4481],
                [ 0.3023, -0.2723, -0.1808,  0.0313, -0.2247],
                [ 0.4271, -0.3673, -0.2805, -0.3349, -0.1799]])
# 1 * hidden_size
bih = np.array([-0.4720, -0.0638,  0.5293])
# hidden_size * hidden_size
whh = np.array([[ 0.1496,  0.5724, -0.4582],
                [-0.2243, -0.5099, -0.5595],
                [-0.4785, -0.3711, -0.1362]])
# 1 * hidden_size
bhh = np.array([-0.5698, -0.0084,  0.0359])

print(tanh_RNN(input, hidden_size, wih, bih, whh, bhh))