Exercise 6-1P: Derive the backpropagation through time (BPTT) algorithm for RNNs.
In the derivation, $z_k = U h_{k-1} + W x_k + b$ is the net input at time $k$; the hidden state $h_k = f(z_k)$ is what the net input becomes after passing through the activation function $f(\cdot)$; and $[h_{k-1}]_j$ denotes the $j$-th dimension of the hidden state at time $k-1$.
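The core of the derivation in this notation (a sketch assuming the textbook's definitions, with $\mathcal{L}_t$ the loss at time $t$) is the recursion for the error term:

    \delta_{t,k} \triangleq \frac{\partial \mathcal{L}_t}{\partial z_k}
                = \mathrm{diag}\big(f'(z_k)\big)\, U^{\top}\, \delta_{t,k+1},
    \qquad 1 \le k < t

Unrolling it from $k = t$ back to $k = 1$ is exactly backpropagation through time: the error at each earlier step is obtained from the step after it.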
Exercise 6-2: Derive the gradients in Equation (6.40) and Equation (6.41).
Equation (6.40), the gradient with respect to the weight matrix $W$:

    \frac{\partial \mathcal{L}}{\partial W} = \sum_{t=1}^{T} \sum_{k=1}^{t} \delta_{t,k}\, x_k^{\top}

Equation (6.41), the gradient with respect to the bias $b$:

    \frac{\partial \mathcal{L}}{\partial b} = \sum_{t=1}^{T} \sum_{k=1}^{t} \delta_{t,k}

Here $a$ is simply the input, i.e., it plays the role of $x$ in the formulas above.
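A sketch of the step behind (6.40), under the same notation as Exercise 6-1P: only the $i$-th component of the net input $z_k$ depends on the entry $w_{ij}$, and $\partial [z_k]_i / \partial w_{ij} = [x_k]_j$, so

    \frac{\partial \mathcal{L}}{\partial w_{ij}}
      = \sum_{t=1}^{T} \sum_{k=1}^{t} [\delta_{t,k}]_i\, [x_k]_j
    \;\Longrightarrow\;
    \frac{\partial \mathcal{L}}{\partial W}
      = \sum_{t=1}^{T} \sum_{k=1}^{t} \delta_{t,k}\, x_k^{\top}

Equation (6.41) follows the same way with $\partial z_k / \partial b = I$, which drops the $x_k^{\top}$ factor.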
Exercise 6-3: When Equation (6.50) is used as the state-update rule of a recurrent neural network, analyze why it may suffer from exploding gradients, and give a solution.
Equation (6.50):

    h_t = h_{t-1} + g(x_t, h_{t-1}; \theta)

where $g(\cdot)$ is a nonlinear function and $\theta$ are learnable parameters. With this update, $\partial h_t / \partial h_{t-1} = I + \partial g / \partial h_{t-1}$, so backpropagating over many steps multiplies Jacobians of the form $I + \partial g / \partial h$. The identity term keeps the gradient from vanishing, but the accumulated product can still grow without bound over long spans, which is why the gradient may explode.
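A minimal numpy sketch of this effect (my own toy illustration, not from the textbook): treat each per-step Jacobian as $I + J_t$, with a random $J_t$ standing in for $\partial g / \partial h_{t-1}$, and watch the norm of the accumulated product grow:

    import numpy as np

    np.random.seed(0)
    d, T = 5, 100
    grad = np.eye(d)                     # accumulated product of Jacobians
    for t in range(T):
        J = 0.3 * np.random.randn(d, d)  # hypothetical stand-in for dg/dh at step t
        grad = grad @ (np.eye(d) + J)    # dh_t/dh_{t-1} = I + dg/dh_{t-1}
        if (t + 1) % 20 == 0:
            print(t + 1, np.linalg.norm(grad))

Running this, the norm grows roughly exponentially with the number of steps, which is exactly the exploding behavior described above.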
The way to counter this behavior is a gating mechanism, as in LSTM. The gating consists of a forget gate and an input gate.
In a plain RNN, the use of the logistic (sigmoid) or tanh activation easily leads to vanishing gradients: the influence of a state on states many time steps later all but disappears. The LSTM mechanism was designed precisely to solve this long-term dependency problem. An LSTM can remove information from, or add information to, the cell state through carefully designed structures called "gates". A gate is a way of letting information through selectively; it consists of a sigmoid neural-network layer and an element-wise multiplication.
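A minimal sketch of the gating idea (my own illustration with made-up sizes, not the full LSTM): each gate squashes an affine function of the input and hidden state into (0, 1) with a sigmoid, then scales information element-wise:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    rng = np.random.default_rng(0)
    x, h, c = rng.standard_normal(4), np.zeros(5), np.zeros(5)  # hypothetical sizes
    W_f, U_f = rng.standard_normal((5, 4)), rng.standard_normal((5, 5))
    W_i, U_i = rng.standard_normal((5, 4)), rng.standard_normal((5, 5))
    W_c, U_c = rng.standard_normal((5, 4)), rng.standard_normal((5, 5))

    f = sigmoid(W_f @ x + U_f @ h)        # forget gate: how much old cell state to keep
    i = sigmoid(W_i @ x + U_i @ h)        # input gate: how much new information to write
    c_tilde = np.tanh(W_c @ x + U_c @ h)  # candidate cell state
    c = f * c + i * c_tilde               # selective, element-wise update
    print(c)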
LSTM itself will be studied in detail in a later assignment.
To be honest, I did not fully understand the underlying principle here, but a very capable classmate's blog covers it: NNDL作业10 手动推导BPTT+numpy和Pytorch代码实现梯度计算-CSDN博客.
Exercise 6-2P: Design a simple RNN model, implement the backpropagation operator in both NumPy and PyTorch, and test it with numerical inputs.
Reference: L5W1作业1 手把手实现循环神经网络-CSDN博客
import torch
import numpy as np


class RNNCell:
    def __init__(self, weight_ih, weight_hh, bias_ih, bias_hh):
        self.weight_ih = weight_ih   # input-to-hidden weight matrix
        self.weight_hh = weight_hh   # hidden-to-hidden weight matrix
        self.bias_ih = bias_ih       # input-to-hidden bias
        self.bias_hh = bias_hh       # hidden-to-hidden bias

        self.x_stack = []            # inputs, one entry per time step
        self.dx_list = []            # gradients w.r.t. the inputs
        self.dw_ih_stack = []        # gradients w.r.t. weight_ih
        self.dw_hh_stack = []        # gradients w.r.t. weight_hh
        self.db_ih_stack = []        # gradients w.r.t. bias_ih
        self.db_hh_stack = []        # gradients w.r.t. bias_hh

        self.prev_hidden_stack = []  # previous hidden state at each time step
        self.next_hidden_stack = []  # next hidden state at each time step
        # temporary cache
        self.prev_dh = None          # gradient flowing back through the hidden state

    def __call__(self, x, prev_hidden):
        self.x_stack.append(x)

        # next hidden state: tanh of the affine transforms of the input
        # and the previous hidden state
        next_h = np.tanh(np.dot(x, self.weight_ih.T)
                         + np.dot(prev_hidden, self.weight_hh.T)
                         + self.bias_ih + self.bias_hh)

        self.prev_hidden_stack.append(prev_hidden)
        self.next_hidden_stack.append(next_h)
        # clean cache: zero the hidden-state gradient for the backward pass
        self.prev_dh = np.zeros(next_h.shape)
        return next_h
    # backward pass, written from the hand-derived BPTT equations
    def backward(self, dh):
        # pop the cached input, previous hidden state and next hidden state
        x = self.x_stack.pop()
        prev_hidden = self.prev_hidden_stack.pop()
        next_hidden = self.next_hidden_stack.pop()

        # chain rule: d tanh(z)/dz = 1 - tanh(z)^2
        d_tanh = (dh + self.prev_dh) * (1 - next_hidden ** 2)
        self.prev_dh = np.dot(d_tanh, self.weight_hh)  # gradient sent to the previous time step

        dx = np.dot(d_tanh, self.weight_ih)            # gradient w.r.t. the input
        self.dx_list.insert(0, dx)                     # prepend to keep time order

        dw_ih = np.dot(d_tanh.T, x)
        self.dw_ih_stack.append(dw_ih)
        dw_hh = np.dot(d_tanh.T, prev_hidden)
        self.dw_hh_stack.append(dw_hh)

        self.db_ih_stack.append(d_tanh)
        self.db_hh_stack.append(d_tanh)

        return self.dx_list
if __name__ == '__main__':
    # fix the numpy and pytorch random seeds
    np.random.seed(123)
    torch.random.manual_seed(123)
    # numpy printing: 6 decimal places, suppress scientific notation
    np.set_printoptions(precision=6, suppress=True)

    # PyTorch reference RNN: input size 4, hidden size 5
    rnn_PyTorch = torch.nn.RNN(4, 5).double()
    rnn_numpy = RNNCell(rnn_PyTorch.all_weights[0][0].data.numpy(),
                        rnn_PyTorch.all_weights[0][1].data.numpy(),
                        rnn_PyTorch.all_weights[0][2].data.numpy(),
                        rnn_PyTorch.all_weights[0][3].data.numpy())

    nums = 3  # sequence length
    x3_numpy = np.random.random((nums, 3, 4))
    x3_tensor = torch.tensor(x3_numpy, requires_grad=True)
    h3_numpy = np.random.random((1, 3, 5))
    h3_tensor = torch.tensor(h3_numpy, requires_grad=True)
    dh_numpy = np.random.random((nums, 3, 5))
    dh_tensor = torch.tensor(dh_numpy, requires_grad=True)

    h3_tensor = rnn_PyTorch(x3_tensor, h3_tensor)  # returns the (output, h_n) tuple

    h_numpy_list = []
    h_numpy = h3_numpy[0]
    for i in range(nums):            # forward through time
        h_numpy = rnn_numpy(x3_numpy[i], h_numpy)
        h_numpy_list.append(h_numpy)

    h3_tensor[0].backward(dh_tensor)
    for i in reversed(range(nums)):  # backward through time
        rnn_numpy.backward(dh_numpy[i])

    print("numpy_hidden :\n", np.array(h_numpy_list))
    print("torch_hidden :\n", h3_tensor[0].data.numpy())
    print("-----------------------------------------------")
    print("dx_numpy :\n", np.array(rnn_numpy.dx_list))
    print("dx_torch :\n", x3_tensor.grad.data.numpy())
    print("------------------------------------------------")
    print("dw_ih_numpy :\n", np.sum(rnn_numpy.dw_ih_stack, axis=0))
    print("dw_ih_torch :\n", rnn_PyTorch.all_weights[0][0].grad.data.numpy())
    print("------------------------------------------------")
    print("dw_hh_numpy :\n", np.sum(rnn_numpy.dw_hh_stack, axis=0))
    print("dw_hh_torch :\n", rnn_PyTorch.all_weights[0][1].grad.data.numpy())
    print("------------------------------------------------")
    print("db_ih_numpy :\n", np.sum(rnn_numpy.db_ih_stack, axis=(0, 1)))
    print("db_ih_torch :\n", rnn_PyTorch.all_weights[0][2].grad.data.numpy())
    print("-----------------------------------------------")
    print("db_hh_numpy :\n", np.sum(rnn_numpy.db_hh_stack, axis=(0, 1)))
    print("db_hh_torch :\n", rnn_PyTorch.all_weights[0][3].grad.data.numpy())
Running this prints the hidden states and gradients from both implementations side by side; the weight and bias gradients (and the hidden states) computed by NumPy match those computed by PyTorch.
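Instead of eyeballing the printouts, the agreement can also be asserted programmatically. A small addition of my own that could go at the end of the `__main__` block above, reusing the variables already defined there:

    # optional check (my own addition): assert that NumPy and PyTorch agree
    assert np.allclose(np.array(h_numpy_list), h3_tensor[0].data.numpy())
    assert np.allclose(np.array(rnn_numpy.dx_list), x3_tensor.grad.data.numpy())
    assert np.allclose(np.sum(rnn_numpy.dw_ih_stack, axis=0),
                       rnn_PyTorch.all_weights[0][0].grad.data.numpy())
    print("all gradients match")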
I'm writing this up late; in today's class the teacher went over the various activation functions, so let me review them here (a small numpy sketch follows the list):
Sigmoid
tanh
ReLU
leaky ReLU
softplus
softmax
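A quick numpy refresher of these six functions (my own minimal definitions; softmax uses the max-subtraction trick for numerical stability):

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def tanh(z):
        return np.tanh(z)

    def relu(z):
        return np.maximum(0.0, z)

    def leaky_relu(z, alpha=0.01):      # alpha: slope for z < 0
        return np.where(z > 0, z, alpha * z)

    def softplus(z):
        return np.log1p(np.exp(z))      # log(1 + e^z)

    def softmax(z):
        e = np.exp(z - np.max(z))       # subtract the max for numerical stability
        return e / e.sum()

    z = np.array([-2.0, 0.0, 3.0])
    for f in (sigmoid, tanh, relu, leaky_relu, softplus, softmax):
        print(f.__name__, f(z))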
REF:
梯度消失与梯度爆炸产生、原理和解决方案_梯度消失和梯度爆炸-CSDN博客