Exercise 6-2: Derive the gradients in formulas (6.40) and (6.41).
Backpropagation through time (BPTT) for an RNN differs from ordinary backpropagation (BP) in feedforward and convolutional networks mainly in two respects (the resulting gradient expressions are sketched below):
- In space: the gradient with respect to the parameters is first backpropagated at a given time step; since all layers share the same parameters, the parameter gradient is the sum of the parameter gradients from all layers.
- In time: every time step shares the same weight matrix U, so during backpropagation the contribution of U at every time step has to be computed, and the total loss equals the sum of the losses at all time steps.
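Written out (a sketch in the book's notation, where $U$ is the recurrent weight matrix, $\boldsymbol z_k = U\boldsymbol h_{k-1} + W\boldsymbol x_k + \boldsymbol b$ is the pre-activation at step $k$, and $\delta_{t,k} = \partial\mathcal L_t/\partial\boldsymbol z_k$; the exact expressions should be checked against formulas (6.40) and (6.41) in the textbook):

$$\frac{\partial\mathcal L_t}{\partial U} = \sum_{k=1}^{t} \delta_{t,k}\,\boldsymbol h_{k-1}^{\top},\qquad
\delta_{t,k} = \mathrm{diag}\big(f'(\boldsymbol z_k)\big)\,U^{\top}\,\delta_{t,k+1},$$

and, because the total loss is $\mathcal L = \sum_{t=1}^{T}\mathcal L_t$,

$$\frac{\partial\mathcal L}{\partial U} = \sum_{t=1}^{T}\sum_{k=1}^{t} \delta_{t,k}\,\boldsymbol h_{k-1}^{\top}.$$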
Exercise 6-3: When formula (6.50) is used as the state-update equation of a recurrent neural network, analyze why gradient explosion may still occur and give solutions.
Formula (6.50) is the state update with a direct linear (residual-style) dependence on the previous state: $\boldsymbol h_t = \boldsymbol h_{t-1} + g(\boldsymbol x_t, \boldsymbol h_{t-1}; \theta)$.
Why gradient explosion may still occur: in a vanilla RNN, the error propagated back over $t-k$ steps is scaled by a factor of roughly $\gamma^{t-k}$, where $\gamma$ measures the norm of $\partial\boldsymbol h_{\tau}/\partial\boldsymbol h_{\tau-1}$; $\gamma<1$ leads to vanishing gradients and $\gamma>1$ to exploding gradients. After changing the update to the formula above, the backpropagated error always contains an identity term, so it no longer shrinks to zero as $t-k$ grows; the vanishing-gradient problem is thus solved. Gradient explosion, however, remains: when $\gamma>1$, the accumulated factor still grows without bound as the propagation path gets longer.
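Concretely (a sketch; $g$ is the nonlinear increment in the update above):

$$\frac{\partial\boldsymbol h_t}{\partial\boldsymbol h_{t-1}} = I + \frac{\partial g(\boldsymbol x_t,\boldsymbol h_{t-1};\theta)}{\partial\boldsymbol h_{t-1}},\qquad
\frac{\partial\boldsymbol h_t}{\partial\boldsymbol h_k} = \prod_{\tau=k+1}^{t}\Big(I + \frac{\partial g(\boldsymbol x_\tau,\boldsymbol h_{\tau-1};\theta)}{\partial\boldsymbol h_{\tau-1}}\Big).$$

The identity term keeps this product from collapsing to zero, but each factor can still have norm larger than 1, so over a long path the product, and hence the gradient, can still blow up.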
Solutions:
- Gradient clipping: when the norm of the gradient exceeds a preset threshold, clip it back to a smaller value (see the sketch after this list).
- On top of the formula, introduce a gating mechanism to control how fast information accumulates: selectively add new information and selectively forget previously accumulated information.
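As a minimal illustration of gradient clipping in PyTorch (a sketch only; the model, data, and loss below are placeholders, not part of the original exercise):

import torch
import torch.nn as nn

model = nn.RNN(input_size=4, hidden_size=5)              # placeholder RNN
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
x = torch.randn(7, 3, 4)                                  # (seq_len, batch, input_size)

output, h_n = model(x)
loss = output.pow(2).mean()                               # dummy loss, only to produce gradients
loss.backward()
# Rescale the total gradient norm to at most 1.0 before the parameter update
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()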
Exercise 6-2P: Design a simple RNN model, implement the backpropagation operator with both NumPy and PyTorch, and test it with concrete numbers.
1. Define the backward (gradient) functions
import numpy as np
import torch
import torch.nn

# GRADED FUNCTION: rnn_cell_forward
def softmax(a):
    # Column-wise softmax: each column of a is one example
    exp_a = np.exp(a)
    sum_exp_a = np.sum(exp_a, axis=0, keepdims=True)
    y = exp_a / sum_exp_a
    return y

def rnn_cell_forward(xt, a_prev, parameters):
    # One forward step of the RNN cell:
    #   a_next  = tanh(Wax @ xt + Waa @ a_prev + ba)
    #   yt_pred = softmax(Wya @ a_next + by)
    Wax = parameters["Wax"]
    Waa = parameters["Waa"]
    Wya = parameters["Wya"]
    ba = parameters["ba"]
    by = parameters["by"]
    a_next = np.tanh(np.dot(Wax, xt) + np.dot(Waa, a_prev) + ba)
    yt_pred = softmax(np.dot(Wya, a_next) + by)
    # Cache everything the backward pass will need
    cache = (a_next, a_prev, xt, parameters)
    return a_next, yt_pred, cache

def rnn_cell_backward(da_next, cache):
    # One backward step of the RNN cell, given da_next = dL/da_next
    (a_next, a_prev, xt, parameters) = cache
    Wax = parameters["Wax"]
    Waa = parameters["Waa"]
    Wya = parameters["Wya"]
    ba = parameters["ba"]
    by = parameters["by"]
    # Backprop through tanh: d tanh(z) = (1 - tanh(z)^2) dz
    dtanh = (1 - a_next * a_next) * da_next
    dxt = np.dot(Wax.T, dtanh)
    dWax = np.dot(dtanh, xt.T)
    da_prev = np.dot(Waa.T, dtanh)
    dWaa = np.dot(dtanh, a_prev.T)
    dba = np.sum(dtanh, keepdims=True, axis=-1)
    gradients = {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba}
    return gradients
# Numerical test of rnn_cell_forward / rnn_cell_backward
np.random.seed(1)
xt = np.random.randn(3, 10)
a_prev = np.random.randn(5, 10)
Wax = np.random.randn(5, 3)
Waa = np.random.randn(5, 5)
Wya = np.random.randn(2, 5)
ba = np.random.randn(5, 1)
by = np.random.randn(2, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
a_next, yt, cache = rnn_cell_forward(xt, a_prev, parameters)
da_next = np.random.randn(5, 10)
gradients = rnn_cell_backward(da_next, cache)
print("gradients[\"dxt\"][1][2] =", gradients["dxt"][1][2])
print("gradients[\"dxt\"].shape =", gradients["dxt"].shape)
print("gradients[\"da_prev\"][2][3] =", gradients["da_prev"][2][3])
print("gradients[\"da_prev\"].shape =", gradients["da_prev"].shape)
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWax\"].shape =", gradients["dWax"].shape)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWaa\"].shape =", gradients["dWaa"].shape)
print("gradients[\"dba\"][4] =", gradients["dba"][4])
print("gradients[\"dba\"].shape =", gradients["dba"].shape)
Output:
gradients["dxt"][1][2] = -0.4605641030588796
gradients["dxt"].shape = (3, 10)
gradients["da_prev"][2][3] = 0.08429686538067724
gradients["da_prev"].shape = (5, 10)
gradients["dWax"][3][1] = 0.39308187392193034
gradients["dWax"].shape = (5, 3)
gradients["dWaa"][1][2] = -0.28483955786960663
gradients["dWaa"].shape = (5, 5)
gradients["dba"][4] = [0.80517166]
gradients["dba"].shape = (5, 1)
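As a further numerical test, the analytic gradient can be compared against a finite-difference approximation. The snippet below is a minimal sketch that reuses xt, a_prev, parameters, da_next and gradients from the test above; the scalar loss np.sum(a_next * da_next) is chosen only so that its gradient with respect to a_next is exactly da_next:

def loss_wrt_Wax(Wax_value):
    # Scalar loss whose gradient w.r.t. a_next equals da_next
    params = dict(parameters)
    params["Wax"] = Wax_value
    a_n, _, _ = rnn_cell_forward(xt, a_prev, params)
    return np.sum(a_n * da_next)

eps = 1e-6
num_dWax = np.zeros_like(Wax)
for i in range(Wax.shape[0]):
    for j in range(Wax.shape[1]):
        W_plus = Wax.copy();  W_plus[i, j] += eps
        W_minus = Wax.copy(); W_minus[i, j] -= eps
        # Central-difference approximation of dL/dWax[i, j]
        num_dWax[i, j] = (loss_wrt_Wax(W_plus) - loss_wrt_Wax(W_minus)) / (2 * eps)

print("max |analytic - numeric| for dWax:",
      np.max(np.abs(gradients["dWax"] - num_dWax)))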
2. Implement the rnn_forward function: first initialize the return variables with zeros, then loop over all time steps, calling rnn_cell_forward at each step and updating the other variables accordingly.
# GRADED FUNCTION: rnn_forward
def rnn_forward(x, a0, parameters):
    # Forward pass over a whole sequence; x has shape (n_x, m, T_x)
    caches = []
    n_x, m, T_x = x.shape
    n_y, n_a = parameters["Wya"].shape
    # Initialize hidden states and predictions with zeros
    a = np.zeros((n_a, m, T_x))
    y_pred = np.zeros((n_y, m, T_x))
    a_next = a0
    for t in range(T_x):
        # One RNN-cell step; the same parameters are reused at every time step
        a_next, yt_pred, cache = rnn_cell_forward(x[:, :, t], a_next, parameters)
        a[:, :, t] = a_next
        y_pred[:, :, t] = yt_pred
        caches.append(cache)
    caches = (caches, x)
    return a, y_pred, caches
np.random.seed(1)
x = np.random.randn(3, 10, 4)
a0 = np.random.randn(5, 10)
Waa = np.random.randn(5, 5)
Wax = np.random.randn(5, 3)
Wya = np.random.randn(2, 5)
ba = np.random.randn(5, 1)
by = np.random.randn(2, 1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a, y_pred, caches = rnn_forward(x, a0, parameters)
print("a[4][1] = ", a[4][1])
print("a.shape = ", a.shape)
print("y_pred[1][3] =", y_pred[1][3])
print("y_pred.shape = ", y_pred.shape)
print("caches[1][1][3] =", caches[1][1][3])
print("len(caches) = ", len(caches))
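The full-sequence backward pass is not reproduced above; a minimal sketch of how it could be built on top of rnn_cell_backward is shown below (an assumption of mine, not the original post's code; da is taken to be the gradient of the loss with respect to every hidden state, with shape (n_a, m, T_x)):

def rnn_backward(da, caches):
    # Backward pass over the whole sequence (BPTT): the gradients of the
    # shared parameters are accumulated over all time steps
    (caches_list, x) = caches
    (a1, a0, x1, parameters) = caches_list[0]
    n_a, m, T_x = da.shape
    n_x, m = x1.shape
    dx = np.zeros((n_x, m, T_x))
    dWax = np.zeros((n_a, n_x))
    dWaa = np.zeros((n_a, n_a))
    dba = np.zeros((n_a, 1))
    da_prevt = np.zeros((n_a, m))
    for t in reversed(range(T_x)):
        # The gradient flowing into a_t comes from the loss at step t
        # plus the gradient propagated back from step t+1
        grads = rnn_cell_backward(da[:, :, t] + da_prevt, caches_list[t])
        dx[:, :, t] = grads["dxt"]
        da_prevt = grads["da_prev"]
        dWax += grads["dWax"]
        dWaa += grads["dWaa"]
        dba += grads["dba"]
    da0 = da_prevt
    return {"dx": dx, "da0": da0, "dWax": dWax, "dWaa": dWaa, "dba": dba}

For example, rnn_backward(np.random.randn(5, 10, 4), caches) would reuse the caches produced by the rnn_forward test above.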
3. Implement the backpropagation operator with both NumPy and PyTorch and compare the two
class RNNCell:
    def __init__(self, weight_ih, weight_hh,
                 bias_ih, bias_hh):
        self.weight_ih = weight_ih
        self.weight_hh = weight_hh
        self.bias_ih = bias_ih
        self.bias_hh = bias_hh

        self.x_stack = []
        self.dx_list = []
        self.dw_ih_stack = []
        self.dw_hh_stack = []
        self.db_ih_stack = []
        self.db_hh_stack = []

        self.prev_hidden_stack = []
        self.next_hidden_stack = []

        # temporary cache
        self.prev_dh = None

    def __call__(self, x, prev_hidden):
        self.x_stack.append(x)

        # next_h = tanh(x @ W_ih^T + prev_h @ W_hh^T + b_ih + b_hh), as in torch.nn.RNN
        next_h = np.tanh(
            np.dot(x, self.weight_ih.T)
            + np.dot(prev_hidden, self.weight_hh.T)
            + self.bias_ih + self.bias_hh)

        self.prev_hidden_stack.append(prev_hidden)
        self.next_hidden_stack.append(next_h)
        # clean cache
        self.prev_dh = np.zeros(next_h.shape)
        return next_h

    def backward(self, dh):
        # dh is the gradient of the loss w.r.t. the hidden state of the
        # current (latest unprocessed) time step; prev_dh carries the
        # gradient coming from the later time step processed before it
        x = self.x_stack.pop()
        prev_hidden = self.prev_hidden_stack.pop()
        next_hidden = self.next_hidden_stack.pop()

        d_tanh = (dh + self.prev_dh) * (1 - next_hidden ** 2)
        self.prev_dh = np.dot(d_tanh, self.weight_hh)

        dx = np.dot(d_tanh, self.weight_ih)
        self.dx_list.insert(0, dx)

        dw_ih = np.dot(d_tanh.T, x)
        self.dw_ih_stack.append(dw_ih)

        dw_hh = np.dot(d_tanh.T, prev_hidden)
        self.dw_hh_stack.append(dw_hh)

        self.db_ih_stack.append(d_tanh)
        self.db_hh_stack.append(d_tanh)

        return self.dx_list
if __name__ == '__main__':
    np.random.seed(123)
    torch.random.manual_seed(123)
    np.set_printoptions(precision=6, suppress=True)

    rnn_PyTorch = torch.nn.RNN(4, 5).double()
    rnn_numpy = RNNCell(rnn_PyTorch.all_weights[0][0].data.numpy(),
                        rnn_PyTorch.all_weights[0][1].data.numpy(),
                        rnn_PyTorch.all_weights[0][2].data.numpy(),
                        rnn_PyTorch.all_weights[0][3].data.numpy())

    nums = 3
    x3_numpy = np.random.random((nums, 3, 4))
    x3_tensor = torch.tensor(x3_numpy, requires_grad=True)
    h3_numpy = np.random.random((1, 3, 5))
    h3_tensor = torch.tensor(h3_numpy, requires_grad=True)
    dh_numpy = np.random.random((nums, 3, 5))
    dh_tensor = torch.tensor(dh_numpy, requires_grad=True)

    # PyTorch forward pass: returns (output over all steps, final hidden state)
    h3_tensor = rnn_PyTorch(x3_tensor, h3_tensor)
    h_numpy_list = []
    h_numpy = h3_numpy[0]
    for i in range(nums):
        h_numpy = rnn_numpy(x3_numpy[i], h_numpy)
        h_numpy_list.append(h_numpy)

    # PyTorch backward on the full output; NumPy backward step by step in reverse
    h3_tensor[0].backward(dh_tensor)
    for i in reversed(range(nums)):
        rnn_numpy.backward(dh_numpy[i])

    print("numpy_hidden :\n", np.array(h_numpy_list))
    print("tensor_hidden :\n", h3_tensor[0].data.numpy())
    print("------")
    print("dx_numpy :\n", np.array(rnn_numpy.dx_list))
    print("dx_tensor :\n", x3_tensor.grad.data.numpy())
    print("------")
    print("dw_ih_numpy :\n",
          np.sum(rnn_numpy.dw_ih_stack, axis=0))
    print("dw_ih_tensor :\n",
          rnn_PyTorch.all_weights[0][0].grad.data.numpy())
    print("------")
    print("dw_hh_numpy :\n",
          np.sum(rnn_numpy.dw_hh_stack, axis=0))
    print("dw_hh_tensor :\n",
          rnn_PyTorch.all_weights[0][1].grad.data.numpy())
    print("------")
    print("db_ih_numpy :\n",
          np.sum(rnn_numpy.db_ih_stack, axis=(0, 1)))
    print("db_ih_tensor :\n",
          rnn_PyTorch.all_weights[0][2].grad.data.numpy())
    print("------")
    print("db_hh_numpy :\n",
          np.sum(rnn_numpy.db_hh_stack, axis=(0, 1)))
    print("db_hh_tensor :\n",
          rnn_PyTorch.all_weights[0][3].grad.data.numpy())
Experimental results: the NumPy implementation and the PyTorch implementation give essentially the same results.
numpy_hidden :
 [[[ 0.4686   -0.298203  0.741399 -0.446474  0.019391]
  [ 0.365172 -0.361254  0.426838 -0.448951  0.331553]
  [ 0.589187 -0.188248  0.684941 -0.45859   0.190099]]

 [[ 0.146213 -0.306517  0.297109  0.370957 -0.040084]
  [-0.009201 -0.365735  0.333659  0.486789  0.061897]
  [ 0.030064 -0.282985  0.42643   0.025871  0.026388]]

 [[ 0.225432 -0.015057  0.116555  0.080901  0.260097]
  [ 0.368327  0.258664  0.357446  0.177961  0.55928 ]
  [ 0.103317 -0.029123  0.182535  0.216085  0.264766]]]
tensor_hidden :
 [[[ 0.4686   -0.298203  0.741399 -0.446474  0.019391]
  [ 0.365172 -0.361254  0.426838 -0.448951  0.331553]
  [ 0.589187 -0.188248  0.684941 -0.45859   0.190099]]

 [[ 0.146213 -0.306517  0.297109  0.370957 -0.040084]
  [-0.009201 -0.365735  0.333659  0.486789  0.061897]
  [ 0.030064 -0.282985  0.42643   0.025871  0.026388]]

 [[ 0.225432 -0.015057  0.116555  0.080901  0.260097]
  [ 0.368327  0.258664  0.357446  0.177961  0.55928 ]
  [ 0.103317 -0.029123  0.182535  0.216085  0.264766]]]
------
dx_numpy :
 [[[-0.643965  0.215931 -0.476378  0.072387]
  [-1.221727  0.221325 -0.757251  0.092991]
  [-0.59872  -0.065826 -0.390795  0.037424]]

 [[-0.537631 -0.303022 -0.364839  0.214627]
  [-0.815198  0.392338 -0.564135  0.217464]
  [-0.931365 -0.254144 -0.561227  0.164795]]

 [[-1.055966  0.249554 -0.623127  0.009784]
  [-0.45858   0.108994 -0.240168  0.117779]
  [-0.957469  0.315386 -0.616814  0.205634]]]
dx_tensor :
 [[[-0.643965  0.215931 -0.476378  0.072387]
  [-1.221727  0.221325 -0.757251  0.092991]
  [-0.59872  -0.065826 -0.390795  0.037424]]

 [[-0.537631 -0.303022 -0.364839  0.214627]
  [-0.815198  0.392338 -0.564135  0.217464]
  [-0.931365 -0.254144 -0.561227  0.164795]]

 [[-1.055966  0.249554 -0.623127  0.009784]
  [-0.45858   0.108994 -0.240168  0.117779]
  [-0.957469  0.315386 -0.616814  0.205634]]]
------
dw_ih_numpy :
[[3.918335 2.958509 3.725173 4.157478]
[1.261197 0.812825 1.10621 0.97753 ]
[2.216469 1.718251 2.366936 2.324907]
[3.85458 3.052212 3.643157 3.845696]
[1.806807 1.50062 1.615917 1.521762]]
dw_ih_tensor :
[[3.918335 2.958509 3.725173 4.157478]
[1.261197 0.812825 1.10621 0.97753 ]
[2.216469 1.718251 2.366936 2.324907]
[3.85458 3.052212 3.643157 3.845696]
[1.806807 1.50062 1.615917 1.521762]]
------
dw_hh_numpy :
[[ 2.450078 0.243735 4.269672 0.577224 1.46911 ]
[ 0.421015 0.372353 0.994656 0.962406 0.518992]
[ 1.079054 0.042843 2.12169 0.863083 0.757618]
[ 2.225794 0.188735 3.682347 0.934932 0.955984]
[ 0.660546 -0.321076 1.554888 0.833449 0.605201]]
dw_hh_tensor :
[[ 2.450078 0.243735 4.269672 0.577224 1.46911 ]
[ 0.421015 0.372353 0.994656 0.962406 0.518992]
[ 1.079054 0.042843 2.12169 0.863083 0.757618]
[ 2.225794 0.188735 3.682347 0.934932 0.955984]
[ 0.660546 -0.321076 1.554888 0.833449 0.605201]]
------
db_ih_numpy :
[7.568411 2.175445 4.335336 6.820628 3.51003 ]
db_ih_tensor :
[7.568411 2.175445 4.335336 6.820628 3.51003 ]
------
db_hh_numpy :
[7.568411 2.175445 4.335336 6.820628 3.51003 ]
db_hh_tensor :
[7.568411 2.175445 4.335336 6.820628 3.51003 ]
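To turn the visual comparison above into an automatic check, assertions such as the following could be appended to the __main__ block (a sketch that reuses the variables defined in the script above):

# All of these should hold up to floating-point tolerance
assert np.allclose(np.array(h_numpy_list), h3_tensor[0].data.numpy())
assert np.allclose(np.array(rnn_numpy.dx_list), x3_tensor.grad.data.numpy())
assert np.allclose(np.sum(rnn_numpy.dw_ih_stack, axis=0),
                   rnn_PyTorch.all_weights[0][0].grad.data.numpy())
assert np.allclose(np.sum(rnn_numpy.dw_hh_stack, axis=0),
                   rnn_PyTorch.all_weights[0][1].grad.data.numpy())
assert np.allclose(np.sum(rnn_numpy.db_ih_stack, axis=(0, 1)),
                   rnn_PyTorch.all_weights[0][2].grad.data.numpy())
assert np.allclose(np.sum(rnn_numpy.db_hh_stack, axis=(0, 1)),
                   rnn_PyTorch.all_weights[0][3].grad.data.numpy())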
Summary:
- The main topic of this lesson was the derivation of BPTT. The big difference from ordinary BP is, first, parameter sharing in space: all layers use the same parameters, so when computing a parameter's gradient by backpropagation the contributions from all layers must be added up; and second, parameter sharing in time: all time steps share the same parameters, so during backpropagation the contribution of every time step has to be computed, and the total loss is the sum of the losses at all time steps, giving, for example, $\frac{\partial\mathcal L}{\partial U}=\sum_{t=1}^{T}\frac{\partial\mathcal L_t}{\partial U}$.
- Real-Time Recurrent Learning (RTRL): computes the gradients by applying the chain rule in forward mode.
- The long-range dependency problem of RNNs: because of exploding or vanishing gradients, an RNN can only learn dependencies over short time spans, i.e., it only has short-term memory.
Strategies against gradient explosion: weight decay and gradient clipping.
Strategy against gradient vanishing: change the recurrent edge into a linear dependence and add a nonlinear increment (analogous to a residual network), i.e. $\boldsymbol h_t = \boldsymbol h_{t-1} + g(\boldsymbol x_t, \boldsymbol h_{t-1}; \theta)$.
Even if gradient vanishing or explosion is handled separately in this way, a memory-capacity problem may still remain; introducing a gating mechanism and building long short-term memory (LSTM) networks addresses the long-range dependency problem.
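As a sketch of the gating idea (my notation, not a quotation of the textbook's formulas): introducing an update gate $\boldsymbol z_t \in [0,1]$ turns the residual-style update into

$$\boldsymbol z_t = \sigma(W_z\boldsymbol x_t + U_z\boldsymbol h_{t-1} + \boldsymbol b_z),\qquad
\boldsymbol h_t = \boldsymbol z_t \odot \boldsymbol h_{t-1} + (1-\boldsymbol z_t)\odot g(\boldsymbol x_t,\boldsymbol h_{t-1};\theta),$$

so the network can decide, dimension by dimension, how much old information to keep and how much new information to write in; GRU and LSTM are concrete realizations of this mechanism.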