8.13 Study Notes: Recurrent Neural Networks (RNN)

import torch
import torch.nn as nn
# 1. Unidirectional, single-layer RNN
single_rnn = nn.RNN(4, 3, 1, batch_first=True) # input_size=4, hidden_size=3, num_layers=1
# batch_first specifies the layout of the input and output tensors.
# By default (batch_first=False), nn.RNN expects input of shape
# (sequence_length, batch_size, input_size), where
#   batch_size      is the number of samples in each batch,
#   sequence_length is the number of time steps in each sample,
#   input_size      is the input feature dimension at each time step.
# With batch_first=True, the input layout becomes
# (batch_size, sequence_length, input_size), i.e. the batch dimension comes
# first, which is usually more convenient for batched data pipelines.
# Note that batch_first only affects the input and output tensors; the hidden
# state h_n always has shape (num_layers * num_directions, batch_size, hidden_size).
input = torch.randn(1, 2, 4) # (batch_size, sequence_length, input_size) = (1, 2, 4)
output, h_n = single_rnn(input)
# print(output)
# print((h_n))
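
# Expected shapes for this example:
#   output: (batch_size, sequence_length, hidden_size) = (1, 2, 3)
#   h_n:    (num_layers * num_directions, batch_size, hidden_size) = (1, 1, 3)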


# 2. Bidirectional, single-layer RNN
bidirectional_rnn = nn.RNN(4,3,1,batch_first=True,bidirectional=True)
bi_output, bi_h_n = bidirectional_rnn(input)
# print(bi_output)
# print(bi_h_n)
print(bi_output.shape, output.shape) # (1, 2, 6) vs (1, 2, 3): bidirectional doubles the feature dim
print(bi_h_n.shape, h_n.shape) # (2, 1, 3) vs (1, 1, 3): one hidden state per direction
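
# A minimal sketch of how the bidirectional output is laid out (fwd_out and
# bwd_out are illustrative names): the last dimension concatenates the forward
# and backward states, so the two directions can be recovered by slicing
# (hidden_size is 3 here).
fwd_out = bi_output[:, :, :3] # forward-direction states, (1, 2, 3)
bwd_out = bi_output[:, :, 3:] # backward-direction states, (1, 2, 3)
# The forward direction's final state is at t = -1; the backward direction's
# final state is the one computed at t = 0.
print(torch.allclose(fwd_out[:, -1, :], bi_h_n[0])) # expected: True
print(torch.allclose(bwd_out[:, 0, :], bi_h_n[1]))  # expected: True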

import torch
import torch.nn as nn

bs, T = 2, 3 # batch size, input sequence length
input_size, hidden_size = 2, 3 # input feature size, hidden feature size
input = torch.randn(bs, T, input_size) # randomly initialize an input feature sequence
h_prev = torch.zeros(bs, hidden_size) # initial hidden state

### step1: call the PyTorch RNN API
rnn = nn.RNN(input_size,hidden_size,batch_first=True)
rnn_output, state_final = rnn(input, h_prev.unsqueeze(0)) # h_0 must have shape (num_layers * num_directions, bs, hidden_size)
# print("Pytorch API output")
# print(rnn_output)
# print(state_final)
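# Shapes here: rnn_output is (bs, T, hidden_size) = (2, 3, 3);
# state_final is (num_layers * num_directions, bs, hidden_size) = (1, 2, 3).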

### step2: hand-write an rnn_forward function implementing the RNN computation
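# The recurrence implemented below matches nn.RNN with the default tanh
# nonlinearity:
#   h_t = tanh(W_ih @ x_t + b_ih + W_hh @ h_{t-1} + b_hh)
# where weight_ih has shape (hidden_size, input_size) and weight_hh has shape
# (hidden_size, hidden_size), as in nn.RNN's weight_ih_l0 / weight_hh_l0.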
def rnn_forward(input, weight_ih, weight_hh, bias_ih, bias_hh, h_prev):
    bs, T, input_size = input.shape
    h_dim = weight_ih.shape[0]
    h_out = torch.zeros(bs, T, h_dim) # initialize the output (state) matrix

    for t in range(T):
        x = input[:, t, :].unsqueeze(2) # input features at the current step, bs * input_size * 1
        w_ih_batch = weight_ih.unsqueeze(0).tile(bs, 1, 1) # bs * h_dim * input_size
        w_hh_batch = weight_hh.unsqueeze(0).tile(bs, 1, 1) # bs * h_dim * h_dim

        w_times_x = torch.bmm(w_ih_batch, x).squeeze(-1) # bs * h_dim
        w_times_h = torch.bmm(w_hh_batch, h_prev.unsqueeze(2)).squeeze(-1) # bs * h_dim
        h_prev = torch.tanh(w_times_x + bias_ih + w_times_h + bias_hh)

        h_out[:, t, :] = h_prev

    return h_out, h_prev.unsqueeze(0)
# Verify the correctness of rnn_forward
# for k,v in rnn.named_parameters():
#     print(k,v)
# custom_rnn_output, custom_state_final = rnn_forward(input, rnn.weight_ih_l0, rnn.weight_hh_l0, \
#                                                     rnn.bias_ih_l0, rnn.bias_hh_l0, h_prev)
# print("\n rnn_forward function output:")
# print(custom_rnn_output)
# print(custom_state_final)
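
# A quick numerical check (a sketch; weight_ih_l0 etc. are nn.RNN's layer-0
# parameter names). The hand-written forward should match the API output up
# to floating-point tolerance.
custom_rnn_output, custom_state_final = rnn_forward(input, rnn.weight_ih_l0, rnn.weight_hh_l0,
                                                    rnn.bias_ih_l0, rnn.bias_hh_l0, h_prev)
print(torch.allclose(rnn_output, custom_rnn_output, atol=1e-6))   # expected: True
print(torch.allclose(state_final, custom_state_final, atol=1e-6)) # expected: True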

### step3: hand-write a bidirectional_rnn_forward function implementing the bidirectional RNN computation
def bidirectional_rnn_forward(input,weight_ih,weight_hh,bias_ih,bias_hh,h_prev, \
                              weight_ih_reverse, weight_hh_reverse, bias_ih_reverse, bias_hh_reverse, h_prev_reverse):
    bs, T, input_size = input.shape
    h_dim = weight_ih.shape[0]
    h_out = torch.zeros(bs, T, h_dim*2)  # initialize the output (state) matrix; note the bidirectional feature size is doubled

    forward_output = rnn_forward(input, weight_ih, weight_hh, bias_ih, bias_hh, h_prev)[0] # forward layer
    backward_output = rnn_forward(torch.flip(input, [1]), weight_ih_reverse, weight_hh_reverse, bias_ih_reverse, bias_hh_reverse, \
                h_prev_reverse)[0] # backward layer (reads the sequence in reverse)
    h_out[:, :, :h_dim] = forward_output
    # backward_output is in processing order (t = T-1 .. 0); flip it back along
    # the time axis so each step lines up with the forward direction
    h_out[:, :, h_dim:] = torch.flip(backward_output, [1])

    h_n = torch.zeros(bs, 2, h_dim)
    h_n[:, 0, :] = forward_output[:, -1, :]  # final state of the forward layer
    h_n[:, 1, :] = backward_output[:, -1, :] # final state of the backward layer (computed at t = 0)

    h_n = h_n.transpose(0, 1) # (num_directions, bs, h_dim), matching PyTorch's h_n layout
    return h_out, h_n
# Verify the correctness of bidirectional_rnn_forward
bi_rnn = nn.RNN(input_size, hidden_size, batch_first=True, bidirectional=True)
h_prev = torch.zeros(2, bs, hidden_size) # one initial hidden state per direction: (num_directions, bs, hidden_size)
bi_rnn_output,bi_state_final = bi_rnn(input,h_prev)
# for k,v in rnn.named_parameters():
#     print(k,v)

custom_bi_rnn_output, custom_bi_state_final = bidirectional_rnn_forward(input, bi_rnn.weight_ih_l0, bi_rnn.weight_hh_l0, bi_rnn.bias_ih_l0, bi_rnn.bias_hh_l0, h_prev[0], \
                              bi_rnn.weight_ih_l0_reverse, bi_rnn.weight_hh_l0_reverse, bi_rnn.bias_ih_l0_reverse, bi_rnn.bias_hh_l0_reverse, h_prev[1])
print("Pytorch API output")
print(bi_rnn_output)
print(bi_state_final)
print("\n bidirectional_rnn_forward function output:")
print(custom_bi_rnn_output)
print(custon_bi_state_final)
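
# A quick numerical check mirroring the unidirectional one above (a sketch):
print(torch.allclose(bi_rnn_output, custom_bi_rnn_output, atol=1e-6))    # expected: True
print(torch.allclose(bi_state_final, custom_bi_state_final, atol=1e-6))  # expected: True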
