一、LSTM
候选记忆单元 $\tilde{c}_t$（即 $c_t$ 上加波浪线）：其计算方式类似一个普通隐藏层的变换
记忆单元:是否忘记之前的信息以及是否保留输出
最后由输出门再次决定：是否把记忆单元 $c_t$ 中的信息输出到隐藏状态 $h_t$
二、代码实现
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
import Rnn
# ----- Data and single-layer LSTM setup -----
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
num_hiddens = 256
# Bug fix: the input width must match the dataset vocabulary.  It was
# hard-coded to 20, which disagrees with X below (built with len(vocab)
# features) and would make lstm_layer reject X at runtime.
vocab_size = len(vocab)
num_inputs = vocab_size
lstm_layer = nn.LSTM(num_inputs, num_hiddens)
# Bug fix: an nn.LSTM state is a (hidden, cell) tuple, each of shape
# (num_layers * num_directions, batch_size, num_hiddens).  The original
# passed a single tensor shaped with num_steps, which nn.LSTM rejects.
state = (torch.zeros((1, batch_size, num_hiddens)),
         torch.zeros((1, batch_size, num_hiddens)))
# Dummy input: (num_steps, batch_size, vocab_size), matching nn.LSTM's
# default seq-first layout.
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
class RNNModel(nn.Module):
    """Wrap a recurrent layer (RNN/GRU/LSTM) with a vocabulary-sized output head.

    inputs to forward() are (batch, steps) token ids; the output is a
    (steps * batch, vocab_size) logits matrix plus the updated state.
    """

    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        # A bidirectional layer concatenates forward and backward outputs,
        # so the projection must accept twice the hidden width.
        if self.rnn.bidirectional:
            self.num_direction = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
        else:
            self.num_direction = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)

    def forward(self, inputs, state):
        # Transpose to time-major, then one-hot encode:
        # (batch, steps) -> (steps, batch, vocab_size) float32.
        X = F.one_hot(inputs.T.long(), self.vocab_size)
        X = X.to(torch.float32)
        Y, state = self.rnn(X, state)
        # Collapse time and batch dims before projecting to vocab logits.
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state
# Bug fix: d2l.RNNModel's second argument is the vocabulary size, not the
# hidden width — passing num_hiddens (256) would size the output layer wrong.
# (The deep-LSTM example at the end of this file already does this correctly.)
model = d2l.RNNModel(lstm_layer, len(vocab))
三、加深网络
# The third positional argument stacks LSTM layers: num_layers=2 yields a 2-layer LSTM.
lstm_layer = nn.LSTM(num_inputs,num_hiddens,num_layers)
通过 num_layers 参数将 LSTM 堆叠为两层（深层 LSTM）
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
import Rnn
# ----- Data and deep (2-layer) LSTM setup -----
# Minibatch size and subsequence length for the time-machine dataset.
batch_size = 32
num_steps = 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

# num_layers controls how many LSTM layers are stacked.
vocab_size = len(vocab)
num_hiddens = 256
num_layers = 2
num_inputs = vocab_size
device = d2l.try_gpu()

# This builds a two-layer LSTM.
lstm_layer = nn.LSTM(num_inputs, num_hiddens, num_layers)
model = d2l.RNNModel(lstm_layer, len(vocab))