PyTorch LSTM

import torch
import torch.nn as nn


class LSTMModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim, bias=True):
        super(LSTMModel, self).__init__()
        # Hidden state size
        self.hidden_dim = hidden_dim
        # Number of stacked layers
        self.layer_dim = layer_dim
        # LSTMCell is the custom cell defined earlier in this post
        self.lstm = LSTMCell(input_dim, hidden_dim, layer_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # x: (batch, seq_len, input_dim), e.g. (100, 28, 28)
        # Initialize hidden and cell states with zeros on the same device as the
        # input, so the model works on both CPU and GPU without separate branches
        h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim, device=x.device)
        c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim, device=x.device)

        hn = h0[0, :, :]
        cn = c0[0, :, :]

        # Unroll the LSTM cell over the time dimension
        outs = []
        for seq in range(x.size(1)):
            hn, cn = self.lstm(x[:, seq, :], (hn, cn))
            outs.append(hn)

        # Classify from the hidden state of the last time step
        out = self.fc(outs[-1])  # out.size() --> (100, 10)
        return out
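As a rough illustration of how the class above might be used, here is a minimal sketch that treats each 28x28 MNIST image as a sequence of 28 rows. It assumes the custom LSTMCell referenced inside LSTMModel has been defined earlier in the post; the hyperparameter values are placeholders, not ones prescribed by the original code.

# Minimal usage sketch (assumes LSTMModel and the custom LSTMCell are defined above);
# hyperparameter values here are illustrative only.
input_dim = 28    # features per time step (one image row)
hidden_dim = 128  # hidden state size
layer_dim = 1     # number of stacked cells
output_dim = 10   # number of classes

model = LSTMModel(input_dim, hidden_dim, layer_dim, output_dim)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

images = torch.randn(100, 28, 28, device=device)  # dummy batch of 100 "images"
logits = model(images)
print(logits.shape)  # torch.Size([100, 10])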