[Neural Networks and Deep Learning] Long Short-Term Memory (LSTM) Networks

Brief Introduction

API overview:

nn.LSTM(input_size=100, hidden_size=10, num_layers=1, batch_first=True, bidirectional=True)

input_size: the embedding dimension (embedding_dim) of the input
hidden_size: the size of the hidden state in each LSTM layer
num_layers: the number of stacked LSTM layers
batch_first: if True, input and output tensors are [batch_size, seq_len, embedding_dim]
bidirectional: if True, a bidirectional LSTM is used

After instantiating the LSTM object, you pass it not only the input data but also, optionally, the initial states h_0 and c_0 (if omitted, both default to zero tensors):
lstm(input, (h_0, c_0))
The LSTM returns (output, (h_n, c_n)) with the following shapes, checked in the sketch below:
output: [seq_len, batch, hidden_size*num_directions] (when batch_first=False)
h_n: [num_layers*num_directions, batch, hidden_size]
c_n: [num_layers*num_directions, batch, hidden_size]
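
A minimal sketch to verify these shapes, assuming the hyperparameters from the nn.LSTM call above (input_size=100, hidden_size=10, num_layers=1, bidirectional=True); the batch and sequence sizes are arbitrary:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=100, hidden_size=10, num_layers=1,
               batch_first=True, bidirectional=True)

batch_size, seq_len = 4, 7
x = torch.randn(batch_size, seq_len, 100)  # [batch_size, seq_len, input_size]

# initial states: [num_layers * num_directions, batch_size, hidden_size]
h_0 = torch.zeros(1 * 2, batch_size, 10)
c_0 = torch.zeros(1 * 2, batch_size, 10)

output, (h_n, c_n) = lstm(x, (h_0, c_0))
print(output.size())  # [4, 7, 20] -> hidden_size * num_directions (batch_first=True)
print(h_n.size())     # [2, 4, 10] -> num_layers * num_directions comes first
print(c_n.size())     # [2, 4, 10]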

import torch.nn as nn
import torch.nn.functional as F
import torch

batch_size = 10
seq_len = 20  # sentence length
vocab_size = 100  # vocabulary size
embedding_dim = 30  # each word is represented by a vector of length embedding_dim
hidden_size = 18

input = torch.randint(0, vocab_size, [batch_size, seq_len])  # random token ids
print(input.size())
print("*"*100)
# pass the ids through an embedding layer
embed = nn.Embedding(vocab_size, embedding_dim)

input_embed = embed(input)  # [batch_size, seq_len, embedding_dim]
print(input_embed.size())
print("*"*100)
lstm = nn.LSTM(embedding_dim, hidden_size=hidden_size, num_layers=1, batch_first=True)
output, (h_n, c_n) = lstm(input_embed)  # h_0 and c_0 omitted, so they default to zeros
print(output.size())
print("*"*100)
print(h_n.size())
print("*"*100)
print(c_n.size())
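
With these settings the prints show torch.Size([10, 20]) for the input ids, torch.Size([10, 20, 30]) after embedding, torch.Size([10, 20, 18]) for output, and torch.Size([1, 10, 18]) for both h_n and c_n (one layer, one direction).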

The output at the last time step is usually taken to represent the whole sentence.
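
Continuing the single-layer, unidirectional example above, this can be checked directly: the last time step of output is exactly the final hidden state h_n[-1].

# with batch_first=True, output[:, -1, :] is the hidden state at the last time step
last_step = output[:, -1, :]               # [batch_size, hidden_size]
print(torch.allclose(last_step, h_n[-1]))  # True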

Implementation with a bidirectional LSTM

"""
定义模型
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib import ws,max_len
from dataset import get_data
import lib
import os
import numpy as np
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.embedding = nn.Embedding(len(ws), 100)
        self.lstm = nn.LSTM(input_size=100, hidden_size=lib.hidden_size, num_layers=lib.num_layers,batch_first=True, bidirectional=lib.bidirectional, dropout=lib.dropout)
        self.fc = nn.Linear(lib.hidden_size*2, 2)
        



    def forward(self, input):
        """
        :param input: [batch_size, max_len]
        :return: log-probabilities, [batch_size, 2]
        """
        x = self.embedding(input)  # [batch_size, max_len, 100]
        x, (h_n, c_n) = self.lstm(x)
        # h_n: [num_layers * num_directions, batch_size, hidden_size];
        # h_n[-2] is the last layer's forward direction, h_n[-1] its backward direction
        output = torch.cat([h_n[-2, :, :], h_n[-1, :, :]], dim=-1)  # [batch_size, hidden_size * 2]
        output = self.fc(output)
        return F.log_softmax(output, dim=-1)

model = MyModel().to(lib.device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
os.makedirs("./model0", exist_ok=True)  # make sure the checkpoint directory exists
if os.path.exists("./model0/model.pkl"):
    model.load_state_dict(torch.load("./model0/model.pkl"))
    optimizer.load_state_dict(torch.load("./model0/optimizer.pkl"))

def train(epoch):
    model.train()  # enable dropout during training
    for idx, (input, target) in enumerate(get_data(train=True)):
        input = input.to(lib.device)
        target = target.to(lib.device)
        optimizer.zero_grad()  # zero the gradients
        output = model(input)
        loss = F.nll_loss(output, target)  # pairs with the log_softmax in the model
        loss.backward()
        optimizer.step()
        print(epoch, idx, loss.item())

        if idx % 100 == 0:
            torch.save(model.state_dict(), "./model0/model.pkl")
            torch.save(optimizer.state_dict(), "./model0/optimizer.pkl")

def evaluate():  # renamed from eval() to avoid shadowing the built-in
    model.eval()  # disable dropout during evaluation
    loss_list = []
    acc_list = []
    for idx, (input, target) in enumerate(get_data(train=False, batch_size=lib.test_batch_size)):
        input = input.to(lib.device)
        target = target.to(lib.device)
        with torch.no_grad():
            output = model(input)
            loss = F.nll_loss(output, target)
            loss_list.append(loss.cpu().item())
            pred = output.max(dim=-1)[1]  # index of the max log-probability = predicted class
            acc = pred.eq(target).float().mean()
            acc_list.append(acc.cpu().item())

    print("total loss, acc:", np.mean(loss_list), np.mean(acc_list))




if __name__ == '__main__':
    for i in range(10):
        train(epoch=i)

    evaluate()
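
The script imports ws, max_len, and several hyperparameters from a lib module that the post does not show. Purely as a hypothetical sketch of what such a module might contain (every name and value below is an assumption, chosen only to make the model definition above consistent):

# lib.py -- hypothetical configuration module, not from the original post
import torch

max_len = 200           # assumed maximum sentence length after padding/truncation
hidden_size = 128       # assumed LSTM hidden size
num_layers = 2          # assumed; dropout in nn.LSTM only applies when num_layers > 1
bidirectional = True    # must be True, since the fc layer expects hidden_size * 2
dropout = 0.5           # assumed inter-layer dropout rate
test_batch_size = 128   # assumed evaluation batch size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ws is assumed to be a vocabulary object built from the training corpus elsewhere;
# the model only needs len(ws) for the embedding table size.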





