RNN LSTM GRU Code Walkthrough ---- A Simple Text Generation Task

import torch
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
There are 1 GPU(s) available.
We will use the GPU: GeForce GTX 1070
Set the parameters
import torchtext
from torchtext.vocab import Vectors
import numpy as np
import random
# To make the results reproducible, we fix the various random seeds to a constant value
random.seed(53113)
np.random.seed(53113)
torch.manual_seed(53113)

BATCH_SIZE = 32         # number of sequences per batch
EMBEDDING_SIZE = 650    # embedding dimension of each word
MAX_VOCAB_SIZE = 50000  # maximum number of words in the vocabulary
Load the dataset
TEXT = torchtext.data.Field(lower=True) 
# https://s0pytorch0org.icopy.site/text/data.html?highlight=torchtext%20data%20field#torchtext.data.Field

# torchtext provides the LanguageModelingDataset class to handle language-modeling datasets
train, val, test = torchtext.datasets.LanguageModelingDataset.splits(
    path=".", 
    train="train.txt", 
    validation="dev.txt", 
    test="test.txt", 
    text_field=TEXT)
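The LanguageModelingDataset reads each file as one long stream of tokens. A quick, illustrative check of what was loaded (not part of the original notebook):

print(len(train), len(train[0].text))  # one example per split, holding the whole tokenized file
print(train[0].text[:15])              # the first 15 tokens of the training stream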
Build the vocabulary
TEXT.build_vocab(train, max_size=MAX_VOCAB_SIZE)
# build_vocab builds a vocabulary of the most frequent words in the training set; max_size caps the total number of words.
print("vocabulary size: {}".format(len(TEXT.vocab)))
vocabulary size: 50002
print(TEXT.vocab.itos[0:50]) 
# Two special tokens are added automatically: <unk> for unknown words and <pad> for padding.
['<unk>', '<pad>', 'the', 'of', 'and', 'one', 'in', 'a', 'to', 'zero', 'nine', 'two', 'is', 'as', 'eight', 'for', 's', 'five', 'three', 'was', 'by', 'that', 'four', 'six', 'seven', 'with', 'on', 'are', 'it', 'from', 'or', 'his', 'an', 'be', 'this', 'he', 'at', 'which', 'not', 'also', 'have', 'were', 'has', 'but', 'other', 'their', 'its', 'first', 'they', 'had']
print(list(TEXT.vocab.stoi.items())[0:50])
[('<unk>', 0), ('<pad>', 1), ('the', 2), ('of', 3), ('and', 4), ('one', 5), ('in', 6), ('a', 7), ('to', 8), ('zero', 9), ('nine', 10), ('two', 11), ('is', 12), ('as', 13), ('eight', 14), ('for', 15), ('s', 16), ('five', 17), ('three', 18), ('was', 19), ('by', 20), ('that', 21), ('four', 22), ('six', 23), ('seven', 24), ('with', 25), ('on', 26), ('are', 27), ('it', 28), ('from', 29), ('or', 30), ('his', 31), ('an', 32), ('be', 33), ('this', 34), ('he', 35), ('at', 36), ('which', 37), ('not', 38), ('also', 39), ('have', 40), ('were', 41), ('has', 42), ('but', 43), ('other', 44), ('their', 45), ('its', 46), ('first', 47), ('they', 48), ('had', 49)]
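With the vocabulary built, stoi maps words to indices and itos maps indices back to words; anything outside the 50,000 most frequent words falls back to <unk> (index 0). A small illustrative example:

sentence = "the cat sat on the mat".split()
ids = [TEXT.vocab.stoi[w] for w in sentence]   # stoi is a defaultdict, unknown words map to 0
print(ids)
print([TEXT.vocab.itos[i] for i in ids])       # <unk> shows up where a word was out of vocabulary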
Create the iterators
# BPTTIterator: an iterator based on BPTT (back-propagation through time), typically used for language models.
# It yields contiguous, coherent chunks of text batch after batch.
train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(
    (train, val, test), 
    batch_size=BATCH_SIZE, 
    bptt_len=50, # length of each BPTT segment, i.e. how many consecutive tokens are fed to the model per sample
    repeat=False, 
    shuffle=True)
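To see what the iterator actually yields, the sketch below (illustrative, not part of the logged run) prints one batch: batch.target is simply batch.text shifted by one token, which is exactly the next-word-prediction setup of a language model.

first_batch = next(iter(train_iter))
print(first_batch.text.shape, first_batch.target.shape)  # both are (bptt_len, batch) = (50, 32)
print(" ".join(TEXT.vocab.itos[idx] for idx in first_batch.text[:10, 0].tolist()))
print(" ".join(TEXT.vocab.itos[idx] for idx in first_batch.target[:10, 0].tolist()))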
Build the language model
import torch
import torch.nn as nn

class My_Model(nn.Module):
    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5):
        super(My_Model, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)

        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers
        self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        print(getattr(nn, rnn_type))
        self.decoder = nn.Linear(nhid, ntoken)

        # Initialize the weights uniformly in [-0.1, 0.1]
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, input, hidden):
        # input: [50, 32] -> emb: [50, 32, 650]
        emb = self.encoder(input)
        emb = self.drop(emb)

        # nn.RNN(input_size, hidden_size, num_layers=1, nonlinearity='tanh', bias=True,
        #        batch_first=False, dropout=0, bidirectional=False)
        # emb: [50, 32, 650] -> rnn_layer: [50, 32, 1000], hidden: [nlayers, 32, 1000]
        rnn_layer, hidden = self.rnn(emb, hidden)

        rnn_layer = self.drop(rnn_layer)

        # Flatten (seq_len, batch) into one dimension for the linear decoder,
        # then reshape back to (seq_len, batch, ntoken)
        output = self.decoder(rnn_layer.view(rnn_layer.size(0) * rnn_layer.size(1), rnn_layer.size(2)))
        output = output.view(rnn_layer.size(0), rnn_layer.size(1), output.size(1))
        return output, hidden

    def init_hidden(self, bsz, requires_grad=True):
        # Initialize the hidden state(s) with zeros
        weight = next(self.parameters())
        if self.rnn_type == 'LSTM':
            # The LSTM keeps both a hidden state h and a cell state c, so a tuple of two
            # tensors of shape (nlayers, bsz, nhid) = (2, 32, 1000) is returned.
            # weight.new_zeros creates the tensors on the same device and with the same dtype
            # as the model parameters. Note that hidden is not a model parameter: it is not
            # updated by the optimizer and is treated just like the input data x.
            return (weight.new_zeros((self.nlayers, bsz, self.nhid), requires_grad=requires_grad),
                    weight.new_zeros((self.nlayers, bsz, self.nhid), requires_grad=requires_grad))
        else:
            # RNN and GRU have no separate cell state (the GRU folds h and c into one),
            # so a single hidden tensor is enough.
            return weight.new_zeros((self.nlayers, bsz, self.nhid), requires_grad=requires_grad)



nhid_size = 1000
VOCAB_SIZE = len(TEXT.vocab)  # 50002, including <unk> and <pad>
model = My_Model("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5)
model = model.cuda()

loss_fn = nn.CrossEntropyLoss()  # cross-entropy loss
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.5)
# Each call to scheduler.step() multiplies the learning rate by 0.5, i.e. halves it
<class 'torch.nn.modules.rnn.LSTM'>
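Although only the LSTM is trained below, the rnn_type string is passed straight to getattr(nn, rnn_type), so the same class covers the plain RNN and the GRU from the title. A sketch (these variants are not trained in this post):

gru_model = My_Model("GRU", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5).cuda()
rnn_model = My_Model("RNN", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5).cuda()
# Only the LSTM branch of init_hidden returns an (h, c) tuple; RNN and GRU get a single tensor.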
Detach the computation graph to reduce memory pressure
# If the hidden state kept its full history, the computation graph would keep growing
# across batches and back-propagation would eventually run out of memory. So after every
# batch we detach the graph and keep only the values of the hidden state.
def repackage_hidden(h):
    """Wraps hidden states in new Tensors, to detach them from their history."""
    if isinstance(h, torch.Tensor):
        # Single-tensor case (RNN/GRU hidden state):
        # detach() cuts the graph here; h keeps its values but becomes the start of a new graph
        return h.detach()
    else:
        # LSTM case: hidden is a tuple (h, c), so detach each element recursively
        return tuple(repackage_hidden(v) for v in h)
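A minimal check of what repackage_hidden does (illustrative): the detached tensor keeps the same values but no longer carries a grad_fn, so back-propagation stops there.

x = torch.randn(2, 3, requires_grad=True)
y = x * 2                        # y records a grad_fn linking it back to x
z = repackage_hidden(y)          # same values, but cut off from the graph
print(y.grad_fn is not None)     # True
print(z.grad_fn is None)         # True
print(torch.equal(y, z))         # True: the values are unchanged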
Model evaluation
# It may help to read the training loop below first and then come back to evaluate
def evaluate(model, data):
    model.eval()  # evaluation mode
    total_loss = 0.
    total_count = 0.
    it = iter(data)
    with torch.no_grad():  # no gradients are needed during evaluation
        hidden = model.init_hidden(BATCH_SIZE, requires_grad=False)
        # As in training, the hidden state starts from zeros; it is not a model parameter.
        # model.parameters() at this point already holds the trained weights.
        for i, batch in enumerate(it):
            data, target = batch.text, batch.target
            # Inputs and targets of the validation set, i.e. features and labels
            data, target = data.cuda(), target.cuda()
            hidden = repackage_hidden(hidden)  # detach the computation graph
            output, hidden = model(data, hidden)
            # One forward pass through the model
            loss = loss_fn(output.view(-1, VOCAB_SIZE), target.view(-1))
            # Cross-entropy loss, averaged over the tokens in the batch

            total_count += np.multiply(*data.size())
            # The loss above is an average, so the total loss is accumulated separately.
            # total_count counts the tokens per batch: 50 tokens per sample, 32 samples,
            # so np.multiply(*data.size()) = 50 * 32 = 1600
            total_loss += loss.item() * np.multiply(*data.size())
            # average batch loss * tokens in the batch = total loss of the batch

    loss = total_loss / total_count  # total validation loss divided by total token count
    model.train()  # switch back to training mode
    return loss
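Language-model quality is usually reported as perplexity, the exponential of this average per-token cross-entropy. A short sketch on top of the evaluate function above:

import math

def perplexity(model, data):
    # exp of the average per-token cross-entropy returned by evaluate()
    return math.exp(evaluate(model, data))
# e.g. the best validation loss logged below (about 5.00) corresponds to a perplexity of roughly 149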
Train the model
import copy

GRAD_CLIP = 1.
NUM_EPOCHS = 2

val_losses = []
for epoch in range(NUM_EPOCHS):
    model.train()  # training mode
    # iter() builds an iterator; train_iter is itself iterable, so calling iter() is optional
    it = iter(train_iter)
    # Hidden state initialized to zeros with the right shape
    hidden = model.init_hidden(BATCH_SIZE)

    for i, batch in enumerate(it):

        data, target = batch.text, batch.target
        # Inputs and targets of the training set, i.e. features and labels
        data, target = data.cuda(), target.cuda()

        hidden = repackage_hidden(hidden)
        # In a language model, the hidden state produced by one batch is carried over as
        # the initial hidden state of the next batch. With many batches, the computation
        # graph would grow without bound and back-propagation would run out of memory,
        # so after each batch we detach the graph and keep only the hidden state values.
        # Only language models need this; e.g. translation models do not.
        # repackage_hidden is the helper defined above that does the detaching.
        model.zero_grad()  # zero the gradients, otherwise they accumulate across iterations
        output, hidden = model(data, hidden)
        # output shape: (50, 32, 50002)
        loss = loss_fn(output.view(-1, VOCAB_SIZE), target.view(-1))
        # output.view(-1, VOCAB_SIZE): (1600, 50002)
        # target.view(-1): (1600,). For PyTorch's cross-entropy formula see:
        # https://blog.csdn.net/geter_CS/article/details/84857220
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
        # Gradient clipping: if the gradient norm exceeds GRAD_CLIP it is rescaled to
        # GRAD_CLIP, which prevents exploding gradients
        optimizer.step()
        if i % 1000 == 0:
            print("epoch", epoch, "iter", i, "loss", loss.item())

        if i % 10000 == 0:
            val_loss = evaluate(model, val_iter)

            if len(val_losses) == 0 or val_loss < min(val_losses):
                # If the validation loss improves on the best so far, save the model
                print("best model, val loss: ", val_loss)
                torch.save(model, "best_model.pkl")
            else:
                # Otherwise decay the learning rate; scheduler.step() halves the lr of the
                # existing optimizer in place, so the optimizer does not need to be recreated
                scheduler.step()
            val_losses.append(val_loss)  # record the validation loss every 10000 iterations
epoch 0 iter 0 loss 10.730563163757324
best model, val loss:  10.478901235690277


D:\Anaconda\envs\jianbo\lib\site-packages\torch\serialization.py:402: UserWarning: Couldn't retrieve source code for container of type My_Model. It won't be checked for correctness upon loading.
  "type " + obj.__name__ + ". It won't be checked "


epoch 0 iter 1000 loss 6.0242919921875
epoch 0 iter 2000 loss 6.029582500457764
epoch 0 iter 3000 loss 5.8461594581604
epoch 0 iter 4000 loss 5.5147223472595215
epoch 0 iter 5000 loss 5.937921047210693
epoch 0 iter 6000 loss 5.6236090660095215
epoch 0 iter 7000 loss 5.482613563537598
epoch 0 iter 8000 loss 5.344069004058838
epoch 0 iter 9000 loss 5.418025970458984
epoch 1 iter 0 loss 5.486691474914551
best model, val loss:  5.002634433592716
epoch 1 iter 1000 loss 5.0923237800598145
epoch 1 iter 2000 loss 5.381066799163818
epoch 1 iter 3000 loss 5.237982273101807
epoch 1 iter 4000 loss 4.973425388336182
epoch 1 iter 5000 loss 5.4851861000061035
epoch 1 iter 6000 loss 5.201869010925293
epoch 1 iter 7000 loss 5.1173810958862305
epoch 1 iter 8000 loss 5.007303237915039
epoch 1 iter 9000 loss 5.120178699493408
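Once training finishes, the same evaluate function can score the model on the held-out test split (a sketch, not part of the logged run):

test_loss = evaluate(model, test_iter)
print("test loss:", test_loss, "test perplexity:", np.exp(test_loss))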
Save the trained model
torch.save(model, "final_model.pkl")
Load the best model
# Load the saved model
nhid_size = 1000
best_model = My_Model("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5)
best_model = best_model.cuda()

PATH = './best_model.pkl'
# torch.save(model, ...) above pickled the whole model object, so torch.load returns a
# complete model and simply replaces the freshly constructed best_model
best_model = torch.load(PATH)
<class 'torch.nn.modules.rnn.LSTM'>
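The UserWarning in the training log comes from pickling the whole model object with torch.save(model, ...). A common alternative (a sketch, assuming a hypothetical file name best_model_state.pt) is to save and load only the state_dict:

# torch.save(model.state_dict(), "best_model_state.pt")   # save parameters only (hypothetical file)
best_model = My_Model("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5).cuda()
best_model.load_state_dict(torch.load("best_model_state.pt"))
best_model.eval()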
Generate some sample text
hidden = best_model.init_hidden(1)  # batch_size = 1
input = torch.randint(VOCAB_SIZE, (1, 1), dtype=torch.long).to(device)
# (1, 1) gives a 1x1 2-D tensor whose value is a random index below VOCAB_SIZE = 50002,
# i.e. the input is a single randomly chosen word
words = []
for i in range(100):
    output, hidden = best_model(input, hidden)
    # output.shape = (1, 1, 50002)
    # hidden = (2 x 1 x 1000, 2 x 1 x 1000)
    word_weights = output.squeeze().exp().cpu()
    # .exp() does two things: it amplifies larger scores and it turns negative logits into
    # positive weights, which torch.multinomial below requires
    word_idx = torch.multinomial(word_weights, 1)[0]
    # Sample an index according to word_weights: larger weights are drawn more often.
    # See https://blog.csdn.net/monchin/article/details/79787621 for torch.multinomial.
    # Always picking the most probable word instead would make the output repeat itself.
    input.fill_(word_idx)  # feed the predicted word index back in as the next input
    word = TEXT.vocab.itos[word_idx]  # map the index back to its word
    words.append(word)
print(" ".join(words))
who become born epicurus and looking for them as a <unk> is to print using hypocrisy that married his corresponding a buffer of his bicycle and put her came that <unk> into the drink the abuse of manganese s into the liver and prayers the second it is his own nowhere of the earth recognizes his origin but has primarily been used by arthur gardner largely written for this reason he differs from his eight sequel to the catherine copula which appears to be that of it encoding beethoven s demonstration the last ship desires to invent wittenberg was an
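A common tweak to the sampling loop above (not used in the run that produced this text) is temperature sampling: dividing the scores by a temperature below 1 makes the distribution sharper and the output more conservative, while a temperature above 1 makes it more surprising.

temperature = 0.8
word_weights = (output.squeeze() / temperature).exp().cpu()  # sharper distribution for T < 1
word_idx = torch.multinomial(word_weights, 1)[0]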