NLP Machine Translation

Machine Translation: An Implementation Based on the Encoder-Decoder Architecture and the Attention Mechanism

Machine translation is the task of automatically translating a piece of text from one language into another. This article works through a concrete example of implementing machine translation with an encoder-decoder architecture and an attention mechanism.

1. Reading and Preprocessing the Data

We define a few special tokens:

  • <pad>: used to pad shorter sequences so that every sequence has the same length (a short padding sketch follows this list).
  • <bos>: marks the beginning of a sequence.
  • <eos>: marks the end of a sequence.
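
As a quick illustration (a hypothetical three-token sentence, using max_seq_len = 7 as later in this post), padding appends <eos> and then fills the rest with <pad>:

tokens = ['ils', 'regardent', '.']
padded = tokens + ['<eos>'] + ['<pad>'] * (7 - len(tokens) - 1)
print(padded)  # ['ils', 'regardent', '.', '<eos>', '<pad>', '<pad>', '<pad>']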

First, unpack the helper package and import the necessary libraries and modules:

!tar -xf d2lzh_pytorch.tar
import collections
import os
import io
import math
import torch
from torch import nn
import torch.nn.functional as F
import torchtext.vocab as Vocab
import torch.utils.data as Data
import d2lzh_pytorch as d2l

PAD, BOS, EOS = '<pad>', '<bos>', '<eos>'
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(torch.__version__, device)
# Output: 1.5.0 cpu

Define helper functions to preprocess the data:

# Record all tokens of one sequence, pad it to max_seq_len, and append it to all_seqs
def process_one_seq(seq_tokens, all_tokens, all_seqs, max_seq_len):
    all_tokens.extend(seq_tokens)
    seq_tokens += [EOS] + [PAD] * (max_seq_len - len(seq_tokens) - 1)
    all_seqs.append(seq_tokens)

# Build the vocabulary and convert each sequence into word indices
def build_data(all_tokens, all_seqs):
    vocab = Vocab.Vocab(collections.Counter(all_tokens),
                        specials=[PAD, BOS, EOS])
    indices = [[vocab.stoi[w] for w in seq] for seq in all_seqs]
    return vocab, torch.tensor(indices)
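
To see what build_data produces, here is a minimal sketch with hypothetical tokens, assuming the older torchtext Vocab API imported above:

toy_tokens = ['they', 'are', 'watching', '.', 'they', 'are']
toy_seqs = [['they', 'are', 'watching', '.', EOS, PAD, PAD]]
toy_vocab, toy_data = build_data(toy_tokens, toy_seqs)
print(len(toy_vocab))  # 4 distinct tokens plus the 3 special tokens
print(toy_data)        # a 1 x 7 tensor of word indices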

Read and process the data:

def read_data(max_seq_len):
    in_tokens, out_tokens, in_seqs, out_seqs = [], [], [], []
    with io.open('fr-en-small.txt') as f:
        lines = f.readlines()
    for line in lines:
        in_seq, out_seq = line.rstrip().split('\t')
        in_seq_tokens, out_seq_tokens = in_seq.split(' '), out_seq.split(' ')
        if max(len(in_seq_tokens), len(out_seq_tokens)) > max_seq_len - 1:
            continue  # skip pairs longer than max_seq_len - 1, leaving room for <eos>
        process_one_seq(in_seq_tokens, in_tokens, in_seqs, max_seq_len)
        process_one_seq(out_seq_tokens, out_tokens, out_seqs, max_seq_len)
    in_vocab, in_data = build_data(in_tokens, in_seqs)
    out_vocab, out_data = build_data(out_tokens, out_seqs)
    return in_vocab, out_vocab, Data.TensorDataset(in_data, out_data)

Set the maximum sequence length and read the data:

max_seq_len = 7
in_vocab, out_vocab, dataset = read_data(max_seq_len)
print(dataset[0])
# Output: (tensor([ 5,  4, 45,  3,  2,  0,  0]), tensor([ 8,  4, 27,  3,  2,  0,  0]))
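
As a small check (not in the original post), the indices can be mapped back to tokens through each vocabulary's itos list, which makes the <eos>/<pad> handling visible:

x, y = dataset[0]
print([in_vocab.itos[int(i)] for i in x])
print([out_vocab.itos[int(i)] for i in y])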

2. Encoder-Decoder with Attention

2.1 Encoder

The encoder feeds the input sequence through a word embedding layer and a multi-layer GRU to obtain the hidden states:

class Encoder(nn.Module):
    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, drop_prob=0, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=drop_prob)

    def forward(self, inputs, state):
        # inputs: (batch, seq_len) -> embedding: (seq_len, batch, embed_size)
        embedding = self.embedding(inputs.long()).permute(1, 0, 2)
        return self.rnn(embedding, state)

    def begin_state(self):
        return None

# Test the encoder
encoder = Encoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
output, state = encoder(torch.zeros((4, 7)), encoder.begin_state())
print(output.shape, state.shape)
# Output: torch.Size([7, 4, 16]) torch.Size([2, 4, 16])

2.2 Attention Mechanism

The attention mechanism scores each encoder hidden state against the current decoder hidden state with a small MLP, normalizes the scores with a softmax over the time steps, and returns the weighted sum of the encoder states as the context vector.
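
Concretely, writing $h_t$ for the encoder hidden state at time step $t$, $s$ for the current decoder hidden state, and $W$, $v$ for the weights of the two linear layers, the code below computes

$$e_t = v^\top \tanh\big(W\,[h_t; s]\big), \qquad \alpha_t = \frac{\exp(e_t)}{\sum_{t'} \exp(e_{t'})}, \qquad c = \sum_t \alpha_t h_t.$$

The two functions below implement this computation: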

def attention_model(input_size, attention_size):
    model = nn.Sequential(
        nn.Linear(input_size, attention_size, bias=False),
        nn.Tanh(),
        nn.Linear(attention_size, 1, bias=False)
    )
    return model

def attention_forward(model, enc_states, dec_state):
    # Broadcast the decoder state over all time steps and concatenate it
    # with the encoder states, then score each time step with the MLP.
    dec_states = dec_state.unsqueeze(dim=0).expand_as(enc_states)
    enc_and_dec_states = torch.cat((enc_states, dec_states), dim=2)
    e = model(enc_and_dec_states)
    alpha = F.softmax(e, dim=0)  # normalize over the time-step dimension
    return (alpha * enc_states).sum(dim=0)  # context vector

# Test the attention mechanism
seq_len, batch_size, num_hiddens = 10, 4, 8
model = attention_model(2*num_hiddens, 10)
enc_states = torch.zeros((seq_len, batch_size, num_hiddens))
dec_state = torch.zeros((batch_size, num_hiddens))
print(attention_forward(model, enc_states, dec_state).shape)
# Output: torch.Size([4, 8])

2.3 Decoder with Attention

At each time step, the decoder uses attention to compute a context vector from the encoder states, concatenates it with the embedding of the current input token, and feeds the result into a GRU to produce the output:

class Decoder(nn.Module):
    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, attention_size, drop_prob=0):
        super(Decoder, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.attention = attention_model(2*num_hiddens, attention_size)
        self.rnn = nn.GRU(num_hiddens + embed_size, num_hiddens, num_layers, dropout=drop_prob)
        self.out = nn.Linear(num_hiddens, vocab_size)

    def forward(self, cur_input, state, enc_states):
        # Use the hidden state of the top GRU layer as the attention query
        c = attention_forward(self.attention, enc_states, state[-1])
        # Concatenate the input embedding and the context vector as the GRU input
        input_and_c = torch.cat((self.embedding(cur_input), c), dim=1)
        output, state = self.rnn(input_and_c.unsqueeze(0), state)
        output = self.out(output).squeeze(dim=0)
        return output, state

    def begin_state(self, enc_state):
        return enc_state
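
The decoder is not tested in the original post; as a quick sanity check (a sketch reusing the same toy sizes as the encoder test above), we can run a single decoding step and inspect the output shapes:

dec_test = Decoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2,
                   attention_size=10)
enc_test = Encoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
enc_out, enc_state = enc_test(torch.zeros((4, 7)), enc_test.begin_state())
dec_state = dec_test.begin_state(enc_state)
dec_out, dec_state = dec_test(torch.zeros(4, dtype=torch.long), dec_state, enc_out)
print(dec_out.shape, dec_state.shape)
# Expected: torch.Size([4, 10]) torch.Size([2, 4, 16]) -- one score per target-vocabulary token for each batch element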

3. Training the Model

Define the function that computes the loss on a mini-batch. Teacher forcing is used, i.e. the ground-truth token of the previous time step is fed as the current decoder input, and positions after <eos> are masked out of the loss:

def batch_loss(encoder, decoder, X, Y, loss):
    batch_size = X.shape[0]
    enc_state = encoder.begin_state()
    enc_outputs, enc_state = encoder(X, enc_state)
    dec_state = decoder.begin_state(enc_state)
    dec_input = torch.tensor([out_vocab.stoi[BOS]] * batch_size)
    mask, num_not_pad_tokens = torch.ones(batch_size,), 0
    l = torch.tensor([0.0])
    for y in Y.permute(1,0):  # iterate over time steps; Y is (batch, seq_len)
        dec_output, dec_state = decoder(dec_input, dec_state, enc_outputs)
        l = l + (mask * loss(dec_output, y)).sum()
        dec_input = y  # teacher forcing: feed the ground-truth token as the next input
        num_not_pad_tokens += mask.sum().item()
        # once a sequence has produced EOS, mask out its remaining time steps
        mask = mask * (y != out_vocab.stoi[EOS]).float()
    return l / num_not_pad_tokens
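
The mask update deserves a closer look. A minimal sketch (a toy batch of two sequences): once a target token equals <eos>, that sequence's mask is zeroed, so its later time steps no longer contribute to the loss:

eos_id = out_vocab.stoi[EOS]
y = torch.tensor([eos_id, 5])  # first sequence just produced <eos>; 5 is an arbitrary non-special index
mask = torch.ones(2)
mask = mask * (y != eos_id).float()
print(mask)  # tensor([0., 1.])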

Define the training function:

def train(encoder, decoder, dataset, lr, batch_size, num_epochs):
    enc_optimizer = torch.optim.Adam(encoder.parameters(), lr=lr)
    dec_optimizer = torch.optim.Adam(decoder.parameters(), lr=lr)
    loss = nn.CrossEntropyLoss(reduction='none')
    data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
    for epoch in range(num_epochs):
        l_sum = 0.0
        for X, Y in data_iter:
            enc_optimizer.zero_grad()
            dec_optimizer.zero_grad()
            l = batch_loss(encoder, decoder, X, Y, loss)
            l.backward()
            enc_optimizer.step()
            dec_optimizer.step()
            l_sum += l.item()
        if (epoch + 1) % 10 == 0:
            print(f"epoch {epoch + 1}, loss {l_sum / len(data_iter):.3f}")

# Set the hyperparameters and train the model
embed_size, num_hiddens, num_layers = 64, 64, 2
attention_size, drop_prob, lr, batch_size, num_epochs = 10, 0.5, 0.01, 2, 50
encoder = Encoder(len(in_vocab), embed_size, num_hiddens, num_layers, drop_prob)
decoder = Decoder(len(out_vocab), embed_size, num_hiddens, num_layers, attention_size, drop_prob)
train(encoder, decoder, dataset, lr, batch_size, num_epochs)

Output: (training-log screenshot omitted in the original post; the code prints the average loss every 10 epochs.)

4. Predicting Variable-Length Sequences

In earlier chapters we introduced several ways to generate the decoder output at each time step. Here we implement the simplest of them, greedy search: at every time step the token with the highest predicted probability is emitted and fed back as the next decoder input.

def translate(encoder, decoder, input_seq, max_seq_len):
    in_tokens = input_seq.split(' ')
    in_tokens += [EOS] + [PAD] * (max_seq_len - len(in_tokens) - 1)
    enc_input = torch.tensor([[in_vocab.stoi[tk] for tk in in_tokens]])  # batch=1
    enc_state = encoder.begin_state()
    enc_output, enc_state = encoder(enc_input, enc_state)
    dec_input = torch.tensor([out_vocab.stoi[BOS]])
    dec_state = decoder.begin_state(enc_state)
    output_tokens = []
    
    for _ in range(max_seq_len):
        dec_output, dec_state = decoder(dec_input, dec_state, enc_output)
        pred = dec_output.argmax(dim=1)
        pred_token = out_vocab.itos[int(pred.item())]
        
        if pred_token == EOS:  # once EOS is predicted at any time step, the output sequence is complete
            break
        else:
            output_tokens.append(pred_token)
            dec_input = pred
    
    return output_tokens

Let's test the model. Given the French input sentence "ils regardent .", the translated English sentence should be "they are watching .".

input_seq = 'ils regardent .'
translation = translate(encoder, decoder, input_seq, max_seq_len)
print(translation)

Output: (screenshot omitted in the original post; the printed list of predicted output tokens.)

With the implementation above, we have used greedy search to predict and translate variable-length sequences. In practice, greedy search is a simple and fast decoding strategy for generating variable-length sequences, although it does not guarantee the most likely output sequence overall.
