Machine Translation with seq2seq (Attention) — a PyTorch Implementation

A seq2seq implementation written in PyTorch. The data comes from a small French-to-English dataset; the .ipynb notebook is open-sourced in my personal GitHub repository, and I recommend reading it there.

Note: the code was typed entirely by hand. If you find problems or have questions, feel free to message me.

email: yuhan.huang@whu.edu.cn

Translation result:

Code implementation (recommended: download the .ipynb file from GitHub):

import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
%matplotlib inline
# read data
with open('Datasets/French_En_Small/fr-en-small.txt') as file:
    lines = file.readlines()

french_st, eng_st = [st.split('\t')[0] for st in lines], [st.split('\t')[1] for st in lines]    # one French/English pair per line; the English side keeps a trailing '\n', which split() below removes

french_tks = [st.split() for st in french_st]
eng_tks = [st.split() for st in eng_st]
print('raw tks---')
print(french_tks[:3])
print(eng_tks[:3])

# get vocab of french and eng
french_vocab = sorted(set([tk for tks in french_tks for tk in tks]))
eng_vocab = sorted(set([tk for tks in eng_tks for tk in tks]))

french_itos = {i+3:s for i, s in enumerate(french_vocab)}
french_itos[0] = '<pad>'; french_itos[1] = '<bos>'; french_itos[2] = '<eos>'
french_stoi = {s:i for i, s in french_itos.items()}
french_vocab_size = len(french_itos)

eng_itos = {i+3:s for i, s in enumerate(eng_vocab)}
eng_itos[0] = '<pad>'; eng_itos[1] = '<bos>'; eng_itos[2] = '<eos>'
eng_stoi = {s:i for i, s in eng_itos.items()}
eng_vocab_size = len(eng_itos)
print('vocab ---')
print(french_vocab_size)
print(eng_vocab_size)

new_french_tks = [[french_stoi[tk] for tk in ['<bos>'] + tks + ['<eos>']] for tks in french_tks]
new_eng_tks = [[eng_stoi[tk] for tk in ['<bos>'] + tks + ['<eos>']] for tks in eng_tks]
print('new tks---')
print(new_french_tks[:3])
print(new_eng_tks[:3])

# analysis length of data
french_length = [len(line) for line in new_french_tks]
eng_length = [len(line) for line in new_eng_tks]
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1); plt.hist(french_length, bins=10); plt.title('distribution of french length')
plt.subplot(1, 2, 2); plt.hist(eng_length, bins=10); plt.title('distribution of eng length')
plt.show()
print(f'max length {max(max(french_length), max(eng_length))}')

# padding data to a fixed length
max_length = max(max(french_length), max(eng_length))
train_X = torch.tensor([tks + [0] * (max_length - len(tks)) for tks in new_french_tks], dtype=torch.long, device="cuda" if torch.cuda.is_available() else "cpu")
train_y = torch.tensor([tks + [0] * (max_length - len(tks)) for tks in new_eng_tks], dtype=torch.long, device="cuda" if torch.cuda.is_available() else "cpu")
print(train_X.shape)
print(train_y.shape)

# encoder: encode french tks to vector
class Encoder(nn.Module):
    def __init__(self, vocab_size, vocab_dims, num_hidden, num_layers, drop_prob=0):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, vocab_dims)
        self.gru = nn.GRU(vocab_dims, num_hidden, num_layers, dropout=drop_prob)
    
    def forward(self, x, state=None):   # x: (num_steps, batch_size)
        x = self.embedding(x)           # (num_steps, batch_size, vocab_dims)
        return self.gru(x, state)       # outputs (num_steps, batch_size, num_hidden), state (num_layers, batch_size, num_hidden)

# attention mechanism
class Attention(nn.Module):
    def __init__(self, input_size, attention_size):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, attention_size, bias=False),
            nn.Tanh(),
            nn.Linear(attention_size, 1,bias=False)
        )
    
    def forward(self, enc_states, dec_state):   # enc_states (num_steps, batch_size, num_hidden), dec_state (batch_size, num_hidden)
        broadcast_dec_state = dec_state.unsqueeze(dim=0).expand_as(enc_states) # (1, batch_size, num_hidden) -> (num_steps, batch_size, num_hidden) like broadcast but do not change data
        enc_and_dec_states = torch.cat([enc_states, broadcast_dec_state], dim=2) # (num_steps, batch_size, num_hidden*2)
        e = self.net(enc_and_dec_states)    # (num_steps, batch_size, 1)
        alpha = F.softmax(e, dim=0) # (num_steps, batch_size, 1)
        return (alpha * enc_states).sum(dim=0)  # (batch_size, num_hidden)

# decoder: decode vector to english
class Decoder(nn.Module):
    def __init__(self, vocab_size, vocab_dims, num_hidden, num_layers, attention_size=10, drop_prob=0): # assert encoder and decoder have same num_hidden
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, vocab_dims)
        self.gru = nn.GRU(num_hidden + vocab_dims, num_hidden, num_layers, dropout=drop_prob)     # num_hidden of Encoder + vocab_dims, consider both attention mechanism and inputs
        self.attention = Attention(num_hidden*2, attention_size)
        self.linear = nn.Linear(num_hidden, vocab_size)
    
    def forward(self, cur_input, state_decoder, states_encoder):    # (batch_size, ) , (num_layers, batch_size, num_hidden), (num_steps ,batch_size, num_hidden)
        c = self.attention(states_encoder, state_decoder[-1])   # (batch_size, num_hidden)
        input_c = torch.cat([self.embedding(cur_input), c], dim=1).unsqueeze(dim=0) # (1, batch_size, num_hidden + vocab_dims)
        output, new_state = self.gru(input_c, state_decoder)   # (1, batch_size, num_hidden), (num_layers, batch_size, num_hidden)
        output = self.linear(output).squeeze(dim=0) # (batch_size, vocab_size)
        return output, new_state
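
The shape comments above are easy to lose track of, so here is a quick smoke test. It is not part of the original notebook; the toy sizes below are made up purely for illustration, and it simply runs the three modules end to end and prints the resulting shapes:

# minimal shape check with illustrative toy sizes (not the training config used below)
_enc = Encoder(vocab_size=10, vocab_dims=8, num_hidden=16, num_layers=2)
_dec = Decoder(vocab_size=12, vocab_dims=8, num_hidden=16, num_layers=2)
_x = torch.zeros(7, 4, dtype=torch.long)          # (num_steps=7, batch_size=4)
_enc_out, _enc_state = _enc(_x)                   # (7, 4, 16), (2, 4, 16)
_y0 = torch.zeros(4, dtype=torch.long)            # (batch_size,)
_out, _state = _dec(_y0, _enc_state, _enc_out)    # (4, 12), (2, 4, 16)
print(_enc_out.shape, _out.shape, _state.shape)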

torch.manual_seed(42)
torch.cuda.empty_cache()

num_epochs, lr, batch_size = 1500, 0.01, 2
embed_size, num_hiddens, num_layers, attention_size, drop_prob = 64, 64, 2, 10, 0.5

device = "cuda" if torch.cuda.is_available() else "cpu"
encoder = Encoder(french_vocab_size, embed_size, num_hiddens, num_layers, drop_prob).to(device)
decoder = Decoder(eng_vocab_size, embed_size, num_hiddens, num_layers, attention_size,drop_prob).to(device)
loss = nn.CrossEntropyLoss(reduction='none').to(device)

optimizer = torch.optim.Adam([
    {'params': encoder.parameters()},
    {'params': decoder.parameters()}
], lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.5)

lossi = []
for epoch in range(num_epochs):
    idx = torch.randint(0, train_X.shape[0], (batch_size, ))
    
    # forward pass
    # french to vector
    X, Y = train_X[idx], train_y[idx]   # (2, 8)    (batch_size, num_steps)
    enc_outputs, enc_states = encoder(X.permute(1, 0))
    
    # vector to eng
    Y = Y.permute(1, 0)     # (8, 2)
    dec_input = Y[0]    # (2)
    dec_state = enc_states  # (2, 2, 64)
    
    l = 0.0
    num_elements = 0    # do not include Y[0] (the <bos> token)
    for y in Y[1:]: # y: (batch_size,)
        dec_output, dec_state = decoder(dec_input, dec_state, enc_outputs)   # (batch_size, vocab_size), (num_layers, batch_size, num_hidden)
        mask = (dec_input != 0).float()     # ignore positions whose previous target token is <pad>
        l += (loss(dec_output, y) * mask).sum()
        num_elements += mask.sum().item()
        dec_input = y   # teacher forcing: feed the ground-truth token as the next decoder input

    l /= num_elements
    
    # backward pass
    optimizer.zero_grad()
    l.backward()
    optimizer.step()
    scheduler.step()
    
    # track stats
    lossi.append(l.item())
    if epoch % 100 == 0:
        print(f'epoch {epoch:6d}, loss {l.item() :10f}')
        
print(f'epoch {epoch:6d}, loss {l.item() :10f}') 

raw_st = 'ils regardent .'
raw_tks = ['<bos>'] + raw_st.split() + ['<eos>'] + ['<pad>'] * (max_length - len(raw_st.split()) - 2)
X = torch.tensor([french_stoi[tk] for tk in raw_tks], dtype=torch.long, device=device).view(-1, 1)  # (num_steps, batch_size) -> (8, 1)
print(raw_tks)

encoder.eval(); decoder.eval()   # disable dropout for inference
with torch.no_grad():
    outputs = []
    enc_outputs, enc_states = encoder(X)
    dec_input = torch.tensor([eng_stoi['<bos>']], dtype=torch.long, device=device)
    outputs.append(dec_input.item())
    dec_state = enc_states
    
    while True:
        dec_output, dec_state = decoder(dec_input, dec_state, enc_outputs)   # (1, vocab_size)
        probs = F.softmax(dec_output, dim=1)
        output = torch.argmax(probs, dim=1)
        print(output)
        outputs.append(output.item())
        
        dec_input = output
        if output.item() == eng_stoi['<eos>']:
            break
print(' '.join(eng_itos[tk] for tk in outputs))
PyTorch is a deep learning framework that can be used to implement sequence-to-sequence (seq2seq) machine translation. In a seq2seq model, the encoder encodes the source sequence into a fixed-length vector, and the decoder decodes that vector into the target sequence. To improve translation quality, an attention mechanism can be used to bring context information into the decoder.

To implement a seq2seq model in PyTorch, the architecture can be defined with the nn.Module class. First, define the structures of the encoder and the decoder. The encoder is usually implemented with a recurrent neural network (RNN) or a convolutional neural network (CNN), while the decoder adds an attention mechanism. Attention lets the decoder focus on the most relevant parts of the input sequence when producing each output token.

To implement attention, compute a similarity score between every input position and the current decoder state. This can be done with a dot product or with a small neural network. The scores are turned into weights via softmax and used to take a weighted sum over the input sequence, producing a context vector. Finally, the context vector is combined with the current decoder state to produce the probability distribution over the next target token.

During training, the cross-entropy loss measures the difference between the model output and the correct target sequence, and backpropagation updates the model parameters. At inference time, greedy search or beam search can be used to generate the translation.

Overall, PyTorch offers a flexible and efficient way to implement seq2seq models with attention, which can be applied to many natural language processing tasks such as machine translation, question answering, and dialogue generation.
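
The summary above notes that the similarity can also be a plain dot product instead of the small MLP used in the Attention class earlier. The following is a minimal sketch of that variant; it is not used in the training code above, and it assumes the encoder and decoder hidden sizes match (as they do in this post):

# hypothetical dot-product alternative to the MLP scorer in Attention above
class DotAttention(nn.Module):
    def forward(self, enc_states, dec_state):        # (num_steps, B, H), (B, H)
        # score each encoder time step by its dot product with the decoder state
        e = (enc_states * dec_state.unsqueeze(0)).sum(dim=2, keepdim=True)  # (num_steps, B, 1)
        alpha = F.softmax(e, dim=0)                   # attention weights over time steps
        return (alpha * enc_states).sum(dim=0)        # (B, H) context vector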
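
The inference code above uses greedy search; the summary also mentions beam search. Below is a hedged sketch of what a simple beam search over this decoder could look like. It is not part of the original notebook, and beam_search_translate, beam_size, and max_len are names chosen here for illustration:

def beam_search_translate(encoder, decoder, X, beam_size=3, max_len=10):
    # X: padded source ids of shape (num_steps, 1), as built for the greedy example above
    encoder.eval(); decoder.eval()
    with torch.no_grad():
        enc_outputs, enc_states = encoder(X)
        # each hypothesis: (token ids so far, decoder state, accumulated log-probability)
        beams = [([eng_stoi['<bos>']], enc_states, 0.0)]
        for _ in range(max_len):
            candidates = []
            for tokens, state, score in beams:
                if tokens[-1] == eng_stoi['<eos>']:   # keep finished hypotheses as-is
                    candidates.append((tokens, state, score))
                    continue
                dec_input = torch.tensor([tokens[-1]], dtype=torch.long, device=X.device)
                out, new_state = decoder(dec_input, state, enc_outputs)
                log_probs = F.log_softmax(out, dim=1).squeeze(0)      # (vocab_size,)
                top_lp, top_ids = log_probs.topk(beam_size)
                for lp, idx in zip(top_lp.tolist(), top_ids.tolist()):
                    candidates.append((tokens + [idx], new_state, score + lp))
            # keep the best beam_size hypotheses by accumulated log-probability
            beams = sorted(candidates, key=lambda c: c[2], reverse=True)[:beam_size]
            if all(t[-1] == eng_stoi['<eos>'] for t, _, _ in beams):
                break
    return ' '.join(eng_itos[tk] for tk in beams[0][0])

# example usage on the same padded input X as above:
# print(beam_search_translate(encoder, decoder, X))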