Experiment 14: Japanese-to-Chinese Machine Translation with a Transformer

1. Preprocessing

1.1 Import the Packages Required for the Experiment
import math
import torchtext
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from collections import Counter
from torchtext.vocab import Vocab
from torch.nn import TransformerEncoder, TransformerDecoder, TransformerEncoderLayer, TransformerDecoderLayer
import io
import time
import pandas as pd
import numpy as np
import pickle
import tqdm
import sentencepiece as spm
torch.manual_seed(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
1.2 Obtain the Parallel Dataset


We will use the parallel data downloaded from JParaCrawl (http://www.kecl.ntt.co.jp/icl/lirg/jparacrawl), which is described as "the largest publicly available English-Japanese parallel corpus created by NTT. It was created by largely crawling the web and automatically aligning parallel sentences." For this Japanese-to-Chinese experiment we read the Chinese-Japanese file zh-ja.bicleaner05.txt, so the list named trainen below actually holds the Chinese side of each sentence pair, while trainja holds the Japanese side.

# Read the tab-separated parallel corpus; columns 2 and 3 hold the two sides of each sentence pair.
df = pd.read_csv('./zh-ja.bicleaner05.txt', sep='\\t', engine='python', header=None)
trainen = df[2].values.tolist()#[:10000]
trainja = df[3].values.tolist()#[:10000]
# trainen.pop(5972)
# trainja.pop(5972)
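
As a quick sanity check (this snippet is not part of the original write-up), we can confirm that the file was parsed into two equally long sentence lists and look at one example pair:

# Sanity check: both sides should have the same number of sentences.
print(len(trainen), len(trainja))
# Print the first sentence pair to verify the columns were read correctly.
print(trainen[0])
print(trainja[0])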
1.3 Prepare the Tokenizers


Unlike English and other alphabetic languages, Japanese sentences do not use spaces to separate words. Here we can use the SentencePiece tokenizers provided by JParaCrawl for Japanese and English.

en_tokenizer = spm.SentencePieceProcessor(model_file='spm.en.nopretok.model')
ja_tokenizer = spm.SentencePieceProcessor(model_file='spm.ja.nopretok.model')
en_tokenizer.encode("All residents aged 20 to 59 years who live in Japan must enroll in public pension system.", out_type=str)
ja_tokenizer.encode("年金 日本に住んでいる20歳~60歳の全ての人は、公的年金制度に加入しなければなりません。", out_type=str)
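
To see that the tokenizers behave as expected, we can also round-trip a short example sentence (the sentence below is a hypothetical example, not taken from the dataset); SentencePieceProcessor.decode reverses the subword split:

# Round-trip check: encode into subword pieces, then decode back to text.
sample = "日本に住んでいます。"  # hypothetical example sentence
pieces = ja_tokenizer.encode(sample, out_type=str)
print(pieces)
print(ja_tokenizer.decode(pieces))  # should reconstruct the original sentence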
1.4 Build the TorchText Vocab Objects and Convert the Sentences into Torch Tensors


Using the tokenizers and the raw sentences, we then build the Vocab objects imported from TorchText. This step can take from a few seconds to a few minutes depending on the size of the dataset and on computing power. Different tokenizers also affect how long it takes to build the vocabulary; I tried several other Japanese tokenizers, but SentencePiece seemed to work well and was fast enough.

def build_vocab(sentences, tokenizer):
  # Count subword frequencies over the whole corpus, then build a (legacy
  # torchtext) Vocab with the special tokens placed at the start of the index.
  counter = Counter()
  for sentence in sentences:
    counter.update(tokenizer.encode(sentence, out_type=str))
  return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])

ja_vocab = build_vocab(trainja, ja_tokenizer)
en_vocab = build_vocab(trainen, en_tokenizer)

def data_process(ja, en):
  # Convert every sentence pair into a pair of LongTensors of token indices.
  data = []
  for (raw_ja, raw_en) in zip(ja, en):
    ja_tensor_ = torch.tensor([ja_vocab[token] for token in ja_tokenizer.encode(raw_ja.rstrip("\n"), out_type=str)],
                            dtype=torch.long)
    en_tensor_ = torch.tensor([en_vocab[token] for token in en_tokenizer.encode(raw_en.rstrip("\n"), out_type=str)],
                            dtype=torch.long)
    data.append((ja_tensor_, en_tensor_))
  return data

train_data = data_process(trainja, trainen)

After we have the vocabulary objects, we can use them together with the tokenizers to turn the training sentences into tensors of token indices.
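
As a small illustration (not part of the original code), we can check the vocabulary sizes, the indices assigned to the special tokens, and the tensor produced for the first training pair:

# Vocabulary sizes include the four special tokens listed in build_vocab.
print(len(ja_vocab), len(en_vocab))
# With the specials order ['<unk>', '<pad>', '<bos>', '<eos>'], '<pad>' should be index 1 and '<bos>' index 2.
print(ja_vocab['<pad>'], ja_vocab['<bos>'], ja_vocab['<eos>'])
# Japanese side of the first pair, as a LongTensor of token indices.
print(train_data[0][0])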

2. Create the DataLoader Object to Iterate Over During Training


The original instruction is to set BATCH_SIZE to 16 to prevent "CUDA out of memory" errors; here it is raised to 32 (see the note after the code below).

# BATCH_SIZE = 8
BATCH_SIZE = 32

PAD_IDX = ja_vocab['<pad>']
BOS_IDX = ja_vocab['<bos>']
EOS_IDX = ja_vocab['<eos>']

def generate_batch(data_batch):
  # Collate function: add <bos>/<eos> around every sentence, then pad the
  # batch to a common length. pad_sequence returns (seq_len, batch_size) tensors.
  ja_batch, en_batch = [], []
  for (ja_item, en_item) in data_batch:
    ja_batch.append(torch.cat([torch.tensor([BOS_IDX]), ja_item, torch.tensor([EOS_IDX])], dim=0))
    en_batch.append(torch.cat([torch.tensor([BOS_IDX]), en_item, torch.tensor([EOS_IDX])], dim=0))
  ja_batch = pad_sequence(ja_batch, padding_value=PAD_IDX)
  en_batch = pad_sequence(en_batch, padding_value=PAD_IDX)
  return ja_batch, en_batch

train_iter = DataLoader(train_data, batch_size=BATCH_SIZE,
                        shuffle=True, collate_fn=generate_batch)

Here, based on the actual training speed and conditions on the server, I changed the original # BATCH_SIZE = 8 to BATCH_SIZE = 32, so that each optimization step covers more sentence pairs.
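
To verify the collate function, one can draw a single batch from the DataLoader (this check is not in the original write-up). Because pad_sequence is used with its default batch_first=False, each batch has shape (max_sequence_length, BATCH_SIZE), which is the sequence-first layout the model below expects:

# Fetch one batch and inspect its shape: (longest sentence in the batch, BATCH_SIZE).
sample_ja, sample_en = next(iter(train_iter))
print(sample_ja.shape, sample_en.shape)
# Count how many positions in the Japanese batch are padding.
print((sample_ja == PAD_IDX).sum().item(), "padding tokens in this Japanese batch")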

3. The Sequence-to-Sequence Transformer

from torch.nn import (TransformerEncoder, TransformerDecoder,
                      TransformerEncoderLayer, TransformerDecoderLayer)


class Seq2SeqTransformer(nn.Module):
    def __init__(self, num_encoder_layers: int, num_decoder_layers: int,
                 emb_size: int, src_vocab_size: int, tgt_vocab_size: int,
                 dim_feedforward:int = 512, dropout:float = 0.1):
        super(Seq2SeqTransformer, self).__init__()
        encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=NHEAD,
                                                dim_feedforward=dim_feedforward)
        self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)
        decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=NHEAD,
                                                dim_feedforward=dim_feedforward)
        self.transformer_decoder = TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)

        self.generator = nn.Linear(emb_size, tgt_vocab_size)
        self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
        self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)

    def forward(self, src: Tensor, trg: Tensor, src_mask: Tensor,
                tgt_mask: Tensor, src_padding_mask: Tensor,
                tgt_padding_mask: Tensor, memory_key_padding_mask: Tensor):
        src_emb = self.positional_encoding(self.src_tok_emb(src))
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(trg))
        memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)
        outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,
                                        tgt_padding_mask, memory_key_padding_mask)
        return self.generator(outs)

    def encode(self, src: Tensor, src_mask: Tensor):
        return self.transformer_encoder(self.positional_encoding(
                            self.src_tok_emb(src)), src_mask)

    def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
        return self.transformer_decoder(self.positional_encoding(
                          self.tgt_tok_emb(tgt)), memory,
                          tgt_mask)

Text tokens are represented by token embeddings, and a positional encoding is added to the token embedding to introduce a notion of word order.
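
For reference (this formula block is added for clarity and is not part of the original write-up), the PositionalEncoding class below implements the sinusoidal encoding from "Attention Is All You Need":

PE_{(pos,\,2i)} = \sin\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right), \qquad PE_{(pos,\,2i+1)} = \cos\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right)

where pos is the token position and i indexes the embedding dimension. In the code, den precomputes the factor 10000^{-2i/d_model} as exp(-(2i) * log(10000) / emb_size), and the resulting table is registered as a buffer so it moves with the model but is not trained.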

class PositionalEncoding(nn.Module):
    def __init__(self, emb_size: int, dropout, maxlen: int = 5000):
        super(PositionalEncoding, self).__init__()
        den = torch.exp(- torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        pos = torch.arange(0, maxlen).reshape(maxlen, 1)
        pos_embedding = torch.zeros((maxlen, emb_size))
        pos_embedding[:, 0::2] = torch.sin(pos * den)
        pos_embedding[:, 1::2] = torch.cos(pos * den)
        pos_embedding = pos_embedding.unsqueeze(-2)

        self.dropout = nn.Dropout(dropout)
        self.register_buffer('pos_embedding', pos_embedding)

    def forward(self, token_embedding: Tensor):
        return self.dropout(token_embedding +
                            self.pos_embedding[:token_embedding.size(0),:])

class TokenEmbedding(nn.Module):
    def __init__(self, vocab_size: int, emb_size):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.emb_size = emb_size
    def forward(self, tokens: Tensor):
        return self.embedding(tokens.long()) * math.sqrt(self.emb_size)

We create a subsequent-word mask to stop a target word from attending to the words that come after it, and we also create masks for hiding the source and target padding tokens.

def generate_square_subsequent_mask(sz):
    mask = (torch.triu(torch.ones((sz, sz), device=device)) == 1).transpose(0, 1)
    mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
    return mask

def create_mask(src, tgt):
  src_seq_len = src.shape[0]
  tgt_seq_len = tgt.shape[0]

  tgt_mask = generate_square_subsequent_mask(tgt_seq_len)
  src_mask = torch.zeros((src_seq_len, src_seq_len), device=device).type(torch.bool)

  src_padding_mask = (src == PAD_IDX).transpose(0, 1)
  tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)
  return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask
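
As a quick illustration (not part of the original code), the subsequent-word mask for a length-4 sequence is lower-triangular: zeros on and below the diagonal (attention allowed) and -inf above it (attention blocked), so position i can only attend to positions up to i:

# Inspect the causal mask for a length-4 target sequence.
# Expected pattern:
# tensor([[0., -inf, -inf, -inf],
#         [0.,   0., -inf, -inf],
#         [0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.]])
print(generate_square_subsequent_mask(4))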

4. Train the Model on the Server

When training on the server GPU, NUM_ENCODER_LAYERS and NUM_DECODER_LAYERS are set to 3 or higher, NHEAD is set to 8, and EMB_SIZE is set to 512.

SRC_VOCAB_SIZE = len(ja_vocab)
TGT_VOCAB_SIZE = len(en_vocab)
EMB_SIZE = 512
NHEAD = 8
FFN_HID_DIM = 512
# BATCH_SIZE = 16
BATCH_SIZE = 32

NUM_ENCODER_LAYERS = 3
NUM_DECODER_LAYERS = 3
NUM_EPOCHS = 16
transformer = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS,
                                 EMB_SIZE, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE,
                                 FFN_HID_DIM)

for p in transformer.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)

transformer = transformer.to(device)

loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)

optimizer = torch.optim.Adam(
    transformer.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9
)
def train_epoch(model, train_iter, optimizer):
  model.train()
  losses = 0
  for idx, (src, tgt) in  enumerate(train_iter):
      src = src.to(device)
      tgt = tgt.to(device)

      tgt_input = tgt[:-1, :]

      src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input)

      logits = model(src, tgt_input, src_mask, tgt_mask,
                                src_padding_mask, tgt_padding_mask, src_padding_mask)

      optimizer.zero_grad()

      tgt_out = tgt[1:,:]
      loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
      loss.backward()

      optimizer.step()
      losses += loss.item()
  return losses / len(train_iter)
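
The slicing of tgt in train_epoch implements standard teacher forcing: the decoder input is the target sequence without its last token, and the loss is computed against the target without its first token, so at every position the model predicts the next token. A minimal illustration with dummy token ids (hypothetical values, not from the dataset):

# A target column [BOS, y1, y2, y3, EOS] is split into decoder input and training target.
demo_tgt = torch.tensor([[BOS_IDX], [11], [12], [13], [EOS_IDX]])  # shape: (seq_len, batch=1)
demo_input = demo_tgt[:-1, :]   # decoder input:   [BOS, y1, y2, y3]
demo_output = demo_tgt[1:, :]   # training target: [y1, y2, y3, EOS]
print(demo_input.squeeze(1).tolist(), demo_output.squeeze(1).tolist())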


def evaluate(model, val_iter):
  model.eval()
  losses = 0
  # Evaluation uses the same shifted-target loss as training, but without gradients.
  with torch.no_grad():
    for idx, (src, tgt) in enumerate(val_iter):
      src = src.to(device)
      tgt = tgt.to(device)

      tgt_input = tgt[:-1, :]

      src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input)

      logits = model(src, tgt_input, src_mask, tgt_mask,
                     src_padding_mask, tgt_padding_mask, src_padding_mask)
      tgt_out = tgt[1:, :]
      loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
      losses += loss.item()
  return losses / len(val_iter)

5. Start Training

After preparing the necessary classes and functions, we are ready to train the model. How long training takes can vary greatly depending on factors such as computing power, the hyperparameters, and the size of the dataset. When training on the full JParaCrawl sentence list (about 5.9 million sentences per language), each epoch took roughly 5 hours on a single NVIDIA GeForce RTX 3070 GPU.

for epoch in tqdm.tqdm(range(1, NUM_EPOCHS+1)):
  start_time = time.time()
  train_loss = train_epoch(transformer, train_iter, optimizer)
  end_time = time.time()
  print((f"Epoch: {epoch}, Train loss: {train_loss:.3f}, "
          f"Epoch time = {(end_time - start_time):.3f}s"))

6. Training Results

 0%|          | 0/16 [00:00<?, ?it/s]/root/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:5137: UserWarning: Support for mismatched key_padding_mask and attn_mask is deprecated. Use same type for both instead.
  warnings.warn(
  6%|▋         | 1/16 [01:42<25:44, 102.97s/it]
Epoch: 1, Train loss: 4.888, Epoch time = 102.967s
 12%|█▎        | 2/16 [03:26<24:02, 103.04s/it]
Epoch: 2, Train loss: 3.644, Epoch time = 103.080s
 19%|█▉        | 3/16 [05:09<22:23, 103.36s/it]
Epoch: 3, Train loss: 3.174, Epoch time = 103.749s
 25%|██▌       | 4/16 [06:53<20:41, 103.49s/it]
Epoch: 4, Train loss: 2.847, Epoch time = 103.674s
 31%|███▏      | 5/16 [08:35<18:51, 102.89s/it]
Epoch: 5, Train loss: 2.595, Epoch time = 101.829s
 38%|███▊      | 6/16 [10:17<17:07, 102.71s/it]
Epoch: 6, Train loss: 2.398, Epoch time = 102.367s
 44%|████▍     | 7/16 [11:59<15:21, 102.34s/it]
Epoch: 7, Train loss: 2.235, Epoch time = 101.564s
 50%|█████     | 8/16 [13:41<13:39, 102.47s/it]
Epoch: 8, Train loss: 2.104, Epoch time = 102.736s
 56%|█████▋    | 9/16 [15:24<11:57, 102.56s/it]
Epoch: 9, Train loss: 1.995, Epoch time = 102.777s
 62%|██████▎   | 10/16 [17:07<10:14, 102.49s/it]
Epoch: 10, Train loss: 1.900, Epoch time = 102.319s
 69%|██████▉   | 11/16 [18:49<08:32, 102.51s/it]
Epoch: 11, Train loss: 1.819, Epoch time = 102.563s
 75%|███████▌  | 12/16 [20:32<06:50, 102.71s/it]
Epoch: 12, Train loss: 1.747, Epoch time = 103.165s
 81%|████████▏ | 13/16 [22:13<05:06, 102.20s/it]
Epoch: 13, Train loss: 1.683, Epoch time = 101.005s
 88%|████████▊ | 14/16 [23:55<03:23, 101.95s/it]
Epoch: 14, Train loss: 1.625, Epoch time = 101.396s
 94%|█████████▍| 15/16 [25:37<01:42, 102.08s/it]
Epoch: 15, Train loss: 1.576, Epoch time = 102.353s
100%|██████████| 16/16 [27:20<00:00, 102.54s/it]
Epoch: 16, Train loss: 1.532, Epoch time = 103.109s
