Transformer (CPU) Code Reproduction

Annotated line by line, explained line by line. The code can be run directly.
Code from https://github.com/graykode/nlp-tutorial/tree/master/5-1.Transformer

import numpy as np
import torch
import torch.nn as nn
import math
import time
import torch.optim as optim
from torch.utils.data import Dataset,DataLoader


# 13. MyDataset
class MyDataset(Dataset):
    # Store the data tensors
    def __init__(self, enc_inputs, dec_inputs, target_batch):
        self.enc_inputs = enc_inputs
        self.dec_inputs = dec_inputs
        self.target_batch = target_batch

    # Return the number of samples (how many rows of data there are)
    def __len__(self):
        return len(self.enc_inputs)
        # return self.enc_inputs.shape[0]

    # Return the sample at the given index; it has one dimension fewer than the tensors returned by make_batch
    def __getitem__(self, idx):
        return self.enc_inputs[idx], self.dec_inputs[idx], self.target_batch[idx]


# 12. make_batch
def make_batch(sentences):
    input_batch = [[src_vocab[n] for n in sentences[0].split()]]  # [[1, 2, 3, 4, 0]]
    output_batch = [[tgt_vocab[n] for n in sentences[1].split()]]  # [[5, 1, 2, 3, 4]]
    target_batch = [[tgt_vocab[n] for n in sentences[2].split()]]  # [[1, 2, 3, 4, 6]]
    return torch.LongTensor(input_batch), torch.LongTensor(output_batch), torch.LongTensor(target_batch)


# 11. get_attn_subsequent_mask
def get_attn_subsequent_mask(seq):
    attn_shape = [seq.size(0), seq.size(1), seq.size(1)]  # [1, 5, 5]
    subsequence_mask = np.triu(np.ones(attn_shape), k=1)  # ndarray [1, 5, 5]
    # .byte() is equivalent to self.to(torch.uint8)
    subsequence_mask = torch.from_numpy(subsequence_mask).byte()  # [1, 5, 5]
    return subsequence_mask
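# A minimal usage sketch (the demo tensor below is illustrative, not part of the original script):
# for any [1, 5] input, e.g. torch.LongTensor([[5, 1, 2, 3, 4]]), the returned [1, 5, 5] mask is the
# k=1 upper triangle, where a 1 at (i, j) means "position i must not attend to the future position j":
#   [[0, 1, 1, 1, 1],
#    [0, 0, 1, 1, 1],
#    [0, 0, 0, 1, 1],
#    [0, 0, 0, 0, 1],
#    [0, 0, 0, 0, 0]]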


# 10. DecoderLayer: masked multi-head self-attention, encoder-decoder (cross) attention, and a feed-forward network
class DecoderLayer(nn.Module):
    def __init__(self):
        super(DecoderLayer, self).__init__()
        self.dec_self_attn = MultiHeadAttention()
        self.dec_enc_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    #                [1, 5, 512]  [1, 5, 512]      [1, 5, 5]           [1, 5, 5]
    def forward(self, dec_inputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask):
        # dec_self_attn===[1, 8, 5, 5]  dec_outputs===[1, 5, 512]
        dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, dec_inputs, dec_self_attn_mask)
        # dec_enc_attn===[1, 8, 5, 5]  dec_outputs===[1, 5, 512]
        dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, enc_outputs, dec_enc_attn_mask)
        dec_outputs = self.pos_ffn(dec_outputs)  # [1, 5, 512]
        return dec_outputs, dec_self_attn, dec_enc_attn


# 9. Decoder: token embedding, positional encoding, and a stack of DecoderLayers (masked self-attention, cross attention, feed-forward network)
class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model)  # [7, 512]
        self.pos_emb = PositionalEncoding(d_model)
        self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])  # 10.

    def forward(self, dec_inputs, enc_inputs, enc_outputs):
        dec_outputs = self.tgt_emb(dec_inputs)  # [1, 5, 512]
        dec_outputs = self.pos_emb(dec_outputs.transpose(0, 1)).transpose(0, 1)  # [1, 5, 512]--->[5, 1, 512]--->[1, 5, 512]
        # 💣💣💣 First sub-layer: masked self-attention. The mask must both hide padding and hide future tokens (the two masks are added) 💣💣💣
        dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs)  # [1, 5]--->[1, 5, 5]
        # [1, 5]---> a [1, 5, 5] upper-triangular matrix with k=1
        dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_inputs)  # 11.
        # Adding a bool tensor to an int tensor treats True as 1 and False as 0 (see the worked mask example after this class)
        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)  # [1, 5, 5]
        # 💣💣💣 Second sub-layer: encoder-decoder (cross) attention. Only a padding mask is needed, because the mask marks the keys, and the keys come from the encoder side (enc_inputs) 💣💣💣
        dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs)  # [1, 5, 5]
        dec_self_attns, dec_enc_attns = [], []
        for layer in self.layers:
            # dec_outputs===[1, 5, 512]  dec_self_attn===[1, 8, 5, 5]  dec_enc_attn===[1, 8, 5, 5]
            dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask)
            dec_self_attns.append(dec_self_attn)
            dec_enc_attns.append(dec_enc_attn)
        return dec_outputs, dec_self_attns, dec_enc_attns
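# A worked example of the combined decoder self-attention mask built in Decoder.forward above:
# for dec_inputs = [[5, 1, 2, 3, 4]] ('S i want a beer') no token equals the pad index 0, so the
# padding mask is all False; adding the k=1 upper-triangular subsequence mask and applying
# torch.gt(..., 0) therefore yields a boolean [1, 5, 5] mask that only hides future positions:
#   [[False,  True,  True,  True,  True],
#    [False, False,  True,  True,  True],
#    [False, False, False,  True,  True],
#    [False, False, False, False,  True],
#    [False, False, False, False, False]]
# A position marked True is filled with -1e9 before the softmax in ScaledDotProductAttention.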


# 8. PoswiseFeedForwardNet
class PoswiseFeedForwardNet(nn.Module):
    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, inputs):
        # inputs===[1, 5, 512]
        residual = inputs  # residual connection
        output = nn.ReLU()(self.conv1(inputs.transpose(1, 2)))  # [1, 5, 512]--->[1, 512, 5]--->[1, 2048, 5]
        output = self.conv2(output).transpose(1, 2)  # [1, 2048, 5]--->[1, 512, 5]--->[1, 5, 512]
        return self.layer_norm(output + residual)
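# The two kernel_size=1 convolutions above implement the position-wise feed-forward network
# FFN(x) = max(0, x·W1 + b1)·W2 + b2 from "Attention Is All You Need": a 1x1 Conv1d along the
# sequence dimension is equivalent to applying the same Linear(d_model, d_ff) / Linear(d_ff, d_model)
# pair independently at every position, followed here by a residual connection and LayerNorm.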


# 7. ScaledDotProductAttention
class ScaledDotProductAttention(nn.Module):
    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        # Q,K,V===[1, 8, 5, 64]  attn_mask===[1, 8, 5, 5]
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)  # [1, 8, 5, 64]*[1, 8, 64, 5]==[1, 8, 5, 5]
        scores.masked_fill_(attn_mask, -1e9)
        # softmax over the last dimension (the key positions of each row)
        attn = nn.Softmax(dim=-1)(scores)  # [1, 8, 5, 5]
        context = torch.matmul(attn, V)  # [1, 8, 5, 5]*[1, 8, 5, 64]==[1, 8, 5, 64]
        return context, attn
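# The forward pass above computes the standard scaled dot-product attention
#   Attention(Q, K, V) = softmax(Q·K^T / sqrt(d_k)) · V
# with masked positions set to -1e9 before the softmax so they receive (near-)zero attention weight.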


# 6. MultiHeadAttention
class MultiHeadAttention(nn.Module):
    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.linear = nn.Linear(n_heads * d_v, d_model)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, Q, K, V, attn_mask):
        # Q,K,V===[1, 5, 512]  attn_mask===[1, 5, 5]
        residual = Q  # residual connection
        batch_size = Q.size(0)
        q_s = self.W_Q(Q).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # [1, 5, 8, 64]--->[1, 8, 5, 64]
        k_s = self.W_K(K).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # [1, 5, 8, 64]--->[1, 8, 5, 64]
        v_s = self.W_V(V).view(batch_size, -1, n_heads, d_v).transpose(1, 2)  # [1, 5, 8, 64]--->[1, 8, 5, 64]
        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1)  # [1, 1, 5, 5]--->[1, 8, 5, 5]
        # attn===[1, 8, 5, 5]  context===[1, 8, 5, 64]
        context, attn = ScaledDotProductAttention()(q_s, k_s, v_s, attn_mask)  # 7.
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_heads * d_v)  # [1, 8, 5, 64]--->[1, 5, 8, 64]--->[1, 5, 512]
        output = self.linear(context)  # [1, 5, 512]--->[1, 5, 512]
        return self.layer_norm(output + residual), attn
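# Summary of the multi-head attention above (with the values used in this script, n_heads = 8 and
# d_k = d_v = 64, so n_heads * d_k = d_model = 512):
#   MultiHead(Q, K, V) = Concat(head_1, ..., head_8)·W_O,  head_i = Attention(Q·W_Q_i, K·W_K_i, V·W_V_i)
# where the per-head projections are packed into the single W_Q / W_K / W_V Linear layers and then
# reshaped. Note that the residual connection and LayerNorm (post-LN, i.e. LayerNorm(x + Sublayer(x)))
# are applied inside MultiHeadAttention and PoswiseFeedForwardNet rather than in the layer classes.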


# 5. EncoderLayer: multi-head self-attention followed by a feed-forward network
class EncoderLayer(nn.Module):
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()  # 6.
        self.pos_ffn = PoswiseFeedForwardNet()  # 8.

    def forward(self, enc_inputs, enc_self_attn_mask):
        # attn===[1, 8, 5, 5]  enc_outputs===[1, 5, 512]
        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask)
        enc_outputs = self.pos_ffn(enc_outputs)  # [1, 5, 512]
        return enc_outputs, attn


# 4. get_attn_pad_mask
def get_attn_pad_mask(seq_q, seq_k):
    batch_size, len_q = seq_q.size()  # [1, 5]
    batch_size, len_k = seq_k.size()  # [1, 5]
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # [1, 5]--->[1, 1, 5]
    return pad_attn_mask.expand(batch_size, len_q, len_k)  # [1, 5, 5]
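# A worked example (values follow from src_vocab defined below): for enc_inputs = [[1, 2, 3, 4, 0]]
# ('ich mochte ein bier P'), seq_k.eq(0) marks only the last position as padding, and expand()
# broadcasts that row to every query position:
#   [[[False, False, False, False, True],
#     [False, False, False, False, True],
#     [False, False, False, False, True],
#     [False, False, False, False, True],
#     [False, False, False, False, True]]]    # shape [1, 5, 5]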


# 3. PositionalEncoding
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # build the positional-encoding matrix pe
        pe = torch.zeros(max_len, d_model)  # [5000, 512]
        # position indices along the max_len axis, as a column vector
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # [5000, 1]
        # frequency terms along the d_model axis; div_term is a 1-D tensor of length 256
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))  # (256,)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)  # [5000, 512]--->[1, 5000, 512]--->[5000, 1, 512]
        # register pe as a buffer: saved with the module's state but never updated by the optimizer
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x:[seq_len, batch_size, d_model]====[5, 1, 512]
        x = x + self.pe[:x.size(0), :]  # [5, 1, 512]
        return self.dropout(x)
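# The buffer built above follows the sinusoidal encoding from "Attention Is All You Need":
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
# div_term = exp(2i · (-ln(10000) / d_model)) is just 10000^(-2i / d_model) computed in a
# numerically convenient way, which is why it has length d_model / 2 = 256 here.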


# 2. Encoder: token embedding, positional encoding, and a stack of EncoderLayers (self-attention, feed-forward network)
class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.src_emb = nn.Embedding(src_vocab_size, d_model)  # [5, 512]
        self.pos_emb = PositionalEncoding(d_model)  # 3.
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])  # 5.

    def forward(self, enc_inputs):
        enc_outputs = self.src_emb(enc_inputs)  # [1, 5]--->[1, 5, 512]
        enc_outputs = self.pos_emb(enc_outputs.transpose(0, 1)).transpose(0, 1)  # [1, 5, 512]--->[5, 1, 512]--->[1, 5, 512]
        # get_attn_pad_mask records where the pad tokens sit in the sentence; it is passed down so that self-attention and cross attention can remove the influence of the pad symbol  4.
        enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs)  # [1, 5, 5]
        enc_self_attns = []  # stores the attention weight distributions computed in each of the 6 encoder layers
        for layer in self.layers:
            enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)  # enc_self_attn===[1, 8, 5, 5]  enc_outputs===[1, 5, 512]
            enc_self_attns.append(enc_self_attn)
        return enc_outputs, enc_self_attns


# 1. The overall network has three parts: the encoder, the decoder, and the output projection layer
class Transformer(nn.Module):
    def __init__(self):
        super(Transformer, self).__init__()
        self.encoder = Encoder()  # 2.
        self.decoder = Decoder()  # 9.
        # The output layer maps d_model (512) to the target vocabulary size; the softmax for the loss is handled later
        self.projection = nn.Linear(d_model, tgt_vocab_size, bias=False)

    def forward(self, enc_inputs, dec_inputs):
        # enc_inputs: encoder-side input, shape [batch_size, src_len]
        # dec_inputs: decoder-side input, shape [batch_size, tgt_len]
        enc_outputs, enc_self_attns = self.encoder(enc_inputs)  # enc_outputs===[1, 5, 512]
        dec_outputs, dec_self_attns, dec_enc_attns = self.decoder(dec_inputs, enc_inputs, enc_outputs)  # dec_outputs===[1, 5, 512]
        dec_logits = self.projection(dec_outputs)  # [1, 5, 512]--->[1, 5, 7]
        # No softmax here, because nn.CrossEntropyLoss() is used later
        # dec_logits.view(-1, dec_logits.size(-1))===[5,7]
        return dec_logits.view(-1, dec_logits.size(-1)), enc_self_attns, dec_self_attns, dec_enc_attns
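# Flattening to [5, 7] matches what nn.CrossEntropyLoss expects: logits of shape [N, C]
# (here N = batch_size * tgt_len = 5, C = tgt_vocab_size = 7) and integer class targets of
# shape [N]; the loss applies log-softmax internally, so no explicit softmax layer is needed.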


if __name__ == '__main__':
    # The three input sentences: the encoder input, the decoder input, and the decoder's ground-truth labels
    # P: pad token   S: start-of-sequence token   E: end-of-sequence token
    sentences = ['ich mochte ein bier P', 'S i want a beer', 'i want a beer E']

    # build the vocabularies
    src_vocab = {'P': 0, 'ich': 1, 'mochte': 2, 'ein': 3, 'bier': 4}
    src_vocab_size = len(src_vocab)
    print('src_vocab_size (source vocabulary size) --->', src_vocab_size)

    tgt_vocab = {'P': 0, 'i': 1, 'want': 2, 'a': 3, 'beer': 4, 'S': 5, 'E': 6}
    tgt_vocab_size = len(tgt_vocab)
    print('tgt_vocab_size (target vocabulary size) --->', tgt_vocab_size)

    src_len = 5  # length of source
    tgt_len = 5  # length of target

    # model hyperparameters
    d_model = 512  # Embedding Size
    d_ff = 2048  # FeedForward dimension
    d_k = d_v = 64  # dimension of K (= Q) and V per head
    n_layers = 6  # number of (Encoder and Decoder) Layer
    n_heads = 8  # number of heads in Multi-Head Attention

    # model
    model = Transformer()
    # loss function
    criterion = nn.CrossEntropyLoss()
    # optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # data
    enc_inputs, dec_inputs, target_batch = make_batch(sentences)  # 12.

    # instantiate the dataset
    dataset = MyDataset(enc_inputs, dec_inputs, target_batch)  # 13.
    # instantiate the DataLoader
    dataloader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)

    model.train()
    start = time.time()
    for epoch in range(20):
        # The DataLoader adds a batch dimension back to what MyDataset.__getitem__ returns
        for enc_inputs, dec_inputs, target_batch in dataloader:
            outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_inputs, dec_inputs)
            # target_batch.contiguous().view(-1) is a 1-D tensor [1, 2, 3, 4, 6]; for the loss the targets must have one dimension fewer than the predictions
            loss = criterion(outputs, target_batch.contiguous().view(-1))
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    end = time.time()
    print(f'Total execution time on CPU ---> {end - start}s')
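
    # A minimal inference sketch (an addition, not part of the original listing): reuse the last
    # (enc_inputs, dec_inputs) batch from the loop above and greedily pick the argmax over the
    # tgt_vocab dimension at each of the 5 output positions.
    model.eval()
    with torch.no_grad():
        predict, _, _, _ = model(enc_inputs, dec_inputs)  # [5, 7]
    predict = predict.max(dim=1)[1]  # [5] indices into tgt_vocab
    idx2word = {i: w for w, i in tgt_vocab.items()}
    print(sentences[0], '--->', [idx2word[n.item()] for n in predict])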