BERT Code Implementation + Paper Link + Explanations

BERT paper link and code implementation

1. Paper link: BERT paper

2. Paper walkthroughs:

[1] Paper walkthrough 1 (Li Mu)
[2] Paper walkthrough 2 (NLP从入门到放弃)

3. Code implementation

BERT code implementation walkthrough

The code below has no real dataset, only the overall framework, but it runs as-is.
import math
import re
from random import *
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# sample IsNext and NotNext to be same in small batch size
def make_batch():
    batch = []
    positive = negative = 0  ## count of positive (IsNext) and negative (NotNext) NSP samples; ideally the ratio is close to 1:1 within a batch
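    # Note: the loop below only exits once positive == negative == batch_size/2, so batch_size should be even.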
    while positive != batch_size/2 or negative != batch_size/2:
        tokens_a_index, tokens_b_index = randrange(len(sentences)), randrange(len(sentences)) # e.g. tokens_a_index=3, tokens_b_index=1; pick two sentences at random from the corpus
        tokens_a, tokens_b = token_list[tokens_a_index], token_list[tokens_b_index] ## look up their token-id lists, e.g. tokens_a=[5, 23, 26, 20, 9, 13, 18] tokens_b=[27, 11, 23, 8, 17, 28, 12, 22, 16, 25]
        input_ids = [word_dict['[CLS]']] + tokens_a + [word_dict['[SEP]']] + tokens_b + [word_dict['[SEP]']] ## add the special tokens ([CLS]=1, [SEP]=2): [1, 5, 23, 26, 20, 9, 13, 18, 2, 27, 11, 23, 8, 17, 28, 12, 22, 16, 25, 2]
        segment_ids = [0] * (1 + len(tokens_a) + 1) + [1] * (len(tokens_b) + 1) ## segment ids marking which sentence each token belongs to: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

        # MASK LM
        n_pred = min(max_pred, max(1, int(round(len(input_ids) * 0.15)))) # n_pred=3; mask roughly 15% of the tokens, floored at 1 and capped at max_pred so every sample has at least one and at most max_pred MLM targets
        cand_maked_pos = [i for i, token in enumerate(input_ids)
                          if token != word_dict['[CLS]'] and token != word_dict['[SEP]']] ## positions eligible for masking, e.g. cand_maked_pos=[1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]; [CLS] and [SEP] are excluded because masking them would be meaningless
        shuffle(cand_maked_pos)  ## shuffle the candidates, e.g. cand_maked_pos=[6, 5, 17, 3, 1, 13, 16, 10, 12, 2, 9, 7, 11, 18, 4, 14, 15]; shuffling is just one way to pick random mask positions
        masked_tokens, masked_pos = [], []
        for pos in cand_maked_pos[:n_pred]: ## take the first n_pred (here 3) positions; masked_pos=[6, 5, 17] records the positions, masked_tokens=[13, 9, 16] records the original token ids at those positions
            masked_pos.append(pos)
            masked_tokens.append(input_ids[pos])
            if random() < 0.8:  # 80%: replace with [MASK]
                input_ids[pos] = word_dict['[MASK]'] # make mask
            elif random() < 0.5:  # 10% of all cases (half of the remaining 20%): replace with a random token; the final 10% keep the original token
                index = randint(0, vocab_size - 1) # random index in vocabulary
                input_ids[pos] = word_dict[number_dict[index]] # replace

        # Zero Paddings
        n_pad = maxlen - len(input_ids)  ## maxlen=30; n_pad is the number of padding tokens needed (10 in this example)
        input_ids.extend([0] * n_pad)  # pad input_ids with zeros up to maxlen
        segment_ids.extend([0] * n_pad) # pad segment_ids with zeros as well; the 0 collides with the segment id of the first sentence, but segment ids only need to distinguish the two sentences (they act as another kind of positional signal), and padded positions are masked out of attention anyway

        # Zero-pad the MLM targets to max_pred so that every sample in the batch predicts the same number of tokens; otherwise one sample might predict 5 tokens, another 7, another 8, and they could not be stacked into one tensor
        ## Why pad masked_tokens with 0 rather than some other id? Because the loss below uses CrossEntropyLoss(ignore_index=0), the padded label 0 ([PAD]) is simply ignored; padding with 1 would wrongly make [CLS] a training target
        if max_pred > n_pred:
            n_pad = max_pred - n_pred
            masked_tokens.extend([0] * n_pad) ## masked_tokens = [13, 9, 16, 0, 0]; the ground-truth token ids of the masked positions
            masked_pos.extend([0] * n_pad) ## masked_pos = [6, 5, 17, 0, 0]; the positions that were masked

        if tokens_a_index + 1 == tokens_b_index and positive < batch_size/2:
            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, True]) # IsNext
            positive += 1
        elif tokens_a_index + 1 != tokens_b_index and negative < batch_size/2:
            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, False]) # NotNext
            negative += 1
    return batch
# Preprocessing finished

def get_attn_pad_mask(seq_q, seq_k):
    batch_size, len_q = seq_q.size()
    batch_size, len_k = seq_k.size()
    # eq(zero) is PAD token
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # batch_size x 1 x len_k(=len_q), one is masking
    return pad_attn_mask.expand(batch_size, len_q, len_k)  # batch_size x len_q x len_k

def gelu(x):
    "Implementation of the gelu activation function by Hugging Face"
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
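# Note: this is the exact erf-based GELU; torch.nn.functional.gelu should give an equivalent result in current PyTorch versions.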

class Embedding(nn.Module):
    def __init__(self):
        super(Embedding, self).__init__()
        self.tok_embed = nn.Embedding(vocab_size, d_model)  # token embedding
        self.pos_embed = nn.Embedding(maxlen, d_model)  # position embedding
        self.seg_embed = nn.Embedding(n_segments, d_model)  # segment(token type) embedding
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x, seg):
        seq_len = x.size(1)
        pos = torch.arange(seq_len, dtype=torch.long)
        pos = pos.unsqueeze(0).expand_as(x)  # (seq_len,) -> (batch_size, seq_len)
        embedding = self.tok_embed(x) + self.pos_embed(pos) + self.seg_embed(seg)
        return self.norm(embedding)

class ScaledDotProductAttention(nn.Module):
    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k) # scores : [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        scores.masked_fill_(attn_mask, -1e9) # Fills elements of self tensor with value where mask is one.
        attn = nn.Softmax(dim=-1)(scores)
        context = torch.matmul(attn, V)
        return context, attn

class MultiHeadAttention(nn.Module):
    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.fc = nn.Linear(n_heads * d_v, d_model)   # output projection; defined here (not in forward) so that it is actually trained
        self.norm = nn.LayerNorm(d_model)
    def forward(self, Q, K, V, attn_mask):
        # q: [batch_size x len_q x d_model], k: [batch_size x len_k x d_model], v: [batch_size x len_k x d_model]
        residual, batch_size = Q, Q.size(0)
        # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        q_s = self.W_Q(Q).view(batch_size, -1, n_heads, d_k).transpose(1,2)  # q_s: [batch_size x n_heads x len_q x d_k]
        k_s = self.W_K(K).view(batch_size, -1, n_heads, d_k).transpose(1,2)  # k_s: [batch_size x n_heads x len_k x d_k]
        v_s = self.W_V(V).view(batch_size, -1, n_heads, d_v).transpose(1,2)  # v_s: [batch_size x n_heads x len_k x d_v]

        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size x n_heads x len_q x len_k]

        # context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        context, attn = ScaledDotProductAttention()(q_s, k_s, v_s, attn_mask)
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_heads * d_v) # context: [batch_size x len_q x n_heads * d_v]
        output = self.fc(context) # output: [batch_size x len_q x d_model]
        return self.norm(output + residual), attn # residual connection + LayerNorm

class PoswiseFeedForwardNet(nn.Module):
    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)

    def forward(self, x):
        # (batch_size, len_seq, d_model) -> (batch_size, len_seq, d_ff) -> (batch_size, len_seq, d_model)
        return self.fc2(gelu(self.fc1(x)))

class EncoderLayer(nn.Module):
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, enc_inputs, enc_self_attn_mask):
        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask) # enc_inputs to same Q,K,V
        enc_outputs = self.pos_ffn(enc_outputs) # enc_outputs: [batch_size x len_q x d_model]
        return enc_outputs, attn

## 1. Overall BERT model architecture
class BERT(nn.Module):
    def __init__(self):
        super(BERT, self).__init__()
        self.embedding = Embedding() ## embedding layer: token + position + segment embeddings
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)]) ## stack n_layers encoder layers; EncoderLayer is defined above
        self.fc = nn.Linear(d_model, d_model) ## pooler linear layer for the [CLS] / NSP head
        self.activ1 = nn.Tanh() ## activation for the NSP head
        self.linear = nn.Linear(d_model, d_model) # linear layer for the MLM head
        self.activ2 = gelu ## activation for the MLM head
        self.norm = nn.LayerNorm(d_model)
        self.classifier = nn.Linear(d_model, 2) ## NSP classification layer: d_model -> 2 (IsNext / NotNext)
        # decoder is shared with embedding layer
        embed_weight = self.embedding.tok_embed.weight
        n_vocab, n_dim = embed_weight.size()
        self.decoder = nn.Linear(n_dim, n_vocab, bias=False)
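        # Weight tying: reuse the token-embedding matrix as the MLM output projection, so decoder and tok_embed share (and jointly train) the same parameters.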
        self.decoder.weight = embed_weight
        self.decoder_bias = nn.Parameter(torch.zeros(n_vocab))

    def forward(self, input_ids, segment_ids, masked_pos):
        output = self.embedding(input_ids, segment_ids) ## token + position + segment embeddings for input_ids / segment_ids
        enc_self_attn_mask = get_attn_pad_mask(input_ids, input_ids)
        for layer in self.layers:
            output, enc_self_attn = layer(output, enc_self_attn_mask)
        # output : [batch_size, len, d_model], attn : [batch_size, n_heads, len, len]
        # it will be decided by first token(CLS)
        h_pooled = self.activ1(self.fc(output[:, 0])) # [batch_size, d_model]
        logits_clsf = self.classifier(h_pooled) # [batch_size, 2]

        masked_pos = masked_pos[:, :, None].expand(-1, -1, output.size(-1)) # [batch_size, max_pred, d_model]; e.g. one row of masked_pos is [6, 5, 17, 0, 0]
        # get masked position from final output of transformer.
        h_masked = torch.gather(output, 1, masked_pos) # masking position [batch_size, max_pred, d_model]
        h_masked = self.norm(self.activ2(self.linear(h_masked)))
        logits_lm = self.decoder(h_masked) + self.decoder_bias # [batch_size, max_pred, n_vocab]

        return logits_lm, logits_clsf

if __name__ == '__main__':
    # BERT Parameters
    maxlen = 30 # maximum sequence length; choose a value that covers ~95-99% of your sentences rather than the mean (taking the true maximum also works)
    batch_size = 6 # number of sentence pairs fed to the model per batch
    max_pred = 5  # max tokens of prediction
    n_layers = 6 # number of encoder layers
    n_heads = 12 # number of heads in Multi-Head Attention
    d_model = 768 # Embedding Size
    d_ff = 3072  # 4*d_model, FeedForward dimension
    d_k = d_v = 64  # dimension of K(=Q), V
    n_segments = 2

    text = (
        'Hello, how are you? I am Romeo.\n'
        'Hello, Romeo My name is Juliet. Nice to meet you.\n'
        'Nice meet you too. How are you today?\n'
        'Great. My baseball team won the competition.\n'
        'Oh Congratulations, Juliet\n'
        'Thanks you Romeo'
    )
    sentences = re.sub("[.,!?\\-]", '', text.lower()).split('\n')  # strip '.', ',', '!', '?', '-' and split into sentences
    word_list = list(set(" ".join(sentences).split()))
    word_dict = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[MASK]': 3}
    for i, w in enumerate(word_list):
        word_dict[w] = i + 4
    number_dict = {i: w for i, w in enumerate(word_dict)}
    vocab_size = len(word_dict)

    token_list = list()
    for sentence in sentences:
        arr = [word_dict[s] for s in sentence.split()]
        token_list.append(arr)

    batch = make_batch()
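    # zip(*batch) below transposes the list of samples into five per-field tuples, which map(torch.LongTensor, ...) turns into tensors.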
    input_ids, segment_ids, masked_tokens, masked_pos, isNext = map(torch.LongTensor, zip(*batch))


    model = BERT()
    criterion = nn.CrossEntropyLoss(ignore_index=0)
    optimizer = optim.Adam(model.parameters(), lr=0.001)



    for epoch in range(100):
        optimizer.zero_grad()
        logits_lm, logits_clsf = model(input_ids, segment_ids, masked_pos) ## logits_lm: [6, 5, 29] = [batch_size, max_pred, vocab_size]; logits_clsf: [6, 2]
        loss_lm = criterion(logits_lm.transpose(1, 2), masked_tokens) # for masked LM ;masked_tokens [6,5]
        loss_lm = (loss_lm.float()).mean()
        loss_clsf = criterion(logits_clsf, isNext) # for sentence classification
        loss = loss_lm + loss_clsf
        if (epoch + 1) % 10 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
        loss.backward()
        optimizer.step()

    # Predict masked tokens and isNext
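    # zip(batch[0]) wraps each field of the first sample in a 1-element tuple, so every resulting tensor keeps a leading batch dimension of 1.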
    input_ids, segment_ids, masked_tokens, masked_pos, isNext = map(torch.LongTensor, zip(batch[0]))
    print(text)
    print([number_dict[w.item()] for w in input_ids[0] if number_dict[w.item()] != '[PAD]'])

    logits_lm, logits_clsf = model(input_ids, segment_ids, masked_pos)
    logits_lm = logits_lm.data.max(2)[1][0].data.numpy()
    print('masked tokens list : ',[pos.item() for pos in masked_tokens[0] if pos.item() != 0])
    print('predict masked tokens list : ',[pos for pos in logits_lm if pos != 0])

    logits_clsf = logits_clsf.data.max(1)[1].data.numpy()[0]
    print('isNext : ', True if isNext else False)
    print('predict isNext : ',True if logits_clsf else False)
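To make the attention padding mask easier to picture, here is a minimal, self-contained sketch of what get_attn_pad_mask produces for a batch with trailing [PAD] (id 0) tokens. The toy ids below are made up and unrelated to the training script above:

import torch

def get_attn_pad_mask(seq_q, seq_k):
    # True marks [PAD] positions (token id 0) in the key sequence.
    batch_size, len_q = seq_q.size()
    _, len_k = seq_k.size()
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)           # [batch_size, 1, len_k]
    return pad_attn_mask.expand(batch_size, len_q, len_k)   # [batch_size, len_q, len_k]

# Toy batch: two sequences of length 5, zero-padded at the end.
toy_ids = torch.LongTensor([[1, 5, 7, 0, 0],
                            [1, 9, 0, 0, 0]])
mask = get_attn_pad_mask(toy_ids, toy_ids)
print(mask.shape)  # torch.Size([2, 5, 5])
print(mask[0])     # every row is [False, False, False, True, True]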




3.1 Fully annotated BERT version

import math
import re
from random import *
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# sample IsNext and NotNext to be same in small batch size
# Note: BERT only uses the encoder part of the Transformer
## 1
# Builds the pre-training data. Note: input_ids and cand_maked_pos have different lengths, but cand_maked_pos stores positions (indices into input_ids, counted from 0 by enumerate), so the bookkeeping works out
def make_batch():
    batch = []
    positive = negative = 0  ## count of positive (IsNext) and negative (NotNext) NSP samples; ideally the ratio is close to 1:1 within a batch
    while positive != batch_size/2 or negative != batch_size/2:
        # randomly pick two sentences a and b (as token-id lists)
        tokens_a_index, tokens_b_index = randrange(len(sentences)), randrange(len(sentences))  # e.g. tokens_a_index=3, tokens_b_index=1; pick two sentences at random from the corpus
        tokens_a, tokens_b = token_list[tokens_a_index], token_list[tokens_b_index] # look up their token-id lists, e.g. tokens_a=[5, 23, 26, 20, 9, 13, 18] tokens_b=[27, 11, 23, 8, 17, 28, 12, 22, 16, 25]
        input_ids = [word_dict['[CLS]']] + tokens_a + [word_dict['[SEP]']] + tokens_b + [word_dict['[SEP]']]  ## add the special tokens ([CLS]=1, [SEP]=2): [1, 5, 23, 26, 20, 9, 13, 18, 2, 27, 11, 23, 8, 17, 28, 12, 22, 16, 25, 2]
        segment_ids = [0] * (1 + len(tokens_a) + 1) + [1] * (len(tokens_b) + 1) ## segment ids: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]; 0 marks the first sentence, 1 the second

        # MASK LM: n_pred is the number of tokens to mask; max_pred caps how many tokens per sample may be masked; input_ids is the combined id list of the two sentences; round() rounds to the nearest integer
        n_pred = min(max_pred, max(1, int(round(len(input_ids) * 0.15)))) # n_pred=3; mask roughly 15% of the tokens, floored at 1 and capped at max_pred so every sample has at least one and at most max_pred MLM targets

        # [CLS] and [SEP] must never be masked; collect the positions of all other tokens
        cand_maked_pos = [i for i, token in enumerate(input_ids)  # i counts from 0
                          if token != word_dict['[CLS]'] and token != word_dict['[SEP]']] ## positions eligible for masking, e.g. cand_maked_pos=[1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]; masking special tokens would be meaningless
        shuffle(cand_maked_pos)  ## shuffle the candidates, e.g. cand_maked_pos=[6, 5, 17, 3, 1, 13, 16, 10, 12, 2, 9, 7, 11, 18, 4, 14, 15]; shuffling is just one way to pick random mask positions
        masked_tokens, masked_pos = [], []  # masked_tokens: original labels of the masked tokens; masked_pos: their positions
        # Mask with the 80/10/10 scheme: 80% [MASK], 10% random token, 10% left unchanged (the second random() < 0.5 below splits the remaining 20% in half)


        for pos in cand_maked_pos[:n_pred]: ## take the first n_pred positions (assuming n_pred is 3); masked_pos=[6, 5, 17] records the positions, masked_tokens=[13, 9, 16] records the original token ids at those positions
            masked_pos.append(pos)  # record the masked position
            masked_tokens.append(input_ids[pos])    # record the original (ground-truth) token id
            if random() < 0.8:  # 80%: replace with [MASK]
                input_ids[pos] = word_dict['[MASK]']  # make mask
            elif random() < 0.5:  # 10% of all cases (half of the remaining 20%): replace with a random token; the final 10% keep the original token
                index = randint(0, vocab_size - 1)  # random index in vocabulary
                input_ids[pos] = word_dict[number_dict[index]]  # replace with a random vocabulary word

        # Zero Paddings: pad the sequence to maxlen
        n_pad = maxlen - len(input_ids)  # maxlen=30; n_pad is the number of padding tokens needed
        input_ids.extend([0] * n_pad)  # pad input_ids with zeros
        segment_ids.extend([0] * n_pad)  # pad segment_ids with zeros as well; the 0 collides with the segment id of the first sentence, but segment ids only need to distinguish the two sentences (another kind of positional signal), and padded positions are masked out of attention anyway

        # Zero-pad the MLM targets to max_pred so that every sample in the batch predicts the same number of tokens; otherwise one sample might predict 5 tokens, another 7, another 8, and they could not be stacked into one tensor
        ## Why pad masked_tokens with 0 rather than some other id? Because the loss below uses CrossEntropyLoss(ignore_index=0), the padded label 0 ([PAD]) is simply ignored; padding with 1 would wrongly make [CLS] a training target
        ## pad the MLM targets up to max_pred
        if max_pred > n_pred:
            n_pad = max_pred - n_pred
            masked_tokens.extend([0] * n_pad)  ## masked_tokens = [13, 9, 16, 0, 0]; the ground-truth token ids of the masked positions
            masked_pos.extend([0] * n_pad)  ## masked_pos = [6, 5, 17, 0, 0]; the positions that were masked

        if tokens_a_index + 1 == tokens_b_index and positive < batch_size/2:  # b immediately follows a, so this is a positive (IsNext) sample; keep it while we still need positives
            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, True])  # IsNext
            positive += 1
        elif tokens_a_index + 1 != tokens_b_index and negative < batch_size/2:  # b does not follow a, so this is a negative (NotNext) sample; keep it while we still need negatives
            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, False])  # NotNext
            negative += 1
    return batch
# Preprocessing finished

## 3.
# Build the attention padding mask
def get_attn_pad_mask(seq_q, seq_k):
    batch_size, len_q = seq_q.size()  # batch_size 6,len_q 30
    batch_size, len_k = seq_k.size()  # batch_size 6,len_k 30
    # eq(zero) is PAD token

    # positions where seq_k equals 0 ([PAD]) become True (masked), everything else False; then add a dimension and expand over the query positions
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # [batch_size, 1, len_k] = [6, 1, 30]; True marks padding
    return pad_attn_mask.expand(batch_size, len_q, len_k)  # [batch_size, len_q, len_k] = [6, 30, 30]

# Exact (erf-based) GELU activation
def gelu(x):
    "Implementation of the gelu activation function by Hugging Face"
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

## 2.
# The Embedding module sums three embeddings: token, position and segment
class Embedding(nn.Module):
    def __init__(self):
        super(Embedding, self).__init__()
        self.tok_embed = nn.Embedding(vocab_size, d_model)  # token embedding (vocab_size=29, d_model=768)
        self.pos_embed = nn.Embedding(maxlen, d_model)  # position embedding (maxlen=30, 768)
        self.seg_embed = nn.Embedding(n_segments, d_model)  # segment (token type) embedding, n_segments=2
        self.norm = nn.LayerNorm(d_model)   # layer normalization

    # inputs: x is input_ids, shape [6, 30] (only ~21 positions per row are real tokens incl. special symbols, the rest are padding); seg is segment_ids, shape [6, 30]
    def forward(self, x, seg):
        seq_len = x.size(1)  # sequence length, 30
        pos = torch.arange(seq_len, dtype=torch.long)  # position indices 0 .. seq_len-1
        pos = pos.unsqueeze(0).expand_as(x)  # (seq_len,) -> (batch_size, seq_len); broadcast to every sample in the batch
        embedding = self.tok_embed(x) + self.pos_embed(pos) + self.seg_embed(seg)  # resulting embedding shape [6, 30, 768]
        return self.norm(embedding)  # final layer normalization


## 6. ScaledDotProductAttention: the core scaled dot-product (self-)attention computation
class ScaledDotProductAttention(nn.Module):
    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    # Q, K, V all have shape [6, 12, 30, 64]; attn_mask has shape [6, 12, 30, 30]
    def forward(self, Q, K, V, attn_mask):
        # matmul is a plain matrix product: Q [6, 12, 30, 64] @ K^T [6, 12, 64, 30], scaled by sqrt(d_k) -- the similarity scores from the paper's formula
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k) # scores : [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        # wherever attn_mask is True (a padding position) set the score to a large negative number, so its softmax weight becomes ~0
        scores.masked_fill_(attn_mask, -1e9)  # Fills elements of self tensor with value where mask is one.
        attn = nn.Softmax(dim=-1)(scores)  # softmax over the last dim: each row holds the attention weights of one query token over all 30 key tokens
        # [6, 12, 30, 30]

        context = torch.matmul(attn, V)  # weighted sum of V with the attention weights gives the context vectors, [6, 12, 30, 64]
        return context, attn  # return the context vectors and the attention weights
## 5.
# The multi-head attention block includes the residual connection and LayerNorm; it is essentially the same as in the Transformer (the output projection and LayerNorm are applied at the end of forward), apart from the number of heads
class MultiHeadAttention(nn.Module):
    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k * n_heads)  # d_model=768, d_k=64 heads=12
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.fc = nn.Linear(n_heads * d_v, d_model)   # output projection; defined here (not in forward) so that it is actually trained
        self.norm = nn.LayerNorm(d_model)

    # the encoder passes the same [6, 30, 768] tensor as Q, K and V; enc_self_attn_mask has shape [6, 30, 30]
    def forward(self, Q, K, V, attn_mask):
        # q: [batch_size x len_q x d_model], k: [batch_size x len_k x d_model], v: [batch_size x len_k x d_model]
        residual, batch_size = Q, Q.size(0)     # save Q for the residual connection
        # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)

        # project first, then split into heads; note that q and k must share the same per-head dimension d_k, otherwise their dot product would be undefined
        q_s = self.W_Q(Q).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # q_s: [batch_size x n_heads x len_q x d_k]
        k_s = self.W_K(K).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # k_s: [batch_size x n_heads x len_k x d_k]
        v_s = self.W_V(V).view(batch_size, -1, n_heads, d_v).transpose(1, 2)  # v_s: [batch_size x n_heads x len_k x d_v]
        # all three have shape [6, 12, 30, 64]

        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1)  # attn_mask : [batch_size x n_heads x len_q x len_k]
        # [6, 12,30, 30]

        # now run ScaledDotProductAttention (see 6. above)
        ## it returns context: [batch_size x n_heads x len_q x d_v] and attn: [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        context, attn = ScaledDotProductAttention()(q_s, k_s, v_s, attn_mask)  # context vectors and attention weights, [6, 12, 30, 64] and [6, 12, 30, 30]

        # contiguous() makes the transposed tensor contiguous in memory so that view() can reshape it
        # concatenate the 12 heads back into a single [6, 30, 768] tensor
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_heads * d_v)  #  context: [batch_size x len_q x n_heads * d_v]

        # project back to d_model and apply the residual connection + LayerNorm
        output = self.fc(context)  # output: [batch_size x len_q x d_model] = [6, 30, 768]
        return self.norm(output + residual), attn  # returns [6, 30, 768] and attn [6, 12, 30, 30]

## 7. PoswiseFeedForwardNet
# Note: unlike the original Transformer, this simplified feed-forward block has no residual connection or LayerNorm of its own
class PoswiseFeedForwardNet(nn.Module):
    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc1 = nn.Linear(d_model, d_ff)  # (768, 3072); two linear layers used for the position-wise feature transformation
        self.fc2 = nn.Linear(d_ff, d_model)  # (3072, 768)

    # x is the output of the self-attention sub-layer (enc_outputs), shape [6, 30, 768]
    def forward(self, x):
        # (batch_size, len_seq, d_model) -> (batch_size, len_seq, d_ff) -> (batch_size, len_seq, d_model)
        return self.fc2(gelu(self.fc1(x)))  # BERT uses GELU here instead of the ReLU used in the original Transformer
## 4.
## EncoderLayer: multi-head self-attention followed by the position-wise feed-forward network, the same structure as a Transformer encoder layer
class EncoderLayer(nn.Module):
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    # enc_inputs: [6, 30, 768]; enc_self_attn_mask: [6, 30, 30]
    def forward(self, enc_inputs, enc_self_attn_mask):
        # self-attention sub-layer (first part of the Transformer encoder layer); enc_inputs has shape [batch_size x seq_len x d_model] and is used as Q, K and V (see MultiHeadAttention, 5. and 6. above)
        # the first return value is the projected context plus the residual, enc_outputs: [6, 30, 768]; attn (attention weights): [6, 12, 30, 30]
        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask)  # enc_inputs to same Q,K,V

        # (second part of the Transformer encoder layer) position-wise feed-forward network; note this simplified version omits the residual connection and LayerNorm around the FFN
        enc_outputs = self.pos_ffn(enc_outputs)  # enc_outputs: [batch_size x len_q x d_model]
        # output shapes are unchanged: [6, 30, 768] and [6, 12, 30, 30]
        return enc_outputs, attn  # return the encoder output and the attention weights

## 1. Overall BERT model architecture
class BERT(nn.Module):
    def __init__(self):
        super(BERT, self).__init__()
        self.embedding = Embedding()  ## embedding layer: token + position + segment embeddings
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])  ## stack n_layers encoder layers; EncoderLayer is defined above
        self.fc = nn.Linear(d_model, d_model)  ## pooler linear layer (d_model -> d_model) for the [CLS] / NSP head
        self.activ1 = nn.Tanh()  ## activation for the NSP head
        self.linear = nn.Linear(d_model, d_model)  # linear layer for the MLM head
        self.activ2 = gelu  ## activation for the MLM head; GELU can loosely be seen as combining ideas from dropout and ReLU
        self.norm = nn.LayerNorm(d_model)   # layer normalization
        self.classifier = nn.Linear(d_model, 2)  ## NSP classification layer: d_model -> 2 (IsNext / NotNext)
        # decoder is shared with embedding layer
        embed_weight = self.embedding.tok_embed.weight   # tok_embed weight: (29, 768)
        n_vocab, n_dim = embed_weight.size()  # embed_weight shape: [29 (vocab size), 768 (embedding dim)]
        self.decoder = nn.Linear(n_dim, n_vocab, bias=False)     # maps 768 -> 29
        self.decoder.weight = embed_weight  # weight tying: the MLM output projection shares the token-embedding matrix
        self.decoder_bias = nn.Parameter(torch.zeros(n_vocab))  # nn.Parameter turns a plain (non-trainable) tensor into a trainable parameter registered with the module

    # input shapes: input_ids [6, 30], segment_ids [6, 30], masked_pos [6, 5]
    def forward(self, input_ids, segment_ids, masked_pos):

        # output shape [6, 30, 768]: token, position and segment embeddings summed and passed through LayerNorm
        output = self.embedding(input_ids, segment_ids)  ## embeddings for input_ids together with segment_ids

        # shape [6, 30, 30], expanded from [6, 1, 30]: every query position sees the same key-padding mask for its sequence
        enc_self_attn_mask = get_attn_pad_mask(input_ids, input_ids)  # boolean mask; True marks padded positions

        for layer in self.layers:
            output, enc_self_attn = layer(output, enc_self_attn_mask)
            # each layer returns the updated output [6, 30, 768] and attention weights [6, 12, 30, 30]
            # output : [batch_size, len, d_model], attn : [batch_size, n_heads, len, len]

        # it will be decided by first token(CLS)

        h_pooled = self.activ1(self.fc(output[:, 0]))  # [batch_size, d_model]; pool the [CLS] (first token) hidden state for the NSP binary classification
        # h_pooled shape: [6, 768]

        logits_clsf = self.classifier(h_pooled)  # [batch_size, 2]: NSP logits, shape [6, 2]

        # masked_pos shape [6, 5]; masked_pos[:, :, None] has shape [6, 5, 1]
        masked_pos = masked_pos[:, :, None].expand(-1, -1, output.size(-1))  # [batch_size, max_pred, d_model]; e.g. one row of masked_pos is [6, 5, 17, 0, 0]
        # expanded masked_pos shape: [6, 5, 768]

        # get masked positions from the final output of the transformer; output shape is [6, 30, 768]
        # torch.gather uses the ids in masked_pos to pick the corresponding hidden vectors out of output: with batch_size=1 and masked_pos=[6, 5, 17, 0, 0],
        # it selects rows 6, 5 and 17 (0-indexed) of output[0], i.e. the vectors whose ground-truth ids are [13, 9, 16]; see the small standalone gather sketch after this listing
        h_masked = torch.gather(output, 1, masked_pos)  # masking position [batch_size, max_pred, d_model]
        # the gather runs along dim 1 (the sequence dimension), indexed by the masked positions
        # h_masked shape [6, 5, 768]

        h_masked = self.norm(self.activ2(self.linear(h_masked)))  # linear transform + GELU + LayerNorm
        # shape[6, 5, 768]

        logits_lm = self.decoder(h_masked) + self.decoder_bias  # [batch_size, max_pred, n_vocab]
        # project onto the vocabulary with the tied embedding weights to get the MLM logits
        # shape [6, 5, 29]

        return logits_lm, logits_clsf

if __name__ == '__main__':
    # BERT Parameters (hyper-parameter settings)
    maxlen = 30     # maximum sequence length; choose a value that covers ~95-99% of your sentences rather than the mean (taking the true maximum also works)
    batch_size = 6  # number of sentence pairs fed to the model per batch
    max_pred = 5    # max tokens of prediction
    n_layers = 6    # number of encoder layers; BERT-base uses 12, reduced to 6 here to keep training light
    n_heads = 12    # number of heads in Multi-Head Attention
    d_model = 768   # Embedding Size
    d_ff = 3072     # 4*d_model, FeedForward dimension
    d_k = d_v = 64  # dimension of K(=Q), V
    n_segments = 2

    text = (
        'Hello, how are you? I am Romeo.\n'
        'Hello, Romeo My name is Juliet. Nice to meet you.\n'
        'Nice meet you too. How are you today?\n'
        'Great. My baseball team won the competition.\n'
        'Oh Congratulations, Juliet\n'
        'Thanks you Romeo'
    )

    # preprocessing: strip punctuation from the raw text and split it into a list of sentences
    sentences = re.sub("[.,!?\\-]", '', text.lower()).split('\n')  # strip '.', ',', '!', '?', '-'
    word_list = list(set(" ".join(sentences).split()))  # unique words; note that set ordering is not deterministic, so word ids (and hence token_list) can differ between runs
    word_dict = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[MASK]': 3}  # word -> id dictionary, special tokens first

    # map each word to an integer id
    for i, w in enumerate(word_list):
        word_dict[w] = i + 4
    number_dict = {i: w for i, w in enumerate(word_dict)}  # id -> word dictionary
    vocab_size = len(word_dict)  # 29

    token_list = list()  # token-id lists for all sentences
    for sentence in sentences:  # sentences is the list of cleaned sentences
        arr = [word_dict[s] for s in sentence.split()]  # id list for one sentence
        token_list.append(arr)  # collect the per-sentence id lists

    batch = make_batch()  # batch is a list of samples
    input_ids, segment_ids, masked_tokens, masked_pos, isNext = map(torch.LongTensor, zip(*batch))  # unpack the batch fields into tensors
    # shapes after padding: input_ids [6, 30], segment_ids [6, 30], masked_tokens [6, 5], masked_pos [6, 5], isNext [6]
    # input_ids: sentence-pair ids plus special tokens; segment_ids: sentence membership; masked_tokens: ground-truth labels for the MLM loss; masked_pos: the masked positions

    model = BERT()
    criterion = nn.CrossEntropyLoss(ignore_index=0)
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    for epoch in range(100):
        optimizer.zero_grad()  # reset gradients

        # input shapes: input_ids [6, 30], segment_ids [6, 30], masked_pos [6, 5]
        logits_lm, logits_clsf = model(input_ids, segment_ids, masked_pos)  ## logits_lm: [6, 5, 29] = [batch_size, max_pred, vocab_size]; logits_clsf: [6, 2]
        # shape [6, 5, 29]

        loss_lm = criterion(logits_lm.transpose(1, 2), masked_tokens)  # masked LM loss between the predictions and the ground-truth masked_tokens [6, 5]
        loss_lm = (loss_lm.float()).mean()  # redundant (CrossEntropyLoss already returns the mean) but harmless
        loss_clsf = criterion(logits_clsf, isNext)  # NSP classification loss against the isNext labels
        loss = loss_lm + loss_clsf
        if (epoch + 1) % 10 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
        loss.backward()
        optimizer.step()

    # Predict masked tokens and isNext
    input_ids, segment_ids, masked_tokens, masked_pos, isNext = map(torch.LongTensor, zip(batch[0]))
    print(text)
    print([number_dict[w.item()] for w in input_ids[0] if number_dict[w.item()] != '[PAD]'])

    logits_lm, logits_clsf = model(input_ids, segment_ids, masked_pos)
    logits_lm = logits_lm.data.max(2)[1][0].data.numpy()
    print('masked tokens list : ',[pos.item() for pos in masked_tokens[0] if pos.item() != 0])
    print('predict masked tokens list : ',[pos for pos in logits_lm if pos != 0])

    logits_clsf = logits_clsf.data.max(1)[1].data.numpy()[0]
    print('isNext : ', True if isNext else False)
    print('predict isNext : ',True if logits_clsf else False)
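As noted in the comments inside BERT.forward, torch.gather is what pulls the hidden states at the masked positions out of the encoder output before the MLM head. A minimal standalone sketch with made-up shapes (batch_size=2, seq_len=4, d_model=3, max_pred=2) shows the mechanics:

import torch

batch_size, seq_len, d_model, max_pred = 2, 4, 3, 2
# Fake "encoder output": values 0..23 so the gathered rows are easy to recognise.
output = torch.arange(batch_size * seq_len * d_model, dtype=torch.float).view(batch_size, seq_len, d_model)
masked_pos = torch.LongTensor([[1, 3],   # sample 0: positions 1 and 3 were masked
                               [2, 0]])  # sample 1: position 2 was masked, the trailing 0 is padding
# Expand the position indices so each one addresses a full d_model-sized vector.
index = masked_pos[:, :, None].expand(-1, -1, d_model)  # [batch_size, max_pred, d_model]
h_masked = torch.gather(output, 1, index)               # [batch_size, max_pred, d_model]
print(h_masked[0])  # rows 1 and 3 of output[0]
print(h_masked[1])  # rows 2 and 0 of output[1]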






References:
[1]https://www.bilibili.com/video/av464324279
[2]https://www.bilibili.com/video/av799568628
[3]https://www.bilibili.com/video/BV1Kb4y187G6
