BERT Core Source Code Reading Notes

I. Reference Links

II. Code

import math
import re
from random import *
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

# BERT Parameters
maxlen = 30
batch_size = 6
max_pred = 5 # max tokens of prediction (maximum number of masked tokens per sequence)
n_layers = 6
n_heads = 12
d_model = 768
d_ff = 768*4 # 4*d_model, FeedForward dimension
d_k = d_v = 64  # dimension of K(=Q), V
n_segments = 2

text = (
    'Hello, how are you? I am Romeo.\n'
    'Hello, Romeo My name is Juliet. Nice to meet you.\n'
    'Nice meet you too. How are you today?\n'
    'Great. My baseball team won the competition.\n'
    'Oh Congratulations, Juliet\n'
    'Thanks you Romeo'
)

# strip punctuation
sentences = re.sub("[.,!?\\-]", '', text.lower()).split('\n') # filter '.', ',', '?', '!'
# vocabulary
word_list = list(set(" ".join(sentences).split()))
# build the word -> index dictionary
word_dict = {'[PAD]' : 0, '[CLS]' : 1, '[SEP]' : 2, '[MASK]' : 3} # the four special tokens come first
for i, w in enumerate(word_list):
    word_dict[w] = i + 4
number_dict = {i: w for i, w in enumerate(word_dict)} # invert the word -> index mapping into index -> word
vocab_size = len(word_dict)
# convert each sentence from a list of words into a list of token indices
token_list = list()
for sentence in sentences:
    arr = [word_dict[s] for s in sentence.split()]
    token_list.append(arr)

# build one batch
# sample IsNext and NotNext pairs so that each makes up half of the (small) batch
def make_batch():
    batch = []
    positive = negative = 0
    while positive != batch_size/2 or negative != batch_size/2:
        tokens_a_index, tokens_b_index= randrange(len(sentences)), randrange(len(sentences)) # sample random index in sentences
        tokens_a, tokens_b= token_list[tokens_a_index], token_list[tokens_b_index]
        # input: [CLS] sentence A [SEP] sentence B [SEP]
        input_ids = [word_dict['[CLS]']] + tokens_a + [word_dict['[SEP]']] + tokens_b + [word_dict['[SEP]']]
        # segment ids: 0 for [CLS] + sentence A + [SEP], 1 for sentence B + [SEP]
        segment_ids = [0] * (1 + len(tokens_a) + 1) + [1] * (len(tokens_b) + 1)

        # MASK LM
        n_pred = min(max_pred, max(1, int(round(len(input_ids) * 0.15)))) # mask 15% of the tokens in this sequence (at least 1, at most max_pred)
        cand_maked_pos = [i for i, token in enumerate(input_ids)
                          if token != word_dict['[CLS]'] and token != word_dict['[SEP]']]
        shuffle(cand_maked_pos) # shuffle, then take the first n_pred positions as the tokens to predict
        masked_tokens, masked_pos = [], []
        for pos in cand_maked_pos[:n_pred]:
            masked_pos.append(pos)
            masked_tokens.append(input_ids[pos])
            if random() < 0.8:  # 80% of the time: replace with [MASK]
                input_ids[pos] = word_dict['[MASK]'] # make mask
            elif random() < 0.5:  # half of the remaining 20% (~10%): replace with a random word
                index = randint(0, vocab_size - 1) # random index in vocabulary (may occasionally pick a special token)
                input_ids[pos] = word_dict[number_dict[index]] # replace
            # otherwise (~10%): keep the original token unchanged

        # Zero Paddings
        n_pad = maxlen - len(input_ids)
        input_ids.extend([0] * n_pad)
        segment_ids.extend([0] * n_pad)

        # Zero Padding (100% - 15%) tokens
        # pad masked_tokens and masked_pos up to max_pred
        if max_pred > n_pred:
            n_pad = max_pred - n_pred
            masked_tokens.extend([0] * n_pad)
            masked_pos.extend([0] * n_pad)
        # segment_ids and input_ids now have length maxlen; masked_tokens and masked_pos have length max_pred
        if tokens_a_index + 1 == tokens_b_index and positive < batch_size/2:
            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, True]) # IsNext: sentence B follows sentence A
            positive += 1
        elif tokens_a_index + 1 != tokens_b_index and negative < batch_size/2:
            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, False]) # NotNext: sentence B does not follow sentence A
            negative += 1
    return batch
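
# Quick sanity check (illustrative, not part of the original notes): draw one batch and
# inspect a single example; the underscore-prefixed names are demo-only.
_demo_ids, _demo_segs, _demo_mtoks, _demo_mpos, _demo_is_next = make_batch()[0]
print(len(_demo_ids), len(_demo_segs), len(_demo_mtoks), len(_demo_mpos), _demo_is_next)  # 30 30 5 5 and an IsNext flag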

# mask out the padding positions so attention ignores them
def get_attn_pad_mask(seq_q, seq_k):
    # seq_q = seq_k = [batch_size, maxlen]
    batch_size, len_q = seq_q.size()
    batch_size, len_k = seq_k.size()
    # eq(zero) is PAD token
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # batch_size x 1 x len_k(=len_q), one is masking
    return pad_attn_mask.expand(batch_size, len_q, len_k)  # batch_size x len_q x len_k
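
# Toy example (illustrative, demo-only variables): token id 0 is [PAD], so every query row
# masks the padded key columns.
_toy = torch.LongTensor([[5, 7, 0, 0]])          # one sequence of length 4 with two pads
print(get_attn_pad_mask(_toy, _toy))             # shape [1, 4, 4]; the last two columns are True in every row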

def gelu(x):
    "Implementation of the gelu activation function by Hugging Face"
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
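
# Sanity check (illustrative; assumes your PyTorch version ships torch.nn.functional.gelu,
# whose default is the same exact erf-based formulation as the function above):
import torch.nn.functional as F
_x = torch.randn(4)
print(torch.allclose(gelu(_x), F.gelu(_x), atol=1e-6))  # expected: True
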
# embedding module: token + position + segment embeddings
class Embedding(nn.Module):
    def __init__(self):
        super(Embedding, self).__init__()
        self.tok_embed = nn.Embedding(vocab_size, d_model)  # token embedding
        self.pos_embed = nn.Embedding(maxlen, d_model)  # position embedding
        self.seg_embed = nn.Embedding(n_segments, d_model)  # segment(token type) embedding
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x, seg):
        # x = seg = [batch_size, maxlen]
        seq_len = x.size(1)
        pos = torch.arange(seq_len, dtype=torch.long)
        pos = pos.unsqueeze(0).expand_as(x)  # (seq_len,) -> (batch_size, seq_len)
        embedding = self.tok_embed(x) + self.pos_embed(pos) + self.seg_embed(seg)
        return self.norm(embedding)
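
# Shape check (illustrative, demo-only variables): the Embedding module maps two
# [batch_size, maxlen] id tensors to one [batch_size, maxlen, d_model] tensor
# (token + position + segment, then LayerNorm).
_emb = Embedding()
_ids = torch.zeros(2, maxlen, dtype=torch.long)
_seg = torch.zeros(2, maxlen, dtype=torch.long)
print(_emb(_ids, _seg).shape)                    # torch.Size([2, 30, 768])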

class ScaledDotProductAttention(nn.Module):
    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k) # scores : [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        scores.masked_fill_(attn_mask, -1e9) # fill masked (padding) positions with a large negative value so softmax drives them to ~0
        attn = nn.Softmax(dim=-1)(scores)#[batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        context = torch.matmul(attn, V) #[batch_size x n_heads x len_q x d_v]
        return context, attn
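
# Toy demo (illustrative, demo-only variables): this layer computes
# Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V; after masked_fill_, the attention
# weight on a masked key position collapses to ~0.
_q = torch.randn(1, 1, 3, d_k)                   # [batch, heads, len_q, d_k]
_k = torch.randn(1, 1, 3, d_k)
_v = torch.randn(1, 1, 3, d_v)
_mask = torch.zeros(1, 1, 3, 3, dtype=torch.bool)
_mask[..., 2] = True                             # mask the third key position for every query
_ctx, _attn = ScaledDotProductAttention()(_q, _k, _v, _mask)
print(_attn[0, 0, :, 2])                         # ~0 everywhere
print(_ctx.shape)                                # torch.Size([1, 1, 3, 64])
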
# multi-head attention sub-layer
class MultiHeadAttention(nn.Module):
    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        # the output projection and LayerNorm are registered here so their parameters are learned
        # (creating them inside forward() would re-initialize them on every call)
        self.W_O = nn.Linear(n_heads * d_v, d_model)
        self.norm = nn.LayerNorm(d_model)
    def forward(self, Q, K, V, attn_mask):
        # Q: [batch_size x len_q x d_model], K: [batch_size x len_k x d_model], V: [batch_size x len_k x d_model]
        # attn_mask: [batch_size x maxlen x maxlen]
        residual, batch_size = Q, Q.size(0)
        # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        q_s = self.W_Q(Q).view(batch_size, -1, n_heads, d_k).transpose(1,2)  # q_s: [batch_size x n_heads x len_q x d_k]
        k_s = self.W_K(K).view(batch_size, -1, n_heads, d_k).transpose(1,2)  # k_s: [batch_size x n_heads x len_k x d_k]
        v_s = self.W_V(V).view(batch_size, -1, n_heads, d_v).transpose(1,2)  # v_s: [batch_size x n_heads x len_k x d_v]

        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size x n_heads x len_q x len_k]

        # context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        context, attn = ScaledDotProductAttention()(q_s, k_s, v_s, attn_mask)
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_heads * d_v) # context: [batch_size x len_q x n_heads * d_v]
        output = self.W_O(context) # output: [batch_size x len_q x d_model]
        return self.norm(output + residual), attn # [batch_size x len_q x d_model]
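
# Shape walk-through (illustrative, demo-only variables): projecting to n_heads * d_k and
# splitting heads turns [batch, seq, d_model] into [batch, n_heads, seq, d_k]; the throwaway
# Linear below exists only for this demo.
_x = torch.randn(2, maxlen, d_model)
_proj = nn.Linear(d_model, d_k * n_heads)(_x)    # [2, 30, 768]
print(_proj.view(2, -1, n_heads, d_k).transpose(1, 2).shape)  # torch.Size([2, 12, 30, 64])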


# position-wise feed-forward (FFN) sub-layer
class PoswiseFeedForwardNet(nn.Module):
    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)

    def forward(self, x):
        # (batch_size, len_seq, d_model) -> (batch_size, len_seq, d_ff) -> (batch_size, len_seq, d_model)
        return self.fc2(gelu(self.fc1(x)))

# one encoder block: multi-head self-attention followed by the position-wise feed-forward network
class EncoderLayer(nn.Module):
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()
    # enc_inputs = [batch_size, maxlen, d_model], enc_self_attn_mask = [batch_size, maxlen, maxlen]
    def forward(self, enc_inputs, enc_self_attn_mask):
        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask) # enc_inputs to same Q,K,V
        enc_outputs = self.pos_ffn(enc_outputs) # enc_outputs: [batch_size x len_q x d_model]
        return enc_outputs, attn

class BERT(nn.Module):
    def __init__(self):
        super(BERT, self).__init__()
        self.embedding = Embedding()
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
        self.fc = nn.Linear(d_model, d_model)
        self.activ1 = nn.Tanh()
        self.linear = nn.Linear(d_model, d_model)
        self.activ2 = gelu
        self.norm = nn.LayerNorm(d_model)
        self.classifier = nn.Linear(d_model, 2)
        # decoder is shared with the embedding layer: the MLM output weights are tied to the token embedding matrix
        embed_weight = self.embedding.tok_embed.weight
        n_vocab, n_dim = embed_weight.size()
        self.decoder = nn.Linear(n_dim, n_vocab, bias=False)
        self.decoder.weight = embed_weight
        self.decoder_bias = nn.Parameter(torch.zeros(n_vocab))

    def forward(self, input_ids, segment_ids, masked_pos):
        # input_ids = segment_ids = [batch_size, maxlen]
        # masked_pos = [batch_size, max_pred]
        # output = [batch_size, maxlen, d_model]
        output = self.embedding(input_ids, segment_ids) # tok_embed + pos_embed + seg_embed, then LayerNorm
        # enc_self_attn_mask = [batch_size, maxlen, maxlen]
        enc_self_attn_mask = get_attn_pad_mask(input_ids, input_ids)
        for layer in self.layers:
            output, enc_self_attn = layer(output, enc_self_attn_mask)
        # output: [batch_size, maxlen, d_model], attn: [batch_size, n_heads, maxlen, maxlen]
        # the next-sentence decision is made from the first token ([CLS])
        h_pooled = self.activ1(self.fc(output[:, 0])) # [batch_size, d_model]  pooled [CLS] hidden state, used to predict whether sentence B follows sentence A
        logits_clsf = self.classifier(h_pooled) # [batch_size, 2]  next-sentence prediction is a binary classification

        masked_pos = masked_pos[:, :, None].expand(-1, -1, output.size(-1)) # [batch_size, max_pred, d_model]
        # get masked position from final output of transformer.
        # gather the hidden states at the positions of the tokens to predict
        h_masked = torch.gather(output, 1, masked_pos) # masking position [batch_size, max_pred, d_model]
        h_masked = self.norm(self.activ2(self.linear(h_masked)))
        logits_lm = self.decoder(h_masked) + self.decoder_bias # [batch_size, max_pred, n_vocab]

        return logits_lm, logits_clsf
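
# How the masked positions are picked out (illustrative, demo-only variables): torch.gather
# along dim 1 with an index expanded to [batch, max_pred, dim] selects the hidden state at
# each masked offset.
_hidden = torch.arange(2 * 4 * 3, dtype=torch.float).view(2, 4, 3)   # [batch=2, seq=4, dim=3]
_pos = torch.LongTensor([[1, 3], [0, 2]])                            # two masked offsets per example
print(torch.gather(_hidden, 1, _pos[:, :, None].expand(-1, -1, 3)))  # rows 1,3 of example 0 and rows 0,2 of example 1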




model = BERT()
criterion = nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.Adam(model.parameters(), lr=0.001) # optimizer
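
# Weight-tying check (illustrative): the MLM decoder reuses the token embedding matrix,
# so both modules hold the very same Parameter object.
print(model.decoder.weight is model.embedding.tok_embed.weight)      # expected: True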

batch = make_batch() # build one batch of data
# input_ids = segment_ids = [batch_size, maxlen]
# masked_tokens = masked_pos = [batch_size, max_pred]
# isNext = [batch_size]
input_ids, segment_ids, masked_tokens, masked_pos, isNext = zip(*batch)
input_ids, segment_ids, masked_tokens, masked_pos, isNext = \
    torch.LongTensor(input_ids),  torch.LongTensor(segment_ids), torch.LongTensor(masked_tokens), \
    torch.LongTensor(masked_pos), torch.LongTensor(isNext)

for epoch in range(1):
    optimizer.zero_grad()
    #logits_lm=[batch_size, max_pred, n_vocab]
    #logits_clsf=[batch_size, 2]
    logits_lm, logits_clsf = model(input_ids, segment_ids, masked_pos)
    loss_lm = criterion(logits_lm.transpose(1, 2), masked_tokens) # for masked LM
    loss_lm = (loss_lm.float()).mean()
    loss_clsf = criterion(logits_clsf, isNext) # for sentence classification
    loss = loss_lm + loss_clsf
    # with a single training epoch, report the loss every epoch
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
    loss.backward()
    optimizer.step()

# Predict masked tokens and isNext
input_ids, segment_ids, masked_tokens, masked_pos, isNext = batch[0]
print(text)
print([number_dict[w] for w in input_ids if number_dict[w] != '[PAD]'])

logits_lm, logits_clsf = model(torch.LongTensor([input_ids]), \
                               torch.LongTensor([segment_ids]), torch.LongTensor([masked_pos]))

logits_lm = logits_lm.data.max(2)[1][0].data.numpy()  # indices of the highest-scoring tokens, shape [max_pred]
print('masked tokens list : ',[pos for pos in masked_tokens if pos != 0])
print('predict masked tokens list : ',[pos for pos in logits_lm if pos != 0])

logits_clsf = logits_clsf.data.max(1)[1].data.numpy()[0]
print('isNext : ', True if isNext else False)
print('predict isNext : ',True if logits_clsf else False)
