The core of the Transformer is the self-attention mechanism.
1 Self-attention implementation code
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class ScaledDotProductAttention(nn.Module):
    ''' Scaled Dot-Product Attention '''

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        # Dropout guards against overfitting: during the forward pass it zeroes each
        # activation with a given probability, so the model cannot rely on any single
        # local feature and generalizes better.

    def forward(self, q, k, v, mask=None):
        # q: queries, used to match others
        # k: keys, to be matched against
        # v: values, the information to be extracted
        attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
        # q times k-transposed: the matrix product of "matching" and "to-be-matched"
        # vectors measures their similarity, which we call attention.
        # transpose(2, 3) swaps the last two dimensions of k; k has shape
        # (batch, n_head, len_k, d_k), so dim 0 is the batch, dim 1 the head,
        # dim 2 the sequence position and dim 3 the feature dimension.
        if mask is not None:
            attn = attn.masked_fill(mask == 0, -1e9)
            # masked_fill(condition, value) writes value into every element where the
            # condition is True; the condition broadcasts to the shape of attn.
            # Here positions where mask == 0 are set to -1e9 so that they receive
            # (almost) zero weight after the softmax.
        attn = self.dropout(F.softmax(attn, dim=-1))
        # softmax rescales the entries along one dimension into (0, 1) so that they
        # sum to 1; dropout is applied on top against overfitting.
        output = torch.matmul(attn, v)
        # Multiply the attention weights with the value matrix to get the output.
        return output, attn
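To make the tensor shapes concrete, here is a minimal usage sketch; all the sizes below are made-up example values, not anything fixed by the module itself.

sz_b, n_head, len_q, len_k, d_k, d_v = 2, 8, 5, 7, 64, 64   # illustrative sizes
attn_layer = ScaledDotProductAttention(temperature=d_k ** 0.5)
q = torch.randn(sz_b, n_head, len_q, d_k)
k = torch.randn(sz_b, n_head, len_k, d_k)
v = torch.randn(sz_b, n_head, len_k, d_v)
mask = torch.ones(sz_b, 1, len_q, len_k)   # 1 = attend, 0 = mask out
output, attn = attn_layer(q, k, v, mask=mask)
print(output.shape)   # torch.Size([2, 8, 5, 64])
print(attn.shape)     # torch.Size([2, 8, 5, 7])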
2 MultiHeadAttention
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        # Constructed with the number of heads and the feature dimensions of the projections.
        super().__init__()
        self.n_head = n_head
        # Number of heads: with 2 heads there are 2 sets of projection matrices instead of 1.
        self.d_k = d_k
        # Feature dimension of the key projection (keys are the vectors to be matched).
        self.d_v = d_v
        # Feature dimension of the value projection (the information to be extracted).
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        # Linear map projecting the d_model input features to n_head * d_k.
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        # Projection for the keys.
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        # Projection for the values.
        self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
        # Maps the concatenated heads back to the input dimension d_model.
        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
        # The scaled dot-product attention defined above.
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        # A normalization variant that normalizes over the feature (last) dimension.

    def forward(self, q, k, v, mask=None):
        # Takes the three input tensors q, k, v.
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        # In the computation below, the heads simply add one extra dimension.
        sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
        residual = q  # saved for the residual connection

        # Pass through the pre-attention projection: b x lq x (n*dv)
        # Separate different heads: b x lq x n x dv
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)  # project, then split into heads
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)

        # Transpose for attention dot product: b x n x lq x dv
        q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
        # Now the layout is batch x head x sequence length x feature.
        if mask is not None:
            mask = mask.unsqueeze(1)
            # For head axis broadcasting: mask gains a size-1 head dimension,
            # so the same mask is applied to every head.
        q, attn = self.attention(q, k, v, mask=mask)
        # Scaled dot-product attention per head; returns the updated queries and the
        # attention matrix computed from q and k. The inputs have shapes:
        # q    (sz_b, n_head, len_q, d_k)
        # k    (sz_b, n_head, len_k, d_k)
        # v    (sz_b, n_head, len_v, d_v)

        # Transpose to move the head dimension back: b x lq x n x dv
        # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
        q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
        # contiguous() gives view() a contiguous memory layout; reshape() would also work.
        # q is now (sz_b, len_q, n_head * d_v)
        q = self.dropout(self.fc(q))
        # Fully connected layer back to d_model, plus dropout against overfitting.
        q += residual
        # Add the unprocessed input back in: the residual connection.
        q = self.layer_norm(q)  # layer normalization over the feature dimension
        return q, attn
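The same kind of shape sketch for the multi-head module; the batch size, sequence length and d_model below are again just example values.

mha = MultiHeadAttention(n_head=8, d_model=512, d_k=64, d_v=64)
x = torch.randn(2, 10, 512)              # (batch, seq_len, d_model)
pad_mask = torch.ones(2, 1, 10)          # (batch, 1, seq_len), 1 = keep
out, attn = mha(x, x, x, mask=pad_mask)  # self-attention: q = k = v = x
print(out.shape)    # torch.Size([2, 10, 512])
print(attn.shape)   # torch.Size([2, 8, 10, 10])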
Having more or less made sense of the single-head and multi-head self-attention code, let's look at the overall structure of the Transformer, which uses the Encoder and Decoder classes.
class Transformer(nn.Module):
    ''' A sequence to sequence model with attention mechanism. '''
    # A sequence-to-sequence model built on the attention mechanism.

    def __init__(
            self, n_src_vocab, n_trg_vocab, src_pad_idx, trg_pad_idx,
            d_word_vec=512, d_model=512, d_inner=2048,
            n_layers=6, n_head=8, d_k=64, d_v=64, dropout=0.1, n_position=200,
            trg_emb_prj_weight_sharing=True, emb_src_trg_weight_sharing=True):
        super().__init__()
        self.src_pad_idx, self.trg_pad_idx = src_pad_idx, trg_pad_idx
        self.encoder = Encoder(
            n_src_vocab=n_src_vocab, n_position=n_position,
            d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
            n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
            pad_idx=src_pad_idx, dropout=dropout)  # encoder definition
        self.decoder = Decoder(
            n_trg_vocab=n_trg_vocab, n_position=n_position,
            d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
            n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
            pad_idx=trg_pad_idx, dropout=dropout)  # decoder definition
        self.trg_word_prj = nn.Linear(d_model, n_trg_vocab, bias=False)

        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

        assert d_model == d_word_vec, \
            'To facilitate the residual connections, \
             the dimensions of all module outputs shall be the same.'
        # Because of the residual connections, every module must output the same dimension.

        self.x_logit_scale = 1.
        if trg_emb_prj_weight_sharing:
            # Share the weight between target word embedding & last dense layer.
            self.trg_word_prj.weight = self.decoder.trg_word_emb.weight
            self.x_logit_scale = (d_model ** -0.5)
        if emb_src_trg_weight_sharing:
            # Share the embedding weights between the source and target vocabularies.
            self.encoder.src_word_emb.weight = self.decoder.trg_word_emb.weight

    def forward(self, src_seq, trg_seq):
        src_mask = get_pad_mask(src_seq, self.src_pad_idx)
        trg_mask = get_pad_mask(trg_seq, self.trg_pad_idx) & get_subsequent_mask(trg_seq)
        enc_output, *_ = self.encoder(src_seq, src_mask)                          # encode
        dec_output, *_ = self.decoder(trg_seq, trg_mask, enc_output, src_mask)    # decode
        seq_logit = self.trg_word_prj(dec_output) * self.x_logit_scale
        return seq_logit.view(-1, seq_logit.size(2))
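The forward pass uses two small helpers, get_pad_mask and get_subsequent_mask, that are defined elsewhere in the same repository. From memory they look roughly like this (a sketch, worth checking against the actual source): get_pad_mask marks non-padding tokens, and get_subsequent_mask hides future positions so the decoder cannot peek ahead.

def get_pad_mask(seq, pad_idx):
    # True where the token is not padding: (batch, 1, seq_len)
    return (seq != pad_idx).unsqueeze(-2)

def get_subsequent_mask(seq):
    ''' For masking out the subsequent info. '''
    sz_b, len_s = seq.size()
    subsequent_mask = (1 - torch.triu(
        torch.ones((1, len_s, len_s), device=seq.device), diagonal=1)).bool()
    return subsequent_mask   # lower-triangular: position i may only attend to positions <= i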
Now let's look at the Encoder class.
class Encoder(nn.Module):
    ''' An encoder model with a self-attention mechanism. '''

    def __init__(
            self, n_src_vocab, d_word_vec, n_layers, n_head, d_k, d_v,
            d_model, d_inner, pad_idx, dropout=0.1, n_position=200):
        super().__init__()
        self.src_word_emb = nn.Embedding(n_src_vocab, d_word_vec, padding_idx=pad_idx)
        self.position_enc = PositionalEncoding(d_word_vec, n_position=n_position)
        self.dropout = nn.Dropout(p=dropout)
        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
            for _ in range(n_layers)])
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    def forward(self, src_seq, src_mask, return_attns=False):
        enc_slf_attn_list = []

        # -- Forward
        enc_output = self.dropout(self.position_enc(self.src_word_emb(src_seq)))
        enc_output = self.layer_norm(enc_output)

        for enc_layer in self.layer_stack:
            enc_output, enc_slf_attn = enc_layer(enc_output, slf_attn_mask=src_mask)
            enc_slf_attn_list += [enc_slf_attn] if return_attns else []

        if return_attns:
            return enc_output, enc_slf_attn_list
        return enc_output,
        # Note the trailing comma: a one-element tuple is returned, so the caller's
        # "enc_output, *_ = self.encoder(...)" unpacks correctly.
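A small usage sketch of the Encoder on its own (vocabulary size, pad index and tensor sizes are made up here, and it assumes the EncoderLayer, feed-forward and PositionalEncoding classes discussed below are available):

PAD_IDX = 0                                       # illustrative pad index
enc = Encoder(n_src_vocab=1000, d_word_vec=512, n_layers=6, n_head=8,
              d_k=64, d_v=64, d_model=512, d_inner=2048, pad_idx=PAD_IDX)
src_seq = torch.randint(1, 1000, (2, 10))         # (batch, src_len) token ids
src_mask = get_pad_mask(src_seq, PAD_IDX)         # (batch, 1, src_len)
enc_output, *_ = enc(src_seq, src_mask)
print(enc_output.shape)                           # torch.Size([2, 10, 512])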
You can see that it directly uses the EncoderLayer class, so let's look at EncoderLayer in detail.
class EncoderLayer(nn.Module):
    ''' Compose with two layers '''
    # Composed of two sub-layers.

    def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)  # multi-head attention
        self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)  # position-wise feed-forward network

    def forward(self, enc_input, slf_attn_mask=None):
        enc_output, enc_slf_attn = self.slf_attn(
            enc_input, enc_input, enc_input, mask=slf_attn_mask)
        # The three tensors given to the multi-head attention are all the same
        # input features, which is what makes it self-attention.
        enc_output = self.pos_ffn(enc_output)
        # Apply the position-wise feed-forward network to the output.
        return enc_output, enc_slf_attn
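The second sub-layer, PositionwiseFeedForward, is not shown in this post. Despite the similar-sounding name it is not the positional encoding: it is a two-layer MLP applied to every position independently, with its own residual connection and layer norm. In the same repository it looks roughly like this (a sketch, check the actual source):

class PositionwiseFeedForward(nn.Module):
    ''' A two-feed-forward-layer module '''

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_in, d_hid)   # d_model -> d_inner
        self.w_2 = nn.Linear(d_hid, d_in)   # d_inner -> d_model
        self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x
        x = self.w_2(F.relu(self.w_1(x)))   # same MLP at every position
        x = self.dropout(x)
        x += residual                       # residual connection
        x = self.layer_norm(x)
        return x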
Positional encoding came up in the Encoder above (position_enc); let's look at this part in detail.
class PositionalEncoding(nn.Module):
    # Positional encoding lets the model make use of the order of the sequence.
    # Each dimension of the encoding corresponds to a sinusoid.

    def __init__(self, d_hid, n_position=200):
        super(PositionalEncoding, self).__init__()
        # Not a parameter: the table is registered as a buffer, so it is saved
        # with the model but never updated by the optimizer.
        self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))

    def _get_sinusoid_encoding_table(self, n_position, d_hid):
        ''' Sinusoid position encoding table '''

        def get_position_angle_vec(position):
            return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

        sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
        return torch.FloatTensor(sinusoid_table).unsqueeze(0)  # (1, N, d)

    def forward(self, x):
        # x: (B, N, d); add the (detached) encoding for the first N positions.
        return x + self.pos_table[:, :x.size(1)].clone().detach()
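A quick check of how the encoding behaves (sizes are illustrative): the table follows PE(pos, 2i) = sin(pos / 10000^(2i/d)) and PE(pos, 2i+1) = cos(pos / 10000^(2i/d)), and the forward pass simply adds it to the embeddings.

pos_enc = PositionalEncoding(d_hid=512, n_position=200)
x = torch.zeros(1, 10, 512)        # pretend embeddings for 10 positions
out = pos_enc(x)
print(out.shape)                   # torch.Size([1, 10, 512])
# With zero input, the output equals the raw table for the first 10 positions:
print(torch.allclose(out, pos_enc.pos_table[:, :10]))   # True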