Attention Is All You Need - The Transformer, Explained Simply and in Depth
This article is a translation of The Annotated Transformer by the Harvard NLP group. Where the translation is imprecise, please refer to the original: The Annotated Transformer (harvard.edu).
After the Transformer was published in 2017 it attracted a great deal of attention: it not only brought a very large improvement to machine translation, but also achieved strong results on other NLP tasks, displacing the traditional approaches (CNNs, RNNs) and reaching results that were hard to obtain with them.
This article explains and annotates the original paper "Attention Is All You Need", but reorders the paper's content and removes some parts. Everything here can be run directly in a Jupyter Notebook, about 400 lines of code in total, and it processes roughly 27,000 tokens per second on 4 GPUs.
The implementation is based on PyTorch, so a PyTorch environment is needed first. The notebook source can be run as-is on github, or on google colab with a free GPU.
1. Environment Setup
Besides the basic PyTorch environment, the following dependencies also need to be installed:
# !pip install http://download.pytorch.org/whl/cu80/torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl numpy matplotlib spacy torchtext seaborn
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import matplotlib.pyplot as plt
import seaborn
seaborn.set_context(context="talk")
%matplotlib inline
2. Model Architecture
Most competitive neural sequence-to-sequence translation models have an encoder-decoder structure (NEURAL MACHINE TRANSLATION BY JOINTLY LEARNING TO ALIGN AND TRANSLATE). The encoder maps an input sequence of symbols $(x_1, \dots, x_n)$ to a sequence of continuous representations $z = (z_1, \dots, z_n)$. Given $z$, the decoder generates an output sequence $(y_1, \dots, y_m)$ one element at a time, in an auto-regressive fashion (Generating Sequences With Recurrent Neural Networks): when generating the next symbol, the previously generated symbols are consumed together as additional input.
EncoderDecoder
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
Generator
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
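As a quick sanity check (an illustrative sketch; the batch size, sequence length, and vocabulary size below are assumed toy values), the Generator projects each d_model-dimensional vector to log-probabilities over the vocabulary:
# Toy shape check (assumed sizes): map d_model=512 vectors to a vocab of 10.
gen = Generator(d_model=512, vocab=10)
x = Variable(torch.randn(2, 7, 512))      # (batch, target_len, d_model)
log_probs = gen(x)                        # (2, 7, 10); exp of each row sums to 1
print(log_probs.size())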
The figure below shows the overall Transformer architecture, built from self-attention, point-wise operations, and fully connected layers: the left half is the Encoder and the right half is the Decoder.
Encoder and Decoder Stacks
Encoder
The encoder is a stack of 6 identical layers, shown in the left half of the figure above. The encoder input is the positional encoding added to the input embedding. The data then enters self-attention, which here is split into multiple heads; a residual connection and layer normalization follow, completing the first sub-layer.
It then enters the second sub-layer, the feed-forward layer, made up of two fully connected networks, again followed by a residual connection and layer normalization.
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
Each of the two sub-layers is wrapped in a residual connection (Deep Residual Learning for Image Recognition) followed by layer normalization (Layer Normalization).
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
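As a quick numerical check (an illustrative sketch with a random toy tensor), LayerNorm at initialization (a_2 = 1, b_2 = 0) gives each position roughly zero mean and unit standard deviation over the feature dimension:
# Toy check (assumed sizes): normalize a tensor with mean 7 and std 3 per feature.
norm = LayerNorm(features=512)
x = Variable(torch.randn(2, 5, 512) * 3 + 7)
y = norm(x)
print(y.data.mean(-1).abs().max(), y.data.std(-1).mean())  # ~0 and ~1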
Every sub-layer therefore computes LayerNorm(x + Sublayer(x)), where Sublayer(x) is the function implemented by the sub-layer itself. In this code the normalization is applied first, the sub-layer output is passed through dropout, and the residual connection is added afterwards.
To make these residual connections possible, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension 512.
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
Each encoder layer contains two sub-layers: the first is multi-head self-attention, and the second is a position-wise fully connected feed-forward network. Note that each sub-layer applies the normalization and residual connection described above, and the output of the whole stack is layer-normalized one final time.
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
Decoder
The decoder is also a stack of 6 identical layers.
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
As shown in the figure, besides the two sub-layers found in each encoder layer, each decoder layer inserts a third sub-layer that performs multi-head attention over the encoder output (encoder-decoder attention). Each sub-layer again uses layer normalization and a residual connection, and the stack ends with a final layer normalization.
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"Follow Figure 1 (right) for connections."
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
In the decoder's self-attention the sequence is masked: when predicting the next token the model must not see positions that come after it, i.e. it cannot peek into the future.
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
plt.figure(figsize=(5,5))
plt.imshow(subsequent_mask(20)[0])
None
Visualization of the mask: each position (row) may attend only to itself and earlier positions (columns).
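For a small concrete view (illustrative, size 5 assumed), printing the mask directly shows the lower-triangular pattern:
# Each row i allows attention to positions 0..i only (lower triangle is 1/True).
print(subsequent_mask(5)[0])
# 1 0 0 0 0
# 1 1 0 0 0
# 1 1 1 0 0
# 1 1 1 1 0
# 1 1 1 1 1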
Attention
The attention mechanism operates on queries, keys, and values, all of which are vectors. The query and key determine, for each token, an attention weight with respect to every other token; the output for that token is the weighted sum of the values, which gives the token's contextual representation within the sentence.
We call this "Scaled Dot-Product Attention". The queries and keys are vectors of dimension $d_k$ and the values are of dimension $d_v$. We compute the dot product of the query with each key, divide by $\sqrt{d_k}$, and apply a softmax to obtain the weights on the values.
In practice, we pack all the queries, keys, and values of a sentence into matrices $Q$, $K$, and $V$, and compute:
$$\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\left(\frac{QK^T}{\sqrt{d_k}}\right)V$$
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
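A minimal usage sketch (random toy tensors assumed): attention over a batch of 2 sequences of length 4 with $d_k = 8$, using the subsequent mask defined above.
# Illustrative call: out is the attended values, p the attention weights.
q = Variable(torch.randn(2, 4, 8))
k = Variable(torch.randn(2, 4, 8))
v = Variable(torch.randn(2, 4, 8))
mask = Variable(subsequent_mask(4))     # (1, 4, 4), broadcasts over the batch
out, p = attention(q, k, v, mask=mask)
print(out.size(), p.size())             # (2, 4, 8) and (2, 4, 4)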
The two most commonly used attention functions are additive (Bahdanau) attention and dot-product attention. Dot-product attention is identical to ours except for the scaling factor $1/\sqrt{d_k}$; additive attention computes the compatibility function with a feed-forward network that has a single hidden layer. While the two are of similar theoretical complexity, dot-product attention is much faster and more space-efficient in practice, because it can be implemented with highly optimized matrix multiplication. In the code above, masked_fill sets the masked (padding) positions to a very large negative value, so the subsequent softmax drives their weights towards zero.
Multi-head attention lets the model attend to features at different positions and in different representation subspaces. Compared with a single attention head, multi-head attention is computed as:
$$\mathrm{MultiHead}(Q, K, V) = \mathrm{Concat}(\mathrm{head}_1, \dots, \mathrm{head}_h)W^O, \quad \mathrm{head}_i = \mathrm{Attention}(QW_i^Q, KW_i^K, VW_i^V)$$
where the projection parameter matrices are $W_i^Q \in \mathbb{R}^{d_{model} \times d_k}$, $W_i^K \in \mathbb{R}^{d_{model} \times d_k}$, $W_i^V \in \mathbb{R}^{d_{model} \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times d_{model}}$. Here we set the number of attention heads to $h = 8$, and for each head $d_k = d_v = d_{model}/h = 64$. Reducing the dimension of each head keeps the total computational cost close to that of single-head attention with full dimensionality.
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
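A shape-check sketch (toy batch and length assumed): with 8 heads over $d_{model} = 512$, each head works with $d_k = 512/8 = 64$, and the output has the same shape as the input.
# Illustrative self-attention call through the multi-head module.
mha = MultiHeadedAttention(h=8, d_model=512)
x = Variable(torch.randn(2, 10, 512))    # (batch, seq_len, d_model)
mask = Variable(subsequent_mask(10))     # (1, 10, 10)
out = mha(x, x, x, mask=mask)
print(out.size())                        # (2, 10, 512)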
Applications of Attention in the Model
PositionwiseFeedForward
In addition to the attention sub-layers, each layer of the encoder and decoder contains a fully connected feed-forward network, applied to each position separately and identically. It consists of two linear transformations with a ReLU activation in between:
$$\mathrm{FFN}(x) = \max(0, xW_1 + b_1)W_2 + b_2$$
While the linear transformations are the same across positions, they use different parameters from layer to layer; another way to describe this is as two convolutions with kernel size 1. The input and output dimensionality is $d_{model} = 512$, and the inner layer has dimensionality $d_{ff} = 2048$.
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
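A quick shape check (toy tensor assumed): the FFN expands each position to $d_{ff} = 2048$ internally and projects back to $d_{model} = 512$.
# Illustrative call: input and output shapes match.
ffn = PositionwiseFeedForward(d_model=512, d_ff=2048)
x = Variable(torch.randn(2, 10, 512))
print(ffn(x).size())                     # (2, 10, 512)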
Embeddings and Softmax
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
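A brief illustrative check (toy vocabulary and batch assumed): the embedding maps token ids to $d_{model}$-dimensional vectors, scaled by $\sqrt{d_{model}}$.
# Toy example: three token ids become three 512-dimensional vectors.
emb = Embeddings(d_model=512, vocab=11)
tokens = Variable(torch.LongTensor([[1, 2, 3]]))
print(emb(tokens).size())                # (1, 3, 512)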
Positional Encoding
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
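A visualization sketch (adapted plotting snippet; the model size of 20 and sequence length of 100 are illustrative): each dimension of the positional encoding is a sinusoid, with wavelengths forming a geometric progression across dimensions.
# Plot a few positional-encoding dimensions over the first 100 positions.
plt.figure(figsize=(15, 5))
pe = PositionalEncoding(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())
plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
None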
The Full Transformer Model
Here a helper function takes the hyperparameters and assembles the complete model:
def make_model(src_vocab, tgt_vocab, N=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
Decoder(DecoderLayer(d_model, c(attn), c(attn),
c(ff), dropout), N),
nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab))
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform(p)
return model
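As a final sanity check (an illustrative sketch; the tiny vocabulary of 11, N=2 layers, and the token ids below are assumed toy values), we can build a small model and run one forward pass; tmp_model.generator would then map each output vector to log-probabilities over the target vocabulary.
# Build a small model and verify the output shape of one forward pass.
tmp_model = make_model(src_vocab=11, tgt_vocab=11, N=2)
src = Variable(torch.LongTensor([[1, 2, 3, 4, 5]]))
src_mask = Variable(torch.ones(1, 1, 5))
tgt = Variable(torch.LongTensor([[1, 2, 3, 4]]))
tgt_mask = Variable(subsequent_mask(4))
out = tmp_model(src, tgt, src_mask, tgt_mask)
print(out.size())                        # (1, 4, 512): one d_model vector per target position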