【Transformer】Self-Attention with Relative Position Representations and a PyTorch Implementation

Trainable embedding vectors are added to the Transformer so that the output representations can encode the temporal/positional information of the inputs. These embeddings are added when computing the keys and values between any two tokens i and j of the input sequence. Since each embedding vector represents the distance between tokens i and j, they are called "Relative Position Representations".

 

Self-Attention

An input element x_i is mapped by self-attention to an output z_i, which is a weighted sum of all input elements projected through W^V, as in Eq. (1). The weight between any two elements is computed by Eq. (2) and normalized with a softmax.
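For reference, Eqs. (1) and (2) from the paper (standard scaled dot-product self-attention) are:

$$
z_i = \sum_{j=1}^{n} \alpha_{ij} \, (x_j W^V) \tag{1}
$$

$$
e_{ij} = \frac{(x_i W^Q)(x_j W^K)^T}{\sqrt{d_z}}, \qquad
\alpha_{ij} = \frac{\exp(e_{ij})}{\sum_{k=1}^{n} \exp(e_{ik})} \tag{2}
$$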

Relation-aware Self-Attention

Relation-aware self-attention models the relation between pairs of input elements: for inputs x_i and x_j, edge representations a_ij^V and a_ij^K are added. This requires no additional linear layer, and the representations are shared across attention heads. Eq. (1) is modified into Eq. (3), and Eq. (2) into Eq. (4).
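The modified equations from the paper are:

$$
z_i = \sum_{j=1}^{n} \alpha_{ij} \, (x_j W^V + a_{ij}^V) \tag{3}
$$

$$
e_{ij} = \frac{x_i W^Q \, (x_j W^K + a_{ij}^K)^T}{\sqrt{d_z}} \tag{4}
$$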

For relative positions, only the nearest k elements on each side are considered: starting from the current position, at most k elements to the left (distance -k) and at most k elements to the right (distance +k); any distance beyond this range is clipped to ±k. Hence w^K and w^V each contain 2k+1 vectors (k on the left, k on the right, plus the current position itself), and each vector a_ij^K or a_ij^V has dimension d_a = d_z, as illustrated in the figure of the paper.
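Formally, the relative position representations are clipped lookups into the two tables w^K and w^V:

$$
a_{ij}^K = w^K_{\mathrm{clip}(j-i,\,k)}, \qquad
a_{ij}^V = w^V_{\mathrm{clip}(j-i,\,k)}, \qquad
\mathrm{clip}(x, k) = \max\big(-k, \min(k, x)\big)
$$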

To simplify the computation, Eq. (4) is split into two terms that are computed separately.
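This split is the paper's efficient implementation and is exactly what the code below computes as attn1 and attn2:

$$
e_{ij} = \frac{x_i W^Q (x_j W^K)^T + x_i W^Q (a_{ij}^K)^T}{\sqrt{d_z}} \tag{5}
$$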

In the experiments, the paper uses 6 encoder and 6 decoder layers with d_x = 512, d_z = 64, 8 attention heads, and clipping distance k = 16, trained for 100,000 steps on 8 K40 GPUs. On the choice of k, see the ablation in the paper (table not reproduced here).

 

Code

Reference: GitHub - evelinehong/Transformer_Relative_Position_PyTorch: Implement the paper "Self-Attention with Relative Position Representations"

import copy

import torch
import torch.nn as nn


def _get_clones(module, n):
    # n independent deep copies of a module (equivalent to PyTorch's internal _get_clones helper)
    return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])


class RelativePosition(nn.Module):
    """Learnable relative position table w^K or w^V, clipped to +/- max_relative_position."""

    def __init__(self, num_units, max_relative_position):
        super().__init__()
        self.num_units = num_units
        self.max_relative_position = max_relative_position
        # 2k + 1 vectors for distances -k ... 0 ... +k, each of dimension num_units (= head_dim)
        self.embeddings_table = nn.Parameter(torch.Tensor(max_relative_position * 2 + 1, num_units))
        nn.init.xavier_uniform_(self.embeddings_table)

    def forward(self, length_q, length_k):
        range_vec_q = torch.arange(length_q)
        range_vec_k = torch.arange(length_k)
        # distance_mat[i, j] = j - i, the signed relative distance between positions i and j
        distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
        # clip distances to [-k, k], then shift to [0, 2k] so they index the embedding table
        distance_mat_clipped = torch.clamp(distance_mat, -self.max_relative_position, self.max_relative_position)
        final_mat = (distance_mat_clipped + self.max_relative_position).long()
        final_mat = final_mat.to(self.embeddings_table.device)
        # embeddings: [length_q, length_k, num_units]
        embeddings = self.embeddings_table[final_mat]

        return embeddings

class RelativeMultiHeadAttention(nn.Module):
    """Multi-head self-attention with relative position representations.

    Takes in the model size and number of heads."""

    def __init__(self, d_model, n_heads, dropout=0.1):
        super(RelativeMultiHeadAttention, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads

        assert d_model % n_heads == 0
        self.head_dim = d_model // n_heads

        # 4 linear layers: W^Q, W^K, W^V and the final output projection
        self.linears = _get_clones(nn.Linear(d_model, d_model), 4)
        self.dropout = nn.Dropout(p=dropout)
        # relative position tables for keys and values, clipping distance k = 16
        self.relative_position_k = RelativePosition(self.head_dim, max_relative_position=16)
        self.relative_position_v = RelativePosition(self.head_dim, max_relative_position=16)

        self.scale = self.head_dim ** 0.5

    def forward(self, query, key, value):
        # query, key, value: [batch_size, len, d_model]
        batch_size = query.shape[0]

        # project inputs with W^Q, W^K, W^V
        query, key, value = [l(x).view(batch_size, -1, self.d_model) for l, x in
                             zip(self.linears, (query, key, value))]

        len_q = query.shape[1]
        len_k = key.shape[1]
        len_v = value.shape[1]

        # ---- term 1 of Eq. (5): content-based scores (x_i W^Q)(x_j W^K)^T ----
        # r_q1, r_k1: [batch_size, n_heads, len, head_dim]
        r_q1 = query.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        r_k1 = key.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        attn1 = torch.matmul(r_q1, r_k1.permute(0, 1, 3, 2))

        # ---- term 2 of Eq. (5): relative-position scores (x_i W^Q)(a_ij^K)^T ----
        r_q2 = query.permute(1, 0, 2).contiguous().view(len_q, batch_size * self.n_heads, self.head_dim)
        r_k2 = self.relative_position_k(len_q, len_k)          # [len_q, len_k, head_dim]
        attn2 = torch.matmul(r_q2, r_k2.transpose(1, 2)).transpose(0, 1)
        attn2 = attn2.contiguous().view(batch_size, self.n_heads, len_q, len_k)

        attn = (attn1 + attn2) / self.scale
        attn = self.dropout(torch.softmax(attn, dim=-1))
        # attn: [batch_size, n_heads, len_q, len_k]

        # ---- weighted sum of values, Eq. (3): alpha_ij (x_j W^V + a_ij^V) ----
        r_v1 = value.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        weight1 = torch.matmul(attn, r_v1)
        r_v2 = self.relative_position_v(len_q, len_v)          # [len_q, len_v, head_dim]
        weight2 = attn.permute(2, 0, 1, 3).contiguous().view(len_q, batch_size * self.n_heads, len_k)
        weight2 = torch.matmul(weight2, r_v2)
        weight2 = weight2.transpose(0, 1).contiguous().view(batch_size, self.n_heads, len_q, self.head_dim)

        x = weight1 + weight2
        # x: [batch_size, n_heads, len_q, head_dim]

        x = x.permute(0, 2, 1, 3).contiguous()
        # x: [batch_size, len_q, n_heads, head_dim]

        x = x.view(batch_size, len_q, self.d_model)
        # x: [batch_size, len_q, d_model]

        # final output projection
        return self.linears[-1](x)
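A minimal usage sketch of the two classes above, with made-up batch size, sequence length, and dimensions for illustration:

```python
# Hypothetical shapes: batch of 2 sequences of length 10, d_model = 512, 8 heads
mha = RelativeMultiHeadAttention(d_model=512, n_heads=8, dropout=0.1)
x = torch.randn(2, 10, 512)
out = mha(x, x, x)        # self-attention: query = key = value
print(out.shape)          # torch.Size([2, 10, 512])

# The relative position table returns one embedding per (i, j) pair
rel_k = RelativePosition(num_units=64, max_relative_position=4)
print(rel_k(5, 5).shape)  # torch.Size([5, 5, 64])
```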

 

The following is example PyTorch code for a plain Transformer with self-attention (without relative positions), kept here for comparison:

## Self-Attention

```python
import torch
import torch.nn as nn


class SelfAttention(nn.Module):
    def __init__(self, embed_size, heads):
        super(SelfAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads

        assert (self.head_dim * heads == embed_size), "Embed size needs to be divisible by heads"

        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.fc_out = nn.Linear(heads * self.head_dim, embed_size)

    def forward(self, values, keys, queries, mask):
        # Get number of training examples
        N = queries.shape[0]
        value_len, key_len, query_len = values.shape[1], keys.shape[1], queries.shape[1]

        # Split embedding into self.heads pieces
        values = values.reshape(N, value_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        queries = queries.reshape(N, query_len, self.heads, self.head_dim)

        # Apply the per-head linear projections
        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        # Transpose to get dimensions batch_size * self.heads * seq_len * self.head_dim
        values = values.permute(0, 2, 1, 3)
        keys = keys.permute(0, 2, 1, 3)
        queries = queries.permute(0, 2, 1, 3)

        # Calculate energy
        energy = torch.matmul(queries, keys.permute(0, 1, 3, 2))

        if mask is not None:
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        # Apply softmax to get attention scores
        attention = torch.softmax(energy / (self.embed_size ** (1 / 2)), dim=-1)

        # Multiply attention scores with values
        out = torch.matmul(attention, values)

        # Concatenate heads and linearly transform the output
        out = out.permute(0, 2, 1, 3).reshape(N, query_len, self.heads * self.head_dim)
        out = self.fc_out(out)

        return out
```

## Transformer

```python
import torch
import torch.nn as nn


class TransformerBlock(nn.Module):
    def __init__(self, embed_size, heads, dropout, forward_expansion):
        super(TransformerBlock, self).__init__()
        # batch_first=True so inputs are [batch, seq_len, embed_size]
        self.attention = nn.MultiheadAttention(embed_dim=embed_size, num_heads=heads, batch_first=True)
        self.norm1 = nn.LayerNorm(embed_size)
        self.norm2 = nn.LayerNorm(embed_size)
        self.feed_forward = nn.Sequential(
            nn.Linear(embed_size, forward_expansion * embed_size),
            nn.ReLU(),
            nn.Linear(forward_expansion * embed_size, embed_size)
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, value, key, query, mask):
        attention_output, _ = self.attention(query, key, value, attn_mask=mask)
        x = self.dropout(self.norm1(attention_output + query))
        forward_output = self.feed_forward(x)
        out = self.dropout(self.norm2(forward_output + x))
        return out


class Encoder(nn.Module):
    def __init__(self, src_vocab_size, embed_size, num_layers, heads, device,
                 forward_expansion, dropout, max_length):
        super(Encoder, self).__init__()
        self.embed_size = embed_size
        self.device = device
        self.word_embedding = nn.Embedding(src_vocab_size, embed_size)
        self.position_embedding = nn.Embedding(max_length, embed_size)
        self.layers = nn.ModuleList([
            TransformerBlock(embed_size, heads, dropout, forward_expansion)
            for _ in range(num_layers)
        ])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        N, seq_length = x.shape
        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)
        out = self.dropout(self.word_embedding(x) + self.position_embedding(positions))
        for layer in self.layers:
            out = layer(out, out, out, mask)
        return out


class DecoderBlock(nn.Module):
    def __init__(self, embed_size, heads, forward_expansion, dropout, device):
        super(DecoderBlock, self).__init__()
        self.norm = nn.LayerNorm(embed_size)
        self.attention = nn.MultiheadAttention(embed_size, heads, batch_first=True)
        self.transformer_block = TransformerBlock(embed_size, heads, dropout, forward_expansion)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, value, key, src_mask, trg_mask):
        attention_output, _ = self.attention(x, x, x, attn_mask=trg_mask)
        query = self.dropout(self.norm(attention_output + x))
        out = self.transformer_block(value, key, query, src_mask)
        return out


class Decoder(nn.Module):
    def __init__(self, trg_vocab_size, embed_size, num_layers, heads,
                 forward_expansion, dropout, device, max_length):
        super(Decoder, self).__init__()
        self.embed_size = embed_size
        self.device = device
        self.word_embedding = nn.Embedding(trg_vocab_size, embed_size)
        self.position_embedding = nn.Embedding(max_length, embed_size)
        self.layers = nn.ModuleList([
            DecoderBlock(embed_size, heads, forward_expansion, dropout, device)
            for _ in range(num_layers)
        ])
        self.fc_out = nn.Linear(embed_size, trg_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_out, src_mask, trg_mask):
        N, seq_length = x.shape
        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)
        x = self.dropout(self.word_embedding(x) + self.position_embedding(positions))
        for layer in self.layers:
            x = layer(x, enc_out, enc_out, src_mask, trg_mask)
        out = self.fc_out(x)
        return out
```

This code implements a basic Transformer with self-attention. It is only an example; you will need to adjust the hyperparameters and structure to your own data and task.
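As a quick sanity check, a hypothetical usage sketch (vocabulary sizes, dimensions, and sequence lengths are made up for illustration):

```python
device = torch.device("cpu")
enc = Encoder(src_vocab_size=1000, embed_size=256, num_layers=2, heads=8,
              device=device, forward_expansion=4, dropout=0.1, max_length=100)
dec = Decoder(trg_vocab_size=1000, embed_size=256, num_layers=2, heads=8,
              forward_expansion=4, dropout=0.1, device=device, max_length=100)

src = torch.randint(0, 1000, (2, 12))   # batch of 2 source sentences, length 12
trg = torch.randint(0, 1000, (2, 9))    # batch of 2 target prefixes, length 9
enc_out = enc(src, mask=None)
logits = dec(trg, enc_out, src_mask=None, trg_mask=None)
print(logits.shape)                     # torch.Size([2, 9, 1000])
```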