Multi-Head Attention Code

```python
import math
from typing import Optional, List

import torch
from torch import nn

class PrepareForMultiHeadAttention(nn.Module):
    """
    ## Prepare for multi-head attention

    This applies a linear transformation and splits the result into the given number of heads.
    """

    def __init__(self, d_model: int, heads: int, d_k: int, bias: bool):
        super().__init__()
        # Linear layer for the transformation
        self.linear = nn.Linear(d_model, heads * d_k, bias=bias)
        # Number of heads
        self.heads = heads
        # Number of features per head
        self.d_k = d_k

    def forward(self, x: torch.Tensor):
        # Input has shape `[seq_len, batch_size, d_model]` or `[batch_size, d_model]`
        head_shape = x.shape[:-1]

        # Apply the linear transformation to the last dimension
        x = self.linear(x)

        # Split the last dimension into heads
        x = x.view(*head_shape, self.heads, self.d_k)

        # Output has shape `[seq_len, batch_size, heads, d_k]` or `[batch_size, heads, d_k]`
        return x
```
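As a small, illustrative sanity check of the shapes this module produces (the sizes below are arbitrary assumptions, and the snippet assumes the class defined above is in scope):

```python
# Illustrative shape check (sizes are arbitrary)
prep = PrepareForMultiHeadAttention(d_model=512, heads=8, d_k=64, bias=True)

x = torch.randn(10, 32, 512)   # [seq_len, batch_size, d_model]
print(prep(x).shape)           # torch.Size([10, 32, 8, 64])
```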

```python
class MultiHeadAttention(nn.Module):
    r"""
    ## Multi-Head Attention Module

    This computes scaled multi-head attention for given `query`, `key` and `value` vectors.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1, bias: bool = True):
        """
        * `heads` is the number of heads.
        * `d_model` is the number of features in the `query`, `key` and `value` vectors.
        """
        super().__init__()

        # Number of features per head
        self.d_k = d_model // heads
        # Number of heads
        self.heads = heads

        # These transform the `query`, `key` and `value` vectors for multi-head attention.
        self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias)
        self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias)
        self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=True)

        # Softmax for attention along the time dimension of `key`
        self.softmax = nn.Softmax(dim=1)

        # Output layer
        self.output = nn.Linear(d_model, d_model)
        # Dropout
        self.dropout = nn.Dropout(dropout_prob)
        # Scaling factor before the softmax
        self.scale = 1 / math.sqrt(self.d_k)

        # We store attentions so that they can be used for logging or other computations if needed
        self.attn = None

    def get_scores(self, query: torch.Tensor, key: torch.Tensor):
        """
        ### Calculate scores between queries and keys

        This method can be overridden for variations such as relative attention.
        """
        return torch.einsum('ibhd,jbhd->ijbh', query, key)

    def prepare_mask(self, mask: torch.Tensor, query_shape: List[int], key_shape: List[int]):
        """
        `mask` has shape `[seq_len_q, seq_len_k, batch_size]`, where the first dimension is the query dimension.
        If the query dimension is equal to 1 it will be broadcast.
        """
        assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0]
        assert mask.shape[1] == key_shape[0]
        assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1]

        # Same mask applied to all heads
        mask = mask.unsqueeze(-1)

        # Resulting mask has shape `[seq_len_q, seq_len_k, batch_size, heads]`
        return mask

    def forward(self, *,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[torch.Tensor] = None):
        """
        `query`, `key` and `value` are the tensors that store
        collections of *query*, *key* and *value* vectors.
        They have shape `[seq_len, batch_size, d_model]`.

        `mask` has shape `[seq_len, seq_len, batch_size]` and
        `mask[i, j, b]` indicates whether, for batch `b`,
        the query at position `i` has access to the key-value at position `j`.
        """
        # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]`
        seq_len, batch_size, _ = query.shape

        if mask is not None:
            mask = self.prepare_mask(mask, query.shape, key.shape)

        # Prepare `query`, `key` and `value` for attention computation.
        # These will then have shape `[seq_len, batch_size, heads, d_k]`.
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)

        # Compute attention scores.
        # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`.
        scores = self.get_scores(query, key)

        # Scale scores by 1 / sqrt(d_k)
        scores *= self.scale

        # Apply the mask
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))

        # Softmax attention along the key dimension
        attn = self.softmax(scores)

        # Apply dropout
        attn = self.dropout(attn)

        # Multiply by values
        x = torch.einsum("ijbh,jbhd->ibhd", attn, value)

        # Save attentions for any other calculations
        self.attn = attn.detach()

        # Concatenate multiple heads
        x = x.reshape(seq_len, batch_size, -1)

        # Output layer
        return self.output(x)
```
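As a quick, illustrative usage sketch (the tensor sizes and the causal mask below are assumptions chosen for demonstration, not part of the code above), the module can be exercised as self-attention like this:

```python
# Illustrative self-attention example (sizes chosen arbitrarily)
seq_len, batch_size, d_model, heads = 10, 32, 512, 8

mha = MultiHeadAttention(heads=heads, d_model=d_model)

# Inputs have shape [seq_len, batch_size, d_model]
x = torch.randn(seq_len, batch_size, d_model)

# Causal mask: position i may only attend to positions j <= i.
# Shape [seq_len_q, seq_len_k, batch_size]; the batch dimension is broadcast.
mask = torch.tril(torch.ones(seq_len, seq_len)).unsqueeze(-1)

out = mha(query=x, key=x, value=x, mask=mask)
print(out.shape)        # torch.Size([10, 32, 512])
print(mha.attn.shape)   # torch.Size([10, 10, 32, 8])
```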
Multi-head attention is a technique commonly used in neural networks to strengthen a model's ability to attend to and integrate different features. The following example shows how multi-head attention can be implemented:

```python
import tensorflow as tf

class MultiHeadAttention(tf.keras.layers.Layer):
    def __init__(self, num_heads, d_model):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads

        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)

        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q):
        batch_size = tf.shape(q)[0]

        q = self.wq(q)
        k = self.wk(k)
        v = self.wv(v)

        q = self.split_heads(q, batch_size)
        k = self.split_heads(k, batch_size)
        v = self.split_heads(v, batch_size)

        scaled_attention_logits = tf.matmul(q, k, transpose_b=True)
        scaled_attention_logits /= tf.math.sqrt(tf.cast(self.depth, tf.float32))

        attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)

        output = tf.matmul(attention_weights, v)
        output = tf.transpose(output, perm=[0, 2, 1, 3])
        output = tf.reshape(output, (batch_size, -1, self.d_model))

        output = self.dense(output)
        return output, attention_weights
```

The code above defines a custom layer named `MultiHeadAttention` that takes three inputs: `v`, `k` and `q`, where `v` is the value, `k` is the key and `q` is the query. Each input is linearly transformed, split into `num_heads` heads, and attention is computed per head before the heads are merged again. The final output is the result of multi-head attention. Note that this is only example code; in practice it may need to be adapted to the specific task and model.
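As a rough usage sketch (the batch size, sequence length, and model size below are arbitrary assumptions, not values from the original post), the layer can be exercised as self-attention like this:

```python
import tensorflow as tf

# Arbitrary, illustrative sizes
mha = MultiHeadAttention(num_heads=8, d_model=512)

# Dummy input with shape [batch_size, seq_len, d_model]
x = tf.random.normal((32, 10, 512))

# Self-attention: the same tensor serves as value, key and query
output, attention_weights = mha(x, x, x)

print(output.shape)             # (32, 10, 512)
print(attention_weights.shape)  # (32, 8, 10, 10)
```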
