from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
AttentionConfig = namedtuple('AttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
    return val is not None

def once(fn):
    called = False

    @wraps(fn)
    def inner(x):
        nonlocal called
        if called:
            return
        called = True
        return fn(x)

    return inner
print_once = once(print)
# main class
class Attend(nn.Module):
    def __init__(
        self,
        dropout = 0.,
        flash = False,
        causal = False
    ):
        super().__init__()
        self.dropout = dropout
        self.attn_dropout = nn.Dropout(dropout)

        self.causal = causal

        self.flash = flash
        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'

        # determine efficient attention configs for cuda and cpu

        self.cpu_config = AttentionConfig(True, True, True)
        self.cuda_config = None

        if not torch.cuda.is_available() or not flash:
            return

        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))

        if device_properties.major == 8 and device_properties.minor == 0:
            print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
            self.cuda_config = AttentionConfig(True, False, False)
        else:
            print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
            self.cuda_config = AttentionConfig(False, True, True)
    def flash_attn(self, q, k, v):
        _, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda

        q, k, v = map(lambda t: t.contiguous(), (q, k, v))

        # check if there is a compatible device for flash attention

        config = self.cuda_config if is_cuda else self.cpu_config

        # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale

        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p = self.dropout if self.training else 0.,
                is_causal = self.causal
            )

        return out
    def forward(self, q, k, v, bias = None):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        q_len, k_len, device = q.shape[-2], k.shape[-2], q.device

        if self.flash:
            assert not exists(bias)
            return self.flash_attn(q, k, v)

        scale = q.shape[-1] ** -0.5

        # similarity

        sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale

        # attn bias

        if exists(bias):
            sim = sim + bias

        # causal

        if self.causal:
            i, j = sim.shape[-2:]
            causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
            sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)

        # attention

        attn = sim.softmax(dim = -1)
        attn = self.attn_dropout(attn)

        # aggregate values

        out = einsum("b h i j, b h j d -> b h i d", attn, v)

        return out
This is an Attend class implemented in PyTorch, a custom attention module for neural networks. The Attend class takes three input tensors (query, key, and value) and returns a context tensor. The attention mechanism lets the model focus on different parts of the input sequence depending on the query.
The Attend class exposes a few options: dropout, flash attention, and causal attention. Flash attention is a fast, memory-efficient implementation of scaled dot-product attention that is only available on PyTorch 2.0 or later (the constructor asserts this). Causal attention is masked self-attention that prevents the model from attending to future positions in the sequence.
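In the non-flash path, the causal mask is built as an offset upper-triangular matrix, so it stays right-aligned even when the query is shorter than the key. A minimal sketch of that mask, with arbitrary lengths chosen only for illustration:

```python
import torch

# sketch of the causal mask used in Attend.forward, assuming q_len = 3 and k_len = 5
i, j = 3, 5
causal_mask = torch.ones((i, j), dtype = torch.bool).triu(j - i + 1)
print(causal_mask)
# tensor([[False, False, False,  True,  True],
#         [False, False, False, False,  True],
#         [False, False, False, False, False]])
# True entries are filled with the most negative finite value before the softmax,
# so each query position only attends to keys up to its own (right-aligned) position
```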
The forward method computes the similarity between the query and key tensors, adds an optional bias, and applies a softmax over the similarities to obtain attention weights. The value tensor is then aggregated with these weights to produce the context tensor.
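The optional bias is added directly to the similarity logits before the softmax, so any tensor broadcastable to (batch, heads, q_len, k_len) works, e.g. a relative position bias. A minimal sketch, with shapes chosen only for illustration:

```python
import torch

attend = Attend(flash = False)  # use the standard (non-flash) attention path

q = torch.randn(2, 8, 10, 64)   # (batch, heads, q_len, dim_head)
k = torch.randn(2, 8, 16, 64)   # (batch, heads, k_len, dim_head)
v = torch.randn(2, 8, 16, 64)

# additive bias, broadcast to (batch, heads, q_len, k_len)
bias = torch.randn(8, 10, 16)

out = attend(q, k, v, bias = bias)  # (2, 8, 10, 64)
```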
To use the Attend class, create an instance and pass the query, key, and value tensors to it. Each tensor is expected to have shape (batch, heads, sequence length, head dimension). For example:
```python
attend = Attend(dropout = 0.1, flash = True, causal = True)

# (batch, heads, sequence length, head dimension)
query = torch.randn(1, 8, 12, 32)
key = torch.randn(1, 8, 20, 32)
value = torch.randn(1, 8, 20, 32)

context = attend(query, key, value)  # (1, 8, 12, 32)
```
This creates an Attend instance with the specified options and uses it to compute the context tensor for the given input tensors.
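One usage note: with flash = True the forward method asserts that no bias is passed, so attention biases require flash = False, and flash attention itself requires PyTorch 2.0 or later, as the constructor asserts.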