Sure, here is the Python code for a Selective Kernel Attention (SKAttention) module, implemented as multi-head attention with `n_sk` parallel branches per head:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with an optional mask."""

    def __init__(self, temperature):
        super().__init__()
        self.temperature = temperature

    def forward(self, q, k, v, mask=None):
        # q, k, v: (batch, n_branches, seq_len, d)
        attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
        if mask is not None:
            attn = attn.masked_fill(mask == 0, -1e9)
        attn = F.softmax(attn, dim=-1)
        output = torch.matmul(attn, v)
        return output, attn


class SKAttention(nn.Module):
    """Selective-kernel style attention: multi-head attention with n_sk branches per head."""

    def __init__(self, d_model, d_k, d_v, n_heads, n_sk, dropout):
        super().__init__()
        self.n_heads = n_heads
        self.n_sk = n_sk
        self.d_k = d_k
        self.d_v = d_v
        self.w_qs = nn.Linear(d_model, n_heads * n_sk * d_k, bias=True)
        self.w_ks = nn.Linear(d_model, n_heads * n_sk * d_k, bias=True)
        self.w_vs = nn.Linear(d_model, n_heads * n_sk * d_v, bias=True)
        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
        self.proj_o = nn.Linear(n_heads * n_sk * d_v, d_model, bias=True)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.fc = nn.Linear(d_model, d_model, bias=True)

    def forward(self, query, key, value, mask=None):
        batch_size, len_q = query.size(0), query.size(1)
        residual = query
        # Project and split into n_heads * n_sk parallel branches.
        q = self.w_qs(query).view(batch_size, len_q, self.n_heads * self.n_sk, self.d_k)
        k = self.w_ks(key).view(batch_size, -1, self.n_heads * self.n_sk, self.d_k)
        v = self.w_vs(value).view(batch_size, -1, self.n_heads * self.n_sk, self.d_v)
        # Move the branch dimension in front of the sequence dimension:
        # (batch, n_heads * n_sk, seq_len, d)
        q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
        if mask is not None:
            # mask: (batch, len_q, len_k) -> broadcast over the branch dimension
            mask = mask.unsqueeze(1)
        output, attn = self.attention(q, k, v, mask=mask)
        # Merge branches back: (batch, seq_len, n_heads * n_sk * d_v)
        output = output.transpose(1, 2).contiguous().view(batch_size, len_q, -1)
        # Output projection, residual connection, layer norm.
        proj_o = self.proj_o(output)
        output = self.layer_norm(residual + proj_o)
        # Position-wise feed-forward with its own residual connection.
        fc_output = self.dropout(self.fc(output))
        output = self.layer_norm(output + fc_output)
        return output, attn
```
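For reference, here is a minimal usage sketch. The batch size, sequence length, and hyperparameter values below are illustrative assumptions rather than values from the module itself:

```python
# Minimal smoke test with assumed, illustrative sizes.
if __name__ == "__main__":
    d_model, d_k, d_v, n_heads, n_sk = 64, 16, 16, 4, 2
    attn_block = SKAttention(d_model, d_k, d_v, n_heads, n_sk, dropout=0.1)

    x = torch.randn(2, 10, d_model)   # (batch, seq_len, d_model)
    mask = torch.ones(2, 10, 10)      # 1 = attend, 0 = masked position

    out, attn = attn_block(x, x, x, mask=mask)
    print(out.shape)   # torch.Size([2, 10, 64])
    print(attn.shape)  # torch.Size([2, 8, 10, 10]); n_heads * n_sk = 8 branches
```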
Hope this helps.