Code Implementations of Attention and Self-Attention Modules / Stitchable Modules / Plug-and-Play Modules

Attention Mechanism

The module below implements a simple learned attention pooling: a linear layer scores each time step of the encoder output, the scores are normalized with a softmax over the sequence dimension, and the weighted sum of the time steps is returned as a context vector.

```python
import torch
import torch.nn as nn

class Attention(nn.Module):
    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        self.attention = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, encoder_outputs):
        # encoder_outputs shape: (batch_size, sequence_length, hidden_dim)
        attn_weights = self.attention(encoder_outputs)  # (batch_size, sequence_length, 1)
        attn_weights = torch.softmax(attn_weights, dim=1)  # normalize over the sequence dimension
        context = torch.sum(attn_weights * encoder_outputs, dim=1)  # (batch_size, hidden_dim)
        return context, attn_weights

# Example usage
batch_size = 2
sequence_length = 5
hidden_dim = 10

encoder_outputs = torch.randn(batch_size, sequence_length, hidden_dim)
attention_layer = Attention(hidden_dim)
context, attn_weights = attention_layer(encoder_outputs)

print("Context:", context)
print("Attention Weights:", attn_weights)
```

Self-Attention Mechanism

The module below implements multi-head scaled dot-product self-attention: the input is projected to queries, keys, and values in one fused linear layer, attention is computed independently per head, and the concatenated head outputs pass through a final output projection.

```python
import torch
import torch.nn as nn

class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"

        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)  # fused Q, K, V projection
        self.o_proj = nn.Linear(embed_dim, embed_dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch_size, seq_length, embed_dim = x.size()
        qkv = self.qkv_proj(x)  # (batch_size, seq_length, embed_dim * 3)
        qkv = qkv.reshape(batch_size, seq_length, self.num_heads, 3 * self.head_dim)
        qkv = qkv.permute(0, 2, 1, 3)  # (batch_size, num_heads, seq_length, 3 * head_dim)

        q, k, v = qkv.chunk(3, dim=-1)  # each: (batch_size, num_heads, seq_length, head_dim)

        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)  # scaled dot-product
        attn_weights = self.softmax(attn_weights)  # (batch_size, num_heads, seq_length, seq_length)

        attn_output = torch.matmul(attn_weights, v)  # (batch_size, num_heads, seq_length, head_dim)
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim)

        output = self.o_proj(attn_output)
        return output, attn_weights

# Example usage
batch_size = 2
seq_length = 5
embed_dim = 16
num_heads = 4

x = torch.randn(batch_size, seq_length, embed_dim)
self_attention_layer = MultiHeadSelfAttention(embed_dim, num_heads)
output, attn_weights = self_attention_layer(x)

print("Output:", output)
print("Attention Weights:", attn_weights)
```
 

Seam carving (the "optimal seam" algorithm) is a content-aware image-processing technique for shrinking or enlarging an image: instead of scaling uniformly, it removes or inserts low-energy pixel paths (seams) so that salient content is preserved. The code below implements the shrinking case.

1. Compute the energy map

```python
import numpy as np
import cv2

def energy(image):
    # Gradient-magnitude energy: sum of absolute Sobel derivatives.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    return np.abs(sobel_x) + np.abs(sobel_y)
```

2. Find the minimum-energy seam

```python
def find_seam(image):
    e = energy(image)
    m = np.zeros_like(e)
    m[0] = e[0]
    # Forward pass: m[i][j] is the cheapest cumulative energy of any
    # seam from the top row down to pixel (i, j).
    for i in range(1, m.shape[0]):
        for j in range(m.shape[1]):
            if j == 0:
                m[i][j] = e[i][j] + min(m[i-1][j], m[i-1][j+1])
            elif j == m.shape[1] - 1:
                m[i][j] = e[i][j] + min(m[i-1][j-1], m[i-1][j])
            else:
                m[i][j] = e[i][j] + min(m[i-1][j-1], m[i-1][j], m[i-1][j+1])
    # Backtrack from the cheapest cell in the bottom row.
    seam = []
    j = np.argmin(m[-1])
    seam.append((len(e) - 1, j))
    for i in reversed(range(len(e) - 1)):
        if j == 0:
            j = np.argmin(m[i][j:j+2])
        elif j == m.shape[1] - 1:
            j = np.argmin(m[i][j-1:j+1]) + j - 1
        else:
            j = np.argmin(m[i][j-1:j+2]) + j - 1
        seam.append((i, j))
    return seam[::-1]
```

3. Remove the seam

```python
def remove_seam(image, seam):
    h, w = image.shape[:2]
    mask = np.ones((h, w), dtype=bool)  # note: np.bool was removed from modern NumPy
    for i, j in seam:
        mask[i, j] = False
    return image[mask].reshape((h, w - 1, 3))
```

4. Shrink the image

```python
def resize(image, size):
    # size is (target_width, target_height); only shrinking is handled here.
    h, w = image.shape[:2]
    dy, dx = h - size[1], w - size[0]
    if dx > 0:  # remove vertical seams to reduce the width
        for _ in range(dx):
            seam = find_seam(image)
            image = remove_seam(image, seam)
    if dy > 0:  # rotate, remove vertical seams, rotate back to reduce the height
        image = np.rot90(image, 1, (0, 1))
        for _ in range(dy):
            seam = find_seam(image)
            image = remove_seam(image, seam)
        image = np.rot90(image, 3, (0, 1))
    return image
```

The code above implements seam carving for shrinking; call the `resize` function to scale an image down.
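
For completeness, a minimal usage sketch (the file paths are placeholders, not from the original post): read an image with OpenCV, carve 50 pixels off each dimension, and write the result.

```python
# Hypothetical usage (file paths are placeholders): shrink an image
# by carving out low-energy seams, then write the result to disk.
import cv2

image = cv2.imread("input.jpg")            # BGR image, shape (h, w, 3)
h, w = image.shape[:2]
resized = resize(image, (w - 50, h - 50))  # size is (width, height)
cv2.imwrite("output.jpg", resized)
```

Note that `find_seam` runs a pure-Python double loop per seam, so carving large images this way is slow; vectorizing the dynamic-programming pass with NumPy is a common optimization.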