DL-based: miscellaneous notes

DL-based: forward and backward code for 2D average pooling, C++ and Python versions

Shortest path between two nodes in a directed graph

String search: binary matching

DL-based

Pooling layers: max pooling / average pooling

import numpy as np

class AvgPooling2D:
    def __init__(self, kernel_size=(2, 2), stride=2):
        self.stride = stride
        self.kernel_size = kernel_size
        self.w_height = kernel_size[0]
        self.w_width = kernel_size[1]

    def __call__(self, x):
        # forward pass: slide the window over x and average each patch
        self.x = x
        self.in_height = x.shape[0]
        self.in_width = x.shape[1]

        self.out_height = (self.in_height - self.w_height) // self.stride + 1
        self.out_width = (self.in_width - self.w_width) // self.stride + 1
        out = np.zeros((self.out_height, self.out_width))

        for i in range(self.out_height):
            for j in range(self.out_width):
                start_i = i * self.stride
                start_j = j * self.stride
                end_i = start_i + self.w_height
                end_j = start_j + self.w_width
                out[i, j] = np.mean(x[start_i: end_i, start_j: end_j])
        return out

    def backward(self, d_loss):
        # backward pass: every input inside a window receives an equal
        # 1/(kh*kw) share of that window's upstream gradient
        dx = np.zeros_like(self.x, dtype=float)

        for i in range(self.out_height):
            for j in range(self.out_width):
                start_i = i * self.stride
                start_j = j * self.stride
                end_i = start_i + self.w_height
                end_j = start_j + self.w_width
                # += (not =) so overlapping windows accumulate correctly
                dx[start_i: end_i, start_j: end_j] += d_loss[i, j] / (self.w_height * self.w_width)
        return dx
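
A quick check of the class above (this usage snippet is my addition, not from the original): compare both passes against torch.nn.functional.avg_pool2d on a hand-picked 4x4 input.

import numpy as np
import torch
import torch.nn.functional as F

pool = AvgPooling2D(kernel_size=(2, 2), stride=2)
x = np.arange(16, dtype=np.float32).reshape(4, 4)
out = pool(x)                          # forward: shape (2, 2)
dx = pool.backward(np.ones_like(out))  # backward with an all-ones upstream gradient

xt = torch.tensor(x, requires_grad=True)
ref = F.avg_pool2d(xt[None, None], kernel_size=2, stride=2)[0, 0]
ref.sum().backward()
print(np.allclose(out, ref.detach().numpy()))  # True
print(np.allclose(dx, xt.grad.numpy()))        # True

The heading above also mentions max pooling, but only average pooling is implemented here. A minimal max-pooling sketch in the same style (my addition): the forward pass records each window's argmax so the backward pass can route the gradient to that single element.

class MaxPooling2D:
    def __init__(self, kernel_size=(2, 2), stride=2):
        self.stride = stride
        self.w_height, self.w_width = kernel_size

    def __call__(self, x):
        self.x = x
        self.out_height = (x.shape[0] - self.w_height) // self.stride + 1
        self.out_width = (x.shape[1] - self.w_width) // self.stride + 1
        out = np.zeros((self.out_height, self.out_width))
        self.arg_max = {}  # (i, j) -> position of the max inside that window
        for i in range(self.out_height):
            for j in range(self.out_width):
                si, sj = i * self.stride, j * self.stride
                window = x[si: si + self.w_height, sj: sj + self.w_width]
                out[i, j] = np.max(window)
                self.arg_max[(i, j)] = np.unravel_index(np.argmax(window), window.shape)
        return out

    def backward(self, d_loss):
        # only the max element of each window receives gradient
        dx = np.zeros_like(self.x, dtype=float)
        for (i, j), (mi, mj) in self.arg_max.items():
            dx[i * self.stride + mi, j * self.stride + mj] += d_loss[i, j]
        return dx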

Attention layer

https://blog.51cto.com/u_14300986/5467376

https://blog.csdn.net/m0_55097528/article/details/127759943

import torch, math
# 1. Prepare the input: 3 tokens, each a 4-dim feature vector
x = torch.rand((3, 4), dtype=torch.float32)
# 2. Initialize the projection weights
w_key   = torch.rand((4, 3), dtype=torch.float32)
w_query = torch.rand((4, 3), dtype=torch.float32)
w_value = torch.rand((4, 3), dtype=torch.float32)
# 3. Derive the keys, queries and values
keys   = x @ w_key
querys = x @ w_query
values = x @ w_value
print(keys.shape)  # torch.Size([3, 3])

# 4. Score each query against every key, then normalize per query
attn_scores         = querys @ keys.t()
attn_scores_softmax = torch.softmax(attn_scores, dim=-1)
print('attn_scores_softmax:', '\n', attn_scores_softmax)  # torch.Size([3, 3])

# 5. Weighted sum of the values; matrix form:
outputs = attn_scores_softmax @ values

# Equivalent broadcasting form: weight every value vector by its score,
# then sum over the value axis
weighted_values = values[:, None] * attn_scores_softmax.t()[:, :, None]
assert torch.allclose(outputs, weighted_values.sum(dim=0))
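
A cross-check I have added (not in the original): PyTorch's built-in scaled_dot_product_attention (torch >= 2.0) additionally scales the scores by 1/sqrt(d_k) before the softmax, so the snippet below redoes the computation with that scaling and compares.

import torch.nn.functional as F

d_k = keys.shape[-1]
scaled = torch.softmax(querys @ keys.t() / math.sqrt(d_k), dim=-1) @ values
# the built-in expects at least one leading batch dim, hence the [None]
ref = F.scaled_dot_product_attention(querys[None], keys[None], values[None])[0]
print(torch.allclose(scaled, ref, atol=1e-6))  # True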



import math
import torch
import torch.nn as nn

class self_attention(nn.Module):
    """
    x   : bs * seq_len * input_dim
    W_q : input_dim -> dim_k
    W_k : input_dim -> dim_k
    W_v : input_dim -> dim_v
    """
    def __init__(self, input_dim, dim_k, dim_v):
        super().__init__()
        # projection matrices for query / key / value
        self.W_q = nn.Linear(input_dim, dim_k, bias=False)
        self.W_k = nn.Linear(input_dim, dim_k, bias=False)
        self.W_v = nn.Linear(input_dim, dim_v, bias=False)
        self.scale = 1.0 / math.sqrt(dim_k)

    def forward(self, x):
        query = self.W_q(x)           # bs * seq_len * dim_k
        key   = self.W_k(x)           # bs * seq_len * dim_k
        value = self.W_v(x)           # bs * seq_len * dim_v
        key_T = key.permute(0, 2, 1)  # bs * dim_k   * seq_len
        # scale the scores BEFORE the softmax (scaled dot-product attention)
        attention_score = torch.softmax(torch.bmm(query, key_T) * self.scale, dim=-1)
        # bs * seq_len * seq_len

        output = torch.bmm(attention_score, value)  # bs * seq_len * dim_v
        return output
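
A quick shape check for the class above (the dimensions here are my own choice):

att = self_attention(input_dim=4, dim_k=3, dim_v=5)
x = torch.rand(2, 6, 4)   # bs=2, seq_len=6, input_dim=4
print(att(x).shape)       # torch.Size([2, 6, 5])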

Shortest path
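
A minimal Dijkstra sketch for the "shortest path between two nodes in a directed graph" question listed above, assuming non-negative edge weights and an adjacency-list graph {u: [(v, w), ...]} (the graph format and function name are my own choices, not from the original).

import heapq

def dijkstra(graph, src, dst):
    dist = {src: 0}
    heap = [(0, src)]  # (distance-so-far, node)
    while heap:
        d, u = heapq.heappop(heap)
        if u == dst:
            return d
        if d > dist.get(u, float('inf')):
            continue  # stale heap entry
        for v, w in graph.get(u, []):
            nd = d + w
            if nd < dist.get(v, float('inf')):
                dist[v] = nd
                heapq.heappush(heap, (nd, v))
    return float('inf')  # dst unreachable from src

g = {'A': [('B', 1), ('C', 4)], 'B': [('C', 2)], 'C': []}
print(dijkstra(g, 'A', 'C'))  # 3  (A -> B -> C)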
