Customizing Neural Network Convolution Kernel Weights in PyTorch

  1. Customizing convolution kernel weights
    One reason deep learning practitioners are so fond of neural networks is how convenient they are: you assemble a network like building blocks, and while assembling you only need to decide things such as the kernel size, the number of input and output channels, and the convolution mode.

    Once you are used to the built-in parameters, though, the first time you need custom kernel weights you may have no idea where to start. I ran into exactly this problem when I was new to neural networks, stepped into plenty of pits along the way, and could not find anyone sharing a solution anywhere. So I am writing my method down here, hoping it helps.
    

Without further ado, on to the main content…

  2. Defining the kernel weights
      Here I use custom DTT-coefficient kernel weights. The weight code comes first:

2.1 DTT coefficient weight code
  `def dtt_matrix(n)`: this function builds the n × n DTT coefficient matrix; mine is the 8 × 8 version.

  `def dtt_kernel(out_channels, in_channels, kernel_size)`: this function assembles the weights, which need four dimensions (output channels, input channels, kernel height, kernel width). There are quite a few details to get right here, and honestly the only way to remember them is to fall into the pits yourself, so I won't belabor each one.

```python
import numpy as np
import torch
import torch.nn as nn


# ================================
# DTT coefficient matrix of n * n
# ================================
def dtt_matrix(n):
    dtt_coe = np.zeros([n, n], dtype='float32')
    for i in range(0, n):
        dtt_coe[0, i] = 1 / np.sqrt(n)
        dtt_coe[1, i] = (2*i + 1 - n) * np.sqrt(3 / (n * (np.power(n, 2) - 1)))
    for i in range(1, n-1):
        dtt_coe[i+1, 0] = -np.sqrt((n-i-1) / (n+i+1)) \
            * np.sqrt((2*(i+1)+1) / (2*(i+1)-1)) * dtt_coe[i, 0]
        dtt_coe[i+1, 1] = (1 + (i+1) * (i+2) / (1-n)) * dtt_coe[i+1, 0]
        dtt_coe[i+1, n-1] = np.power(-1, i+1) * dtt_coe[i+1, 0]
        dtt_coe[i+1, n-2] = np.power(-1, i+1) * dtt_coe[i+1, 1]
        for j in range(2, int(n/2)):
            t1 = (-(i+1) * (i+2) - (2*j-1) * (j-n-1) - j) / (j * (n-j))
            t2 = ((j-1) * (j-n-1)) / (j * (n-j))
            dtt_coe[i+1, j] = t1 * dtt_coe[i+1, j-1] + t2 * dtt_coe[i+1, j-2]
            dtt_coe[i+1, n-j-1] = np.power(-1, i-1) * dtt_coe[i+1, j]
    return dtt_coe


# ===============================================================
# DTT coefficient matrix of (out_channels * in_channels * n * n)
# ===============================================================
def dtt_kernel(out_channels, in_channels, kernel_size):
    dtt_coe = dtt_matrix(kernel_size)
    dtt_coe = np.array(dtt_coe)

    # note: this assumes out_channels == kernel_size ** 2 (here 64 == 8 * 8)
    dtt_weight = np.zeros([out_channels, in_channels, kernel_size, kernel_size], dtype='float32')
    temp = np.zeros([out_channels, in_channels, kernel_size, kernel_size], dtype='float32')

    order = 0
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            dtt_row = dtt_coe[i, :]
            dtt_col = dtt_coe[:, j]
            dtt_row = dtt_row.reshape(len(dtt_row), 1)
            dtt_col = dtt_col.reshape(1, len(dtt_col))
            # each kernel is the outer product of one row and one column of
            # the DTT matrix, i.e. one 2-D DTT basis function
            temp[order, 0, :, :] = np.dot(dtt_row, dtt_col)
            order = order + 1
    # replicate the same bank of kernels across all input channels
    for i in range(0, in_channels):
        for j in range(0, out_channels):
            # dtt_weight[j, i, :, :] = flip_180(temp[j, 0, :, :])
            dtt_weight[j, i, :, :] = temp[j, 0, :, :]
    return torch.tensor(dtt_weight)
```
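A quick sanity check you can run (my own addition, not from the original post): the DTT coefficient matrix should be approximately orthonormal, and `dtt_kernel` should return a tensor of shape `(out_channels, in_channels, kernel_size, kernel_size)`:

```python
import numpy as np

# Smoke test (hypothetical): the DTT basis should be near-orthonormal,
# up to float32 round-off, if the recursion above is correct.
dtt_coe = dtt_matrix(8)
print(np.allclose(dtt_coe @ dtt_coe.T, np.eye(8), atol=1e-4))  # expect True

w = dtt_kernel(64, 2, 8)
print(w.shape)           # torch.Size([64, 2, 8, 8])
print(w.requires_grad)   # False: a plain tensor until wrapped in nn.Parameter
```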

2.2 'same'-style convolution
  If you need the data size to stay unchanged before and after the convolution, i.e. a 'same'-style convolution, you can use this Conv2d directly. Per dimension it computes the total padding needed as (out - 1) * stride + (kernel - 1) * dilation + 1 - in, and when that amount is odd it pads the extra row/column on the bottom/right, mirroring TensorFlow's 'same' rule. (A note of credit: I borrowed this from another author whose post I failed to bookmark at the time; many thanks, and if you pass by, please leave a comment so I can credit you properly.)

```python
import torch.utils.data
from torch.nn import functional as F
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
from torch.nn.modules.utils import _single, _pair, _triple


class _ConvNd(Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding, groups, bias):
        super(_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if transposed:
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def __repr__(self):
        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)


class Conv2d(_ConvNd):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias)

    def forward(self, input):
        return conv2d_same_padding(input, self.weight, self.bias, self.stride,
                                   self.padding, self.dilation, self.groups)


# custom conv2d, because this PyTorch version doesn't have a
# "padding='same'" option
def conv2d_same_padding(input, weight, bias=None, stride=1, padding=1,
                        dilation=1, groups=1):
    input_rows = input.size(2)
    filter_rows = weight.size(2)
    out_rows = (input_rows + stride[0] - 1) // stride[0]

    input_cols = input.size(3)
    filter_cols = weight.size(3)
    out_cols = (input_cols + stride[1] - 1) // stride[1]

    padding_rows = max(0, (out_rows - 1) * stride[0] +
                       (filter_rows - 1) * dilation[0] + 1 - input_rows)
    rows_odd = (padding_rows % 2 != 0)
    padding_cols = max(0, (out_cols - 1) * stride[1] +
                       (filter_cols - 1) * dilation[1] + 1 - input_cols)
    cols_odd = (padding_cols % 2 != 0)

    # if the total padding is odd, put the extra row/column on the
    # bottom/right before the symmetric padding done by F.conv2d
    if rows_odd or cols_odd:
        input = pad(input, [0, int(cols_odd), 0, int(rows_odd)])

    return F.conv2d(input, weight, bias, stride,
                    padding=(padding_rows // 2, padding_cols // 2),
                    dilation=dilation, groups=groups)
```
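A minimal usage check (my sketch, with made-up sizes): at stride 1, the output spatial size should equal the input size even for an even kernel width like 8:

```python
import torch

conv = Conv2d(2, 64, 8)          # the 'same'-padding Conv2d defined above
x = torch.randn(1, 2, 32, 32)    # hypothetical input: 2 channels, 32 x 32
print(conv(x).shape)             # expect torch.Size([1, 64, 32, 32])
```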

2.3 Assigning the weights to the conv kernel
  This is probably the part you care about most, so here we go.

This is a simple network model (one fixed convolution plus three fully connected layers, where each fully connected layer is a 1×1 Conv2d). The code is commented, so it should be easy to follow.

```python
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import dtt_kernel
import util
import paddingSame

# define the weights
dtt_weight1 = dtt_kernel.dtt_kernel(64, 2, 8)


class DttNet(nn.Module):
    def __init__(self):
        super(DttNet, self).__init__()

        self.conv1 = paddingSame.Conv2d(2, 64, 8)
        # assign the custom weights to the conv kernel
        self.conv1.weight = nn.Parameter(dtt_weight1, requires_grad=False)

        self.fc1 = util.fc(64, 512, 1)
        self.fc2 = util.fc(512, 128, 1)
        self.fc3 = util.fc(128, 2, 1, last=True)

    def forward(self, x):
        x = self.conv1(x)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
```
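Two notes on this. First, `requires_grad=False` freezes the DTT kernels, so they are never updated during training. Second, on newer PyTorch releases (1.9+), `nn.Conv2d` accepts `padding='same'` directly for stride-1 convolutions, so the same idea can be written without the custom module; a minimal sketch (names and settings are my assumptions, not the author's code):

```python
import torch
import torch.nn as nn
import dtt_kernel

conv1 = nn.Conv2d(2, 64, 8, padding='same')
with torch.no_grad():
    conv1.weight.copy_(dtt_kernel.dtt_kernel(64, 2, 8))  # plant the DTT weights
conv1.weight.requires_grad_(False)  # freeze, like requires_grad=False above
```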

2.4 My util module, for completeness
```python
import torch.nn as nn


def conv(in_channels, out_channels, kernel_size, stride=1, dilation=1, batch_norm=True):
    if batch_norm:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
            nn.ReLU()
        )


def fc(in_channels, out_channels, kernel_size, stride=1, bias=True, last=False):
    if last:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )
```
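Note that `fc` here is a 1×1 convolution, which behaves like a per-pixel fully connected layer: it mixes channels at every spatial position while leaving H and W untouched. A quick illustration (shapes are my example):

```python
import torch

layer = fc(64, 512, 1)           # 1x1 conv: "fully connected" across channels
x = torch.randn(1, 64, 16, 16)
print(layer(x).shape)            # torch.Size([1, 512, 16, 16]); H, W unchanged
```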

And here is my variant of the module for a newer PyTorch version. Note that its `Conv726d` class additionally multiplies the weights by a fixed cross-shaped 0/1 mask before convolving:

```python
import math
import torch
from torch.nn import functional as F  # needed by conv2d_forward below
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn import init


class _ConvNd(Module):

    __constants__ = ['stride', 'padding', 'dilation', 'groups', 'bias',
                     'padding_mode', 'output_padding', 'in_channels',
                     'out_channels', 'kernel_size']

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding,
                 groups, bias, padding_mode):
        super(_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.padding_mode = padding_mode
        if transposed:
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def extra_repr(self):
        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        if self.padding_mode != 'zeros':
            s += ', padding_mode={padding_mode}'
        return s.format(**self.__dict__)

    def __setstate__(self, state):
        super(_ConvNd, self).__setstate__(state)
        if not hasattr(self, 'padding_mode'):
            self.padding_mode = 'zeros'


class Conv726d(_ConvNd):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Conv726d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias, padding_mode)

    def conv2d_forward(self, input, weight):
        # fixed cross-shaped 0/1 mask: only the 4-neighbourhood taps of each
        # 3x3 kernel take part in the convolution (float to match the weights)
        mask = torch.tensor([[[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]]]).cuda()
        if self.padding_mode == 'circular':
            expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
                                (self.padding[0] + 1) // 2, self.padding[0] // 2)
            return F.conv2d(F.pad(input, expanded_padding, mode='circular'),
                            weight * mask, self.bias, self.stride,
                            _pair(0), self.dilation, self.groups)
        return F.conv2d(input, weight * mask, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def forward(self, input):
        return self.conv2d_forward(input, self.weight)
```
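One caveat about `Conv726d` (my observation): the mask tensor is built with `.cuda()` on every forward pass, which fails on CPU-only machines and adds per-call overhead. A device-agnostic sketch of the same masking idea (my assumption, not the original code) would register the mask once as a buffer:

```python
import torch

# Sketch: register the cross mask as a buffer in __init__ so it moves with
# the module via module.to(device), instead of hard-coding .cuda():
#     self.register_buffer('mask', torch.tensor([[[0., 1., 0.],
#                                                 [1., 0., 1.],
#                                                 [0., 1., 0.]]]))
# then in conv2d_forward convolve with: weight * self.mask
mask = torch.tensor([[[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]]])
weight = torch.randn(64, 2, 3, 3)   # hypothetical 3x3 kernels
print((weight * mask)[0, 0])        # centre and corner taps are zeroed
```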
  3. Summary
      That's a wrap! I hope you got something out of this. If anything is unclear, leave a comment below; I keep an eye on my blog regularly. And if you spot any mistakes, please point them out in the comments too. See you next time!

Do good deeds, and don't ask about the road ahead.
