The Convolution Operation in Convolutional Neural Networks

Implementing Convolution with Tensor Operations

The key building block is Tensor.unfold, which extracts sliding windows of a given size and stride along one dimension; applying it twice (first along the rows, then along the columns) enumerates every 3x3 window of a 2-D tensor.

# Implementing convolution with tensor operations
import torch
a = torch.arange(16).view(4, 4)
print(a)
'''
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11],
        [12, 13, 14, 15]])
'''
b = a.unfold(0, 3, 1)  # unfold along dim 0 (rows): windows of 3 elements, stride 1
print(b)
'''
tensor([[[ 0,  4,  8],
         [ 1,  5,  9],
         [ 2,  6, 10],
         [ 3,  7, 11]],

        [[ 4,  8, 12],
         [ 5,  9, 13],
         [ 6, 10, 14],
         [ 7, 11, 15]]])
'''
print(b.shape)  # torch.Size([2, 4, 3])
c = b.unfold(1, 3, 1)  # unfold b along dim 1 (columns) the same way; c now holds every 3x3 sliding window
print(c)
'''
tensor([[[[ 0,  1,  2],
          [ 4,  5,  6],
          [ 8,  9, 10]],

         [[ 1,  2,  3],
          [ 5,  6,  7],
          [ 9, 10, 11]]],


        [[[ 4,  5,  6],
          [ 8,  9, 10],
          [12, 13, 14]],

         [[ 5,  6,  7],
          [ 9, 10, 11],
          [13, 14, 15]]]])
'''
print(c.shape)  # torch.Size([2, 2, 3, 3])
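
Each 3x3 block in c is one sliding window of a, so a stride-1, no-padding convolution is just "multiply every window elementwise by a kernel and sum". A minimal sketch continuing the example above (the all-ones kernel is chosen arbitrarily for illustration):

# Convolve a with a 3x3 kernel via the unfolded windows in c.
# The all-ones kernel is illustrative only.
kernel = torch.ones(3, 3)
out = torch.einsum('hwkj,kj->hw', c.float(), kernel)  # sum each window times the kernel
print(out)
'''
tensor([[45., 54.],
        [81., 90.]])
'''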

Implementing the Full Convolution of a CNN

Implement a standard convolution layer over 4-D (NCHW) tensors, with configurable zero-padding and stride:

1. conv.py

# A standard convolution layer with zero-padding and stride
import torch


def conv2d(x, weight, bias, stride, pad):
    # x: input, weight: kernels, bias: bias values, stride: stride, pad: padding size
    n, c, h_in, w_in = x.shape  # batch size, input channels, height, width
    d, c, k, j = weight.shape  # output channels, input channels, kernel height, kernel width
    x_pad = torch.zeros(n, c, h_in + 2 * pad, w_in + 2 * pad,
                        device=x.device, dtype=x.dtype)
    x_pad[:, :, pad:pad + h_in, pad:pad + w_in] = x  # zero-pad the input (also correct when pad == 0)

    # Unfold into sliding windows
    x_pad = x_pad.unfold(2, k, stride)
    x_pad = x_pad.unfold(3, j, stride)

    # Multiply each window by the kernels and accumulate over input channels and kernel positions
    out = torch.einsum(
        'nchwkj,dckj->ndhw',
        x_pad, weight
    )
    out = out + bias.view(1, -1, 1, 1)  # add the bias
    return out
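
The einsum equation reads out[n, d, h, w] = sum over c, k, j of x_pad[n, c, h, w, k, j] * weight[d, c, k, j], and the two unfold calls fix the output spatial size at the standard h_out = (h_in + 2 * pad - k) // stride + 1 (likewise for the width). A quick sanity check with the settings used in the test below (a sketch, not part of conv.py):

h_in, k, stride, pad = 5, 3, 2, 2
h_out = (h_in + 2 * pad - k) // stride + 1
print(h_out)  # 4, matching the 4x4 spatial output F.conv2d produces below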

2. test_conv.py

import torch.nn.functional as F
import torch
from conv import conv2d
x = torch.randn(2, 3, 5, 5, requires_grad=True)
w = torch.randn(4, 3, 3, 3, requires_grad=True)
b = torch.randn(4, requires_grad=True)
stride = 2
pad = 2
torch_out = F.conv2d(x, w, b, stride, pad)
my_out = conv2d(x, w, b, stride, pad)

# The maximum elementwise difference between the two results is within 1e-5
print('torch_out == my_out :', torch.allclose(torch_out, my_out, atol=1e-05))
'''
torch_out == my_out : True
'''

# Check that the backpropagated gradients w.r.t. the input agree for both methods
grad_out = torch.randn(*torch_out.shape)  # upstream gradient at the output
grad_x = torch.autograd.grad(torch_out, x, grad_out, retain_graph=True)[0]
my_grad_x = torch.autograd.grad(my_out, x, grad_out, retain_graph=True)[0]
print('grad_x == my_grad_x :', torch.allclose(grad_x, my_grad_x, atol=1e-05))
'''
grad_x == my_grad_x : True
'''

# Gradients w.r.t. the weights
grad_w = torch.autograd.grad(torch_out, w, grad_out, retain_graph=True)[0]
my_grad_w = torch.autograd.grad(my_out, w, grad_out, retain_graph=True)[0]
print('grad_w == my_grad_w :', torch.allclose(grad_w, my_grad_w, atol=1e-05))
'''
grad_w == my_grad_w : True
'''

# Check that the backpropagated gradients w.r.t. the bias agree
grad_b = torch.autograd.grad(torch_out, b, grad_out, retain_graph=True)[0]
my_grad_b = torch.autograd.grad(my_out, b, grad_out, retain_graph=True)[0]
print('grad_b == my_grad_b :', torch.allclose(grad_b, my_grad_b, atol=1e-05))
'''
grad_b == my_grad_b : True
'''
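
The test above exercises a single (stride, pad) setting; appending a small sweep to test_conv.py gives broader coverage. A minimal sketch (the shapes and settings below are arbitrary choices):

# Optional extra check: sweep several stride/padding combinations.
for stride in (1, 2, 3):
    for pad in (1, 2):
        x = torch.randn(2, 3, 7, 7)
        w = torch.randn(4, 3, 3, 3)
        b = torch.randn(4)
        assert torch.allclose(F.conv2d(x, w, b, stride, pad),
                              conv2d(x, w, b, stride, pad), atol=1e-05)
print('all stride/pad combinations match')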