numpy 手动实现反卷积(转置卷积,ConvTranspose2d)代码

import numpy as np
import torch

def zy_deconv(img, in_channels, out_channels, kernels, bias, stride=1, padding=0, output_padding=0):
    """Manual 2-D transposed convolution ("deconvolution") in NumPy.

    Reference implementation that mirrors ``torch.nn.ConvTranspose2d``
    (dilation=1, groups=1, square stride/padding).

    Args:
        img: input of shape (N, in_channels, H, W); ndarray or torch.Tensor.
        in_channels: number of input channels; must equal img.shape[1].
        out_channels: number of output channels.
        kernels: weights of shape (in_channels, out_channels, kh, kw);
            ndarray or torch.Tensor.
        bias: per-output-channel bias of shape (out_channels,).
        stride: zero-interleaving factor, as in ConvTranspose2d.
        padding: transposed-conv padding, as in ConvTranspose2d.
            Assumes padding <= kernel_size - 1 (the usual case).
        output_padding: extra zeros added to the bottom/right edge only.

    Returns:
        torch.float32 tensor of shape
        (N, out_channels, (H-1)*stride - 2*padding + kh + output_padding,
                          (W-1)*stride - 2*padding + kw + output_padding).
    """
    # Accept torch tensors as well as ndarrays: detach first so that
    # grad-tracking weights (e.g. nn.ConvTranspose2d.weight) do not raise
    # when converted to NumPy.
    def _to_np(a):
        if isinstance(a, torch.Tensor):
            return a.detach().cpu().numpy().astype(np.float64)
        return np.asarray(a, dtype=np.float64)

    img = _to_np(img)
    kernels = _to_np(kernels)
    bias = _to_np(bias)

    N, C, H, W = img.shape
    _, _, kh, kw = kernels.shape

    # Step 1: insert (stride-1) zeros between neighbouring input pixels.
    # The upsampled size is stride*(H-1)+1.  (The original code used
    # stride*H-1, which coincides with this only at stride=2 and adds a
    # spurious trailing row/column for stride>2.)
    if stride > 1:
        up_h, up_w = stride * (H - 1) + 1, stride * (W - 1) + 1
        upsampled = np.zeros((N, in_channels, up_h, up_w))
        upsampled[:, :, ::stride, ::stride] = img
        img = upsampled
        H, W = up_h, up_w

    # Step 2: a transposed convolution with padding `padding` is an ordinary
    # valid convolution after padding each border with (kernel-padding-1)
    # zeros.  Computed per axis so non-square kernels also work.
    p_h = kh - padding - 1
    p_w = kw - padding - 1
    if p_h > 0 or p_w > 0:
        img = np.pad(img, ((0, 0), (0, 0), (p_h, p_h), (p_w, p_w)), 'constant')

    out_h = (H + 2 * p_h - kh) + 1 + output_padding
    out_w = (W + 2 * p_w - kw) + 1 + output_padding

    # Step 3: output_padding pads zeros on the bottom/right edge only.
    if output_padding:
        img = np.pad(img, ((0, 0), (0, 0), (0, output_padding), (0, output_padding)), 'constant')

    # Step 4: plain valid cross-correlation with the 180-degree-rotated kernel.
    flipped = kernels[:, :, ::-1, ::-1]
    outputs = np.zeros((N, out_channels, out_h, out_w))
    for n in range(N):
        for oc in range(out_channels):
            for ic in range(in_channels):
                for h in range(out_h):
                    for w in range(out_w):
                        outputs[n, oc, h, w] += np.sum(
                            img[n, ic, h:h + kh, w:w + kw] * flipped[ic, oc])
            # Add the bias exactly once per output channel.  (The original
            # gated this on the leftover loop variable `in_c`, which was
            # always true after the loop — correct only by accident.)
            outputs[n, oc] += bias[oc]

    # Return a float32 tensor to match PyTorch's default dtype.
    return torch.tensor(outputs, dtype=torch.float32)


if __name__ == '__main__':
    # Demo: run the hand-written transposed convolution on a small fixed input.
    # One image, three identical 3x3 channels.
    channel = [[1, 2, 3],
               [6, 5, 4],
               [7, 8, 9]]
    img = np.asarray([[channel, channel, channel]])

    # Fixed weights, shape (in_channels=3, out_channels=2, kh=3, kw=3),
    # written as a flat list and reshaped for readability.
    kernels = np.array([
        7.1967e-02,  3.0717e-02,  1.3104e-01,
        4.7125e-02,  6.7723e-02, -2.1911e-01,
       -1.8075e-04, -6.7567e-02, -1.5927e-02,
       -5.2011e-02,  1.8606e-01,  1.4314e-01,
        7.3310e-02, -9.1761e-02,  1.6812e-01,
       -2.3355e-01,  1.2247e-01, -6.2335e-02,
        7.3672e-02,  2.0140e-01, -1.5706e-01,
        8.9183e-02, -2.2074e-01, -3.8826e-02,
       -4.7068e-03,  7.9847e-02, -5.4270e-02,
       -9.3060e-02, -3.0037e-02, -4.0801e-02,
        1.8086e-02,  7.1836e-02, -7.5930e-02,
       -1.5923e-02, -1.9815e-01, -1.6305e-01,
       -1.3977e-01, -2.0699e-01,  7.0780e-02,
       -1.4483e-01,  9.8975e-02, -1.1932e-01,
       -1.3956e-02,  2.1206e-01, -1.3758e-01,
        5.5005e-02, -6.0732e-02, -2.0449e-02,
       -1.4679e-01, -9.1417e-03,  1.6225e-01,
       -1.1387e-01, -2.5464e-02, -1.8694e-01,
    ]).reshape(3, 2, 3, 3)

    # One bias value per output channel.
    bias = np.asarray([-0.0722, -0.1841])

    in_channels = 3
    out_channels = 2

    outputs = zy_deconv(img, in_channels, out_channels, kernels, bias,
                        stride=2, padding=1, output_padding=1)
    print(outputs)

输出:(此处原文为运行结果截图,略)
与 PyTorch 自带反卷积对比:

from __future__ import print_function, division
import torch
import os
import numpy as np
import torch.nn as nn
import zy_convTranspose2d

# Demo data: a batch of one image with three identical 3x3 channels.
img = np.asarray(
        [[
            [
                [1, 2, 3],
                [6, 5, 4],
                [7, 8, 9]
            ],
            [
                [1, 2, 3],
                [6, 5, 4],
                [7, 8, 9]
            ],
            [
                [1, 2, 3],
                [6, 5, 4],
                [7, 8, 9]
            ],
        ]]
)

# Convert the image to a float32 tensor for PyTorch.
img = torch.tensor(img, dtype=torch.float32)
# Arguments: in_channels, out_channels, kernel_size, stride, padding, output_padding.
# NOTE: the layer's weights/bias are randomly initialized, so each run differs.
deconv = nn.ConvTranspose2d(3, 2, 3, 2, 1, output_padding=1)

# Run PyTorch's built-in transposed convolution.
out = deconv(img)
print("卷积核:")
print(deconv.weight)
print("偏置:")
print(deconv.bias)
print("pytorch自带反卷积函数运行结果:")
print(out)

# Extract the kernel and bias as plain NumPy arrays (detach drops autograd).
# BUG FIX: the original assigned the detached weights to a misspelled name
# ("kernals"), so the grad-tracking torch.Tensor was passed on unchanged and
# np-based code downstream would fail or misbehave.
kernels = deconv.weight
kernels = kernels.detach().numpy()
bias = deconv.bias
bias = bias.detach().numpy()

in_channels = 3
out_channels = 2

# Feed the exact same weights and bias into the hand-written function and
# compare its output with PyTorch's result printed above.
outputs = zy_convTranspose2d.zy_deconv(img, in_channels, out_channels, kernels, bias, stride=2, padding=1, output_padding=1)
print("自编反卷积函数运行结果:")
print(outputs)

(此处原文为两张对比结果截图:PyTorch 自带函数与自编函数的输出一致,略)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值