Convolutional Neural Networks (1): Convolution Operators (Code)

Lab tasks: the convolution operation and convolution operators

Implemented with PyTorch:

1. A custom 2D convolution operator

import torch
import torch.nn as nn
import numpy as np
# 2D convolution operator
class Conv2D(nn.Module):
    def __init__(self,kernel_size):
        super(Conv2D,self).__init__()
        w=torch.tensor(np.array([[0.,1.],[2.,3.]],dtype='float32').reshape([kernel_size,kernel_size]))
        self.weight=torch.nn.Parameter(w,requires_grad=True)

    def forward(self,X):
        u,v=self.weight.shape
        output=torch.zeros([X.shape[0],X.shape[1]-u+1,X.shape[2]-v+1])
        for i in range(output.shape[1]):
            for j in range(output.shape[2]):
                output[:,i,j]=torch.sum(X[:,i:i+u,j:j+v]*self.weight,axis=[1,2])
        return output

torch.manual_seed(100)
inputs=torch.tensor([[[1.,2.,3.],[4.,5.,6.],[7.,8.,9.]]])
conv2d=Conv2D(kernel_size=2)
outputs=conv2d(inputs)
print("input:{},\noutput:{}".format(inputs,outputs))

2. A custom 2D convolution operator with stride and zero padding
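For reference (this is implied by the code below rather than stated explicitly), an $H \times W$ input convolved with a $K \times K$ kernel, padding $P$ and stride $S$ yields an output of size

$$H_{out} = \left\lfloor \frac{H + 2P - K}{S} \right\rfloor + 1, \qquad W_{out} = \left\lfloor \frac{W + 2P - K}{S} \right\rfloor + 1,$$

which is exactly what the output_w / output_h lines in forward() compute.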

# 2D convolution operator with stride and zero padding
class Conv2D(nn.Module):
    def __init__(self,kernel_size,stride=1,padding=0):
        super(Conv2D,self).__init__()
        w=torch.tensor(np.array([[0.,1.,2.],[3.,4.,5.],[6.,7.,8.]],dtype='float32').reshape([kernel_size,kernel_size]))
        self.weight=torch.nn.Parameter(w,requires_grad=True)
        self.stride=stride    # stride
        self.padding=padding  # zero padding
    def forward(self,X):
        new_X=torch.zeros([X.shape[0],X.shape[1]+2*self.padding,X.shape[2]+2*self.padding])
        new_X[:,self.padding:X.shape[1]+self.padding,self.padding:X.shape[2]+self.padding]=X  # copy X into the padded tensor
        u,v=self.weight.shape
        output_w=(new_X.shape[1]-u)//self.stride+1
        output_h=(new_X.shape[2]-v)//self.stride+1
        output=torch.zeros([X.shape[0],output_w,output_h])
        for i in range(0,output.shape[1]):
            for j in range(0,output.shape[2]):
                output[:,i,j]=torch.sum(new_X[:,self.stride*i:self.stride*i+u,self.stride*j:self.stride*j+v]*self.weight,axis=[1,2])
        return output
inputs=torch.randn([2,8,8])
conv2d_padding=Conv2D(kernel_size=3,padding=1)
outputs=conv2d_padding(inputs)
print("When kernel_size=3, padding=1 stride=1, input's shape: {}, output's shape: {}".format(inputs.shape, outputs.shape))
conv2d_stride = Conv2D(kernel_size=3, stride=2, padding=1)
outputs = conv2d_stride(inputs)
print("When kernel_size=3, padding=1 stride=2, input's shape: {}, output's shape: {}".format(inputs.shape, outputs.shape))

3. Image edge detection
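The operator below hard-codes a 3x3 Laplacian-style edge-detection kernel

$$\begin{bmatrix} -1 & -1 & -1 \\ -1 & 8 & -1 \\ -1 & -1 & -1 \end{bmatrix}$$

whose entries sum to zero, so flat image regions respond with values near zero while sharp intensity changes (edges) produce large responses.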

# Image edge detection using the convolution operation
import matplotlib.pyplot as plt
from PIL import Image
class Conv2D(nn.Module):
    def __init__(self, kernel_size,stride=1, padding=0):
        super(Conv2D, self).__init__()
        # Set the kernel weights (3x3 Laplacian edge-detection kernel)
        w = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype='float32').reshape((3,3))
        w=torch.from_numpy(w)
        self.weight = torch.nn.Parameter(w, requires_grad=True)
        self.stride = stride
        self.padding = padding

    def forward(self, X):
        # Zero padding
        new_X = torch.zeros([X.shape[0], X.shape[1] + 2 * self.padding, X.shape[2] + 2 * self.padding])
        new_X[:, self.padding:X.shape[1] + self.padding, self.padding:X.shape[2] + self.padding] = X
        u, v = self.weight.shape
        output_w = (new_X.shape[1] - u) // self.stride + 1
        output_h = (new_X.shape[2] - v) // self.stride + 1
        output = torch.zeros([X.shape[0], output_w, output_h])
        for i in range(0, output.shape[1]):
            for j in range(0, output.shape[2]):
                output[:, i, j] = torch.sum(
                    new_X[:, self.stride * i:self.stride * i + u, self.stride * j:self.stride * j + v] * self.weight,
                    axis=[1, 2])
        return output


img=Image.open('C:\\Users\\26980\\Desktop\\hehua.jpg').convert('L')
w = np.array(img, dtype='float32')
im = torch.from_numpy(w.reshape((w.shape[0],w.shape[1])))

# Create the convolution operator with a 3x3 kernel, using the values set above as the initial kernel weights
conv = Conv2D(kernel_size=3, stride=1, padding=0)

# Convert the loaded image to a float32 numpy.ndarray
inputs = np.array(im).astype('float32')
print("Input before converting to a Tensor:", inputs)
# Convert the image to a Tensor
inputs = torch.as_tensor(inputs)
print("Input before unsqueeze:", inputs)
inputs = torch.unsqueeze(inputs, dim=0)
print("Input after unsqueeze:", inputs)
outputs = conv(inputs)
print(outputs)

# Visualize the results
plt.subplot(121).set_title('input image', fontsize=15)
img=np.array(img)
plt.imshow(img.astype('uint8'),cmap='gray')
plt.subplot(122).set_title('output feature map', fontsize=15)
plt.imshow(outputs.squeeze().detach().numpy(), cmap='gray')
plt.savefig('conv-vis.pdf')
plt.show()

4. Custom convolutional layer and pooling layer operators
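For a layer with $D$ input channels and $P$ output channels, each output feature map is computed as (stated here for reference)

$$Z_p = \sum_{d=1}^{D} W_{p,d} \otimes X_d + b_p, \qquad p = 1, \dots, P,$$

where $\otimes$ is the 2D cross-correlation implemented by single_forward(); the forward() loop below follows this formula one output channel at a time.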

# Multi-channel convolutional layer operator
import torch
import torch.nn as nn
class Conv2D(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(Conv2D, self).__init__()
        # Create the kernel weights (all initialized to 1.0 here)
        weight = torch.zeros([out_channels, in_channels, kernel_size, kernel_size])
        weight = nn.init.constant_(weight, val=1.0)
        self.weight = torch.nn.Parameter(weight)
        # Create the bias (initialized to 0.0; the value can be adjusted)
        bias = torch.zeros([out_channels, 1])
        bias = nn.init.constant_(bias, val=0.0)
        self.bias = torch.nn.Parameter(bias)
        # Stride
        self.stride = stride
        # Zero padding
        self.padding = padding
        # Number of input channels
        self.in_channels = in_channels
        # Number of output channels
        self.out_channels = out_channels

    # Basic convolution over a single input/kernel channel
    def single_forward(self, X, weight):
        # Zero padding
        new_X = torch.zeros([X.shape[0], X.shape[1]+2*self.padding, X.shape[2]+2*self.padding])
        new_X[:, self.padding:X.shape[1]+self.padding, self.padding:X.shape[2]+self.padding] = X
        u, v = weight.shape
        output_w = (new_X.shape[1] - u) // self.stride + 1
        output_h = (new_X.shape[2] - v) // self.stride + 1
        output = torch.zeros([X.shape[0], output_w, output_h])
        for i in range(0, output.shape[1]):
            for j in range(0, output.shape[2]):
                output[:, i, j] = torch.sum(
                    new_X[:, self.stride*i:self.stride*i+u, self.stride*j:self.stride*j+v]*weight,
                    axis=[1,2])
        return output

    def forward(self, inputs):
        feature_maps = []
        p=0
        for w, b in zip(self.weight, self.bias):  # P pairs (w, b); each iteration computes one feature map Zp
            multi_outs = []
            # Convolve each input channel with its corresponding kernel slice
            for i in range(self.in_channels):
                single = self.single_forward(inputs[:,i,:,:], w[i])
                multi_outs.append(single)
                # print("Conv2D in_channels:",self.in_channels,"i:",i,"single:",single.shape)
            # Sum the per-channel convolution results
            feature_map = torch.sum(torch.stack(multi_outs), axis=0) + b #Zp
            feature_maps.append(feature_map)
            # print("Conv2D out_channels:",self.out_channels, "p:",p,"feature_map:",feature_map.shape)
            p+=1
        # Stack all Zp along a new channel dimension
        out = torch.stack(feature_maps, 1)
        return out

inputs = torch.tensor([[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
               [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]])
conv2d = Conv2D(in_channels=2, out_channels=3, kernel_size=2)
print("inputs shape:",inputs.shape)
outputs = conv2d(inputs)
print("Conv2D outputs shape:",outputs.shape)

# Compare with the torch API result
weight_attr = torch.ones([3, 2, 2, 2])
bias_attr = torch.zeros([3])
conv2d_torch = nn.Conv2d(in_channels=2, out_channels=3, kernel_size=2, bias=True)
conv2d_torch.weight = torch.nn.Parameter(weight_attr)
conv2d_torch.bias = torch.nn.Parameter(bias_attr)  # zero bias, so the results match the custom operator
outputs_torch = conv2d_torch(inputs)
# Result of the custom operator
print('Conv2D outputs:', outputs)
# Result of the torch API
print('nn.Conv2D outputs:', outputs_torch)
# Pooling layer operator
import torch
import torch.nn as nn
class Pool2D(nn.Module):
    def __init__(self, size=(2, 2), mode='max', stride=1):
        super(Pool2D, self).__init__()
        # Pooling mode ('max' or 'avg')
        self.mode = mode
        self.h, self.w = size
        self.stride = stride

    def forward(self, x):
        output_w = (x.shape[2] - self.w) // self.stride + 1
        output_h = (x.shape[3] - self.h) // self.stride + 1
        output = torch.zeros([x.shape[0], x.shape[1], output_w, output_h])
        # Pooling
        for i in range(output.shape[2]):
            for j in range(output.shape[3]):
                # Max pooling: one maximum per sample and channel over the spatial window
                if self.mode == 'max':
                    output[:, :, i, j] = torch.amax(
                        x[:, :, self.stride * i:self.stride * i + self.w, self.stride * j:self.stride * j + self.h],
                        dim=(2, 3))
                # Average pooling
                elif self.mode == 'avg':
                    output[:, :, i, j] = torch.mean(
                        x[:, :, self.stride * i:self.stride * i + self.w, self.stride * j:self.stride * j + self.h],
                        dim=(2, 3))


        return output

inputs = torch.tensor([[[[1., 2., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])
import time
time1 = time.time()
pool2d = Pool2D(stride=2)
outputs = pool2d(inputs)
time2 = time.time()
print("input: {}, \noutput: {}".format(inputs.shape, outputs.shape))
# Compare Maxpool2D with the torch API result
time3 = time.time()
maxpool2d_torch = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
outputs_torch = maxpool2d_torch(inputs)
time4 = time.time()
# Result of the custom operator
print('Maxpool2D outputs:', outputs)
# Result of the torch API
print('nn.Maxpool2D outputs:', outputs_torch)

print("自定义汇聚算子时间耗费:",time2-time1)
print("框架算子时间耗费:",time4-time3)

# Compare Avgpool2D with the torch API result
avgpool2d_torch = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
outputs_torch = avgpool2d_torch(inputs)
pool2d = Pool2D(mode='avg', stride=2)
outputs = pool2d(inputs)

# Result of the custom operator
print('Avgpool2D outputs:', outputs)
# Result of the torch API
print('nn.Avgpool2D outputs:', outputs_torch)
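
One more sanity check (added): both pooling modes can be verified against the functional torch API on the same input:

import torch.nn.functional as F
# Both custom modes should reproduce the framework pooling exactly.
print("max matches:", torch.allclose(Pool2D(mode='max', stride=2)(inputs), F.max_pool2d(inputs, kernel_size=2, stride=2)))
print("avg matches:", torch.allclose(Pool2D(mode='avg', stride=2)(inputs), F.avg_pool2d(inputs, kernel_size=2, stride=2)))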

5. Implement the convolution below with both the custom convolution operator and torch.nn.Conv2d()

import torch
import torch.nn as nn

class Conv2D(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, weight=[], bias=[]):
        super(Conv2D, self).__init__()
        # Kernel weights and bias are passed in from outside (no internal initialization)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        # Stride
        self.stride = stride
        # Zero padding
        self.padding = padding
        # Number of input channels
        self.in_channels = in_channels
        # Number of output channels
        self.out_channels = out_channels

    # Basic convolution over a single input/kernel channel
    def single_forward(self, X, weight):
        # Zero padding
        new_X = torch.zeros([X.shape[0], X.shape[1]+2*self.padding, X.shape[2]+2*self.padding])
        new_X[:, self.padding:X.shape[1]+self.padding, self.padding:X.shape[2]+self.padding] = X
        u, v = weight.shape
        output_w = (new_X.shape[1] - u) // self.stride + 1
        output_h = (new_X.shape[2] - v) // self.stride + 1
        output = torch.zeros([X.shape[0], output_w, output_h])
        for i in range(0, output.shape[1]):
            for j in range(0, output.shape[2]):
                output[:, i, j] = torch.sum(
                    new_X[:, self.stride*i:self.stride*i+u, self.stride*j:self.stride*j+v]*weight,
                    axis=[1,2])
        return output

    def forward(self, inputs):
        feature_maps = []
        p=0
        for w, b in zip(self.weight, self.bias):  # P pairs (w, b); each iteration computes one feature map Zp
            multi_outs = []
            # Convolve each input channel with its corresponding kernel slice
            for i in range(self.in_channels):
                single = self.single_forward(inputs[:,i,:,:], w[i])
                multi_outs.append(single)
                # print("Conv2D in_channels:",self.in_channels,"i:",i,"single:",single.shape)
            # Sum the per-channel convolution results
            feature_map = torch.sum(torch.stack(multi_outs), axis=0) + b #Zp
            feature_maps.append(feature_map)
            # print("Conv2D out_channels:",self.out_channels, "p:",p,"feature_map:",feature_map.shape)
            p+=1
        # Stack all Zp along a new channel dimension
        out = torch.stack(feature_maps, 1)
        return out

inputs = torch.tensor([[[0.0, 1.0, 1.0, 0.0, 2.0], [2.0, 2.0, 2.0, 2.0, 1.0], [1.0, 0.0, 0.0, 2.0, 1.0],
                        [0.0, 1.0, 1.0, 0.0, 0.0], [1.0, 2.0, 0.0, 0.0, 2.0]],
                       [[1.0, 0.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 2.0, 0.0], [1.0, 2.0, 1.0, 2.0, 1.0],
                        [1.0, 0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 1.0, 1.0, 1.0]],
                       [[2.0, 1.0, 2.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0, 0.0], [0.0, 2.0, 1.0, 0.0, 1.0],
                        [0.0, 1.0, 2.0, 2.0, 2.0], [2.0, 1.0, 0.0, 0.0, 1.0]]]).reshape([1, 3, 5, 5])
# Convolution layer 1
w1 = torch.tensor(
    [[[-1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0]], [[-1.0, -1.0, 0.0], [0.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
     [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, -1.0, -1.0]]], dtype=torch.float32).reshape([1, 3, 3, 3])
bias1 = torch.ones([3, 1]).clone().detach()
conv2d1 = Conv2D(in_channels=3, out_channels=1, kernel_size=3, stride=2, padding=1, weight=w1, bias=bias1)
outputs1 = conv2d1(inputs)
print("卷积层1输出:", outputs1)
# 卷积层2
w2 = torch.tensor(
    [[[1.0, 1.0, -1.0], [-1.0, -1.0, 1.0], [0.0, -1.0, 1.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, -1.0], [-1.0, 1.0, 0.0]],
     [[-1.0, 0.0, 0.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 0.0]]], dtype=torch.float32).reshape([1, 3, 3, 3])
bias2 = torch.zeros([3, 1]).clone().detach()
conv2d2 = Conv2D(in_channels=3, out_channels=1, kernel_size=3, stride=2, padding=1, weight=w2, bias=bias2)
outputs2 = conv2d2(inputs)
print("卷积层2输出:", outputs2)
