# A complete set of examples demonstrating the TensorFlow convolution functions.

import tensorflow as tfw
import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# tf.nn.convolution: computes the sum of an N-D convolution.
# Input is NHWC-shaped (10, 9, 9, 3); the kernel is
# (filter_height, filter_width, in_channels, out_channels) = (2, 2, 3, 2),
# so with SAME padding the spatial size is kept and the output has 2 channels.
input_data = tfw.Variable(np.random.rand(10, 9, 9, 3), dtype=np.float32)
filter_data = tfw.Variable(np.random.rand(2, 2, 3, 2), dtype=np.float32)
y = tfw.nn.convolution(input_data, filter_data, strides=[1, 1], padding='SAME')
print('1.tfw.nn.convolution : ', y)
# 1.tfw.nn.convolution :  Tensor("convolution:0", shape=(10, 9, 9, 2), dtype=float32)


# Output: 1.tfw.nn.convolution :  Tensor("convolution:0", shape=(10, 9, 9, 2), dtype=float32)
# tf.nn.conv2d: 2-D convolution of a 4-D NHWC input with a 4-D kernel
# (filter_height, filter_width, in_channels, out_channels).
input_data = tfw.Variable(np.random.rand(10, 9, 9, 3), dtype=np.float32)
filter_data = tfw.Variable(np.random.rand(2, 2, 3, 2), dtype=np.float32)
y = tfw.nn.conv2d(input_data, filter_data, strides=[1, 1, 1, 1], padding='SAME')
print('2.tfw.nn.conv2d : ', y)
# 2.tfw.nn.conv2d :  Tensor("Conv2D:0", shape=(10, 9, 9, 2), dtype=float32)

# Output: 2.tfw.nn.conv2d :  Tensor("Conv2D:0", shape=(10, 9, 9, 2), dtype=float32)
# tf.nn.depthwise_conv2d: each input channel is convolved separately with its
# own channel_multiplier filters and the per-channel results are concatenated.
# Input:  [batch, in_height, in_width, in_channels];
# kernel: [filter_height, filter_width, in_channels, channel_multiplier]
#         (the original comment repeated filter_height by mistake).
# Output channels: in_channels * channel_multiplier = 3 * 2 = 6 here.
input_data = tfw.Variable(np.random.rand(10, 9, 9, 3), dtype=np.float32)
filter_data = tfw.Variable(np.random.rand(2, 2, 3, 2), dtype=np.float32)
y = tfw.nn.depthwise_conv2d(input_data, filter_data, strides=[1, 1, 1, 1], padding='SAME')
print('3.tfw.nn.depthwise_conv2d : ', y)
# 3.tfw.nn.depthwise_conv2d :  Tensor("depthwise:0", shape=(10, 9, 9, 6), dtype=float32)


# Output: 3.tfw.nn.depthwise_conv2d :  Tensor("depthwise:0", shape=(10, 9, 9, 6), dtype=float32)
# tf.nn.separable_conv2d: a depthwise convolution followed by a 1x1 pointwise
# convolution. The depthwise kernel (2, 2, 3, 5) applies channel_multiplier=5
# filters per input channel, giving 3 * 5 = 15 intermediate channels; the
# pointwise kernel (1, 1, 15, 20) then mixes them into 20 output channels.
input_data = tfw.Variable(np.random.rand(10, 9, 9, 3), dtype=np.float32)
depthwise_filter = tfw.Variable(np.random.rand(2, 2, 3, 5), dtype=np.float32)
pointwise_filter = tfw.Variable(np.random.rand(1, 1, 15, 20), dtype=np.float32)
# NOTE(review): the original noted out_channels >= in_channels * channel_multiplier;
# some TF releases rejected "overparameterized" separable convolutions — verify
# against the installed TF version.
y = tfw.nn.separable_conv2d(input_data,
                            depthwise_filter=depthwise_filter,
                            pointwise_filter=pointwise_filter,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
print('4.tfw.nn.separable_conv2d : ', y)
# 4.tfw.nn.separable_conv2d :  Tensor("separable_conv2d:0", shape=(10, 9, 9, 20), dtype=float32)

# Output: 4.tfw.nn.separable_conv2d :  Tensor("separable_conv2d:0", shape=(10, 9, 9, 20), dtype=float32)
# tf.nn.atrous_conv2d: atrous convolution, also known as "hole" or dilated
# convolution, here with dilation rate 2; SAME padding keeps the 5x5 size.
input_data = tfw.Variable(np.random.rand(1, 5, 5, 1), dtype=np.float32)
filters = tfw.Variable(np.random.rand(3, 3, 1, 1), dtype=np.float32)
y = tfw.nn.atrous_conv2d(input_data, filters, 2, padding='SAME')
print('5.tfw.nn.atrous_conv2d : ', y)
# 5.tfw.nn.atrous_conv2d :  Tensor("convolution_1/BatchToSpaceND:0", shape=(1, 5, 5, 1), dtype=float32)


# Output: 5.tfw.nn.atrous_conv2d :  Tensor("convolution_1/BatchToSpaceND:0", shape=(1, 5, 5, 1), dtype=float32)
# tf.nn.conv2d_transpose: sometimes called "deconvolution" in deconvolutional
# networks, but it is actually the transpose (gradient) of conv2d, not a true
# inverse. x is NHWC (1, 3, 3, 1); the kernel shape is
# (height, width, output_channels, input_channels) = (2, 2, 3, 1).
x = tfw.random_normal(shape=[1, 3, 3, 1])
kernal = tfw.random_normal(shape=[2, 2, 3, 1])
y = tfw.nn.conv2d_transpose(x, kernal, output_shape=[1, 5, 5, 3],
                            strides=[1, 2, 2, 1], padding='SAME')
# Fixed typo in the printed label: "conv2d_reanspose" -> "conv2d_transpose".
print('6.tfw.nn.conv2d_transpose : ', y)
# 6.tfw.nn.conv2d_transpose :  Tensor("conv2d_transpose:0", shape=(1, 5, 5, 3), dtype=float32)


# Output: 6.tfw.nn.conv2d_transpose :  Tensor("conv2d_transpose:0", shape=(1, 5, 5, 3), dtype=float32)
# tf.nn.conv1d: 1-D convolution of a 3-D input.
# NOTE(review): the original comments claimed 5-D shapes; conv1d actually takes
# a 3-D input [batch, in_width, in_channels] and a 3-D filter
# [filter_width, in_channels, out_channels]. stride is a single positive
# integer giving the step size.
input_data = tfw.Variable(np.random.rand(1, 5, 1), dtype=np.float32)
filters = tfw.Variable(np.random.rand(3, 1, 3), dtype=np.float32)
y = tfw.nn.conv1d(input_data, filters, stride=2, padding='SAME')
print('7.tfw.nn.conv1d : ', y)
# 7.tfw.nn.conv1d :  Tensor("conv1d/Squeeze:0", shape=(1, 3, 3), dtype=float32)


# Output: 7.tfw.nn.conv1d :  Tensor("conv1d/Squeeze:0", shape=(1, 3, 3), dtype=float32)
# tf.nn.conv3d: 3-D convolution of a 5-D input, analogous to conv2d.
# Input:  [batch, in_depth, in_height, in_width, in_channels];
# kernel: [filter_depth, filter_height, filter_width, in_channels, out_channels]
#         (the original comment omitted filter_width).
# strides has 5 entries [batch, depth, height, width, channel] and must
# satisfy strides[0] == strides[4] == 1.
input_data = tfw.Variable(np.random.rand(1, 2, 5, 5, 1), dtype=np.float32)
filters = tfw.Variable(np.random.rand(2, 3, 3, 1, 3), dtype=np.float32)
y = tfw.nn.conv3d(input_data, filters, strides=[1, 2, 2, 1, 1], padding='SAME')
print('8.tfw.nn.conv3d : ', y)
# 8.tfw.nn.conv3d :  Tensor("Conv3D:0", shape=(1, 1, 3, 5, 3), dtype=float32)


# Output: 8.tfw.nn.conv3d :  Tensor("Conv3D:0", shape=(1, 1, 3, 5, 3), dtype=float32)
# tf.nn.conv3d_transpose: the 3-D analogue of conv2d_transpose. Sometimes
# called "deconvolution" in deconvolutional networks, but it is the transpose
# of conv3d, not a true inverse. x is NDHWC (2, 1, 3, 3, 1); the kernel shape
# is (depth, height, width, output_channels, input_channels) = (2, 2, 2, 3, 1).
x = tfw.random_normal(shape=[2, 1, 3, 3, 1])
kernal = tfw.random_normal(shape=[2, 2, 2, 3, 1])
y = tfw.nn.conv3d_transpose(x, kernal, output_shape=[2, 1, 5, 5, 3],
                            strides=[1, 2, 2, 2, 1], padding='SAME')
# Fixed typo in the printed label: "conv3d_reanspose" -> "conv3d_transpose".
print('9.tfw.nn.conv3d_transpose : ', y)
# 9.tfw.nn.conv3d_transpose :  Tensor("conv3d_transpose:0", shape=(2, 1, 5, 5, 3), dtype=float32)
# Output: 9.tfw.nn.conv3d_transpose :  Tensor("conv3d_transpose:0", shape=(2, 1, 5, 5, 3), dtype=float32)

# (Non-code web-page residue from the original blog scrape removed here.)