Building networks with python-tensorflow

Convolution

import tensorflow as tf    # all snippets below assume TensorFlow 1.x

# Convolution: 4x4 kernel, explicit 1-pixel zero padding, configurable stride
def conv(batch_input, out_channels, stride):
    with tf.variable_scope("conv"):
        in_channels = batch_input.get_shape()[3]        # input layout is [N, H, W, C]
        filter = tf.get_variable("filter", [4, 4, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
        # pad H and W by 1 on each side, then convolve with VALID padding
        padded_input = tf.pad(batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="CONSTANT")
        conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
        return conv
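
A minimal usage sketch (not from the original; the input shape is illustrative): with a 4x4 kernel, 1-pixel padding and stride 2, the spatial size halves.

# Hypothetical usage of conv(): output H = (8 + 2*1 - 4) / 2 + 1 = 4
x = tf.placeholder(tf.float32, [1, 8, 8, 3])
y = conv(x, out_channels=16, stride=2)
print(y.get_shape())    # (1, 4, 4, 16)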

1x1 convolution, without bias

# Nothing special: a 1x1 convolution is just a linear combination of the feature maps, with the filter as the weights; this version has no bias term (the next one adds it)
def conv1x1(batch_input, out_channels):
    with tf.variable_scope("conv1x1"):
        in_channels = batch_input.get_shape()[3]
        filter = tf.get_variable("filter", [1, 1, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
        conv = tf.nn.conv2d(batch_input, filter, [1, 1, 1, 1], padding="VALID")
        return conv     # [N,H,W,C]

1x1 convolution, with bias

def conv1x1b(batch_input, out_channels):
    with tf.variable_scope("conv1x1b"):    # distinct scope name so it can coexist with conv1x1 above in the same graph
        in_channels = batch_input.get_shape()[3]
        filter = tf.get_variable("filter", [1, 1, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
        bias = tf.get_variable("bias", [out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
        conv = tf.nn.conv2d(batch_input, filter, [1, 1, 1, 1], padding="VALID")
        conv = tf.nn.bias_add(conv, bias)
        return conv     # [N,H,W,C]
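
A quick shape check (input tensor is illustrative): a 1x1 convolution changes only the channel dimension, leaving H and W untouched.

# Hypothetical usage: map channels 3 -> 8, spatial size unchanged
x = tf.placeholder(tf.float32, [2, 16, 16, 3])
y = conv1x1b(x, out_channels=8)
print(y.get_shape())    # (2, 16, 16, 8)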

lrelu (leaky ReLU)

# Split into the cases x >= 0 and x < 0:
#   x >= 0: 0.5*(1+a)*x + 0.5*(1-a)*x = x
#   x <  0: 0.5*(1+a)*x - 0.5*(1-a)*x = a*x
# i.e. the standard leaky ReLU with negative slope a
def lrelu(x, a):
    with tf.name_scope("lrelu"):
        x = tf.identity(x)
        return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
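
A small numeric check (values are illustrative) that this closed form matches max(x, a*x) for 0 < a < 1:

# Hypothetical check: lrelu(x, 0.2) equals max(x, 0.2*x) elementwise
x = tf.constant([-2.0, -0.5, 0.0, 1.0, 3.0])
with tf.Session() as sess:
    print(sess.run(lrelu(x, 0.2)))              # [-0.4 -0.1  0.   1.   3. ]
    print(sess.run(tf.maximum(x, 0.2 * x)))     # same values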

Batch normalization

# Batch normalization
def batchnorm(input):
    with tf.variable_scope("batchnorm"):
        # this block looks like it has 3 inputs on the graph unless we do this
        input = tf.identity(input)
        channels = input.get_shape()[3]
        offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer())
        scale = tf.get_variable("scale", [channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))

        mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False)      # per-channel mean and variance over the N, H, W axes
        variance_epsilon = 1e-5
        normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon)     # apply batch normalization
        return normalized
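
Note that this version always normalizes with the current batch's statistics; it keeps no moving averages for inference. A minimal sanity check (input values illustrative): each output channel should have roughly zero mean and unit variance, since offset starts at 0 and scale near 1.

# Hypothetical check: normalized output has ~zero mean, ~unit variance per channel
x = tf.random_normal([4, 8, 8, 3], mean=5.0, stddev=2.0)
y = batchnorm(x)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    m, v = sess.run(tf.nn.moments(y, axes=[0, 1, 2]))
    print(m, v)    # per-channel means near 0, variances near 1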

Transposed convolution

import numpy as np

arry = np.arange(16, dtype=np.float32).reshape([2, 2, 2, 2])
with tf.variable_scope('conv1'):    # note: reuse=True would fail here, since the variable does not exist yet
    arry = tf.constant(arry)
    # for conv2d_transpose the filter shape is [height, width, out_channels, in_channels]
    filter = tf.get_variable(name='filter', initializer=tf.random_normal(shape=[3, 3, 3, 2], mean=0, stddev=0.05))
    # output_shape [2, 4, 4, 3]: with VALID padding, out = (in - 1) * stride + kernel = (2 - 1) * 1 + 3 = 4
    conv1 = tf.nn.conv2d_transpose(arry, filter, [2, 4, 4, 3], strides=[1, 1, 1, 1], padding='VALID')

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    print(sess.run(conv1))
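
The output_shape argument must match what a VALID transposed convolution actually produces: out = (in - 1) * stride + kernel. A stride-2 variant (shapes illustrative, not from the original) upsamples the same 2x2 input to 5x5:

# Hypothetical stride-2 variant: out = (2 - 1) * 2 + 3 = 5
f2 = tf.get_variable('filter2', initializer=tf.random_normal(shape=[3, 3, 3, 2], mean=0, stddev=0.05))
up = tf.nn.conv2d_transpose(arry, f2, output_shape=[2, 5, 5, 3], strides=[1, 2, 2, 1], padding='VALID')
print(up.get_shape())    # (2, 5, 5, 3)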

Image interpolation in TensorFlow 1.x

arry = np.arange(4).reshape([1, 2, 2, 1])
arry = tf.constant(arry, tf.float32)
# nearest-neighbor upsampling from 2x2 to 4x4: each pixel becomes a 2x2 block
arryOut = tf.image.resize_images(arry, size=[4, 4], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
with tf.Session() as sess:
    print(sess.run(arryOut))
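
For the 2x2 input [[0, 1], [2, 3]], nearest-neighbor resizing prints rows [0 0 1 1] twice followed by [2 2 3 3] twice. For comparison, a bilinear variant (not in the original) produces smoothly interpolated values instead of duplicated blocks:

# Hypothetical comparison: bilinear interpolation on the same 2x2 input
arrySmooth = tf.image.resize_images(arry, size=[4, 4], method=tf.image.ResizeMethod.BILINEAR)
with tf.Session() as sess:
    print(sess.run(arrySmooth))    # interpolated values rather than duplicated blocks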

Total variation regularization: https://en.wikipedia.org/wiki/Total_variation_denoising

def total_variation_regularization(images):
    height_var = tf.nn.l2_loss(images[:, :-1, :, :] - images[:, 1:, :, :])    # gradient along H (axis 1)
    width_var = tf.nn.l2_loss(images[:, :, :-1, :] - images[:, :, 1:, :])     # gradient along W (axis 2)
    return tf.add(height_var, width_var)
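
A quick sanity check (inputs illustrative): a constant image has zero total variation, while any spatial change increases the loss.

# Hypothetical check: constant image -> 0; the 2x2 ramp [[0,1],[2,3]] -> 5.0
flat = tf.ones([1, 4, 4, 1])
ramp = tf.constant(np.arange(4, dtype=np.float32).reshape([1, 2, 2, 1]))
with tf.Session() as sess:
    print(sess.run(total_variation_regularization(flat)))   # 0.0
    print(sess.run(total_variation_regularization(ramp)))   # 5.0 = 0.5*(2^2 + 2^2) + 0.5*(1^2 + 1^2)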

 
