# 把slim代码改成layers

## 调查默认值

然后，顺着默认值，再找出代码中所有用到 arg_scope 的地方：

with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
with slim.arg_scope([slim.separable_conv2d],
weights_regularizer=depthwise_regularizer) as sc:


## 复现

from functools import partial

....

my_dense_layer = partial(
tf.layers.dense, activation=tf.nn.relu,
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale))

....

hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")

# layers的层定义

## 类定义

import tensorflow as tf
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = tf.layers.Flatten()(x)

## 函数定义

import tensorflow as tf
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = tf.layers.flatten(x)

# 奇葩参数

## 1

slim.separable_conv2d(inputs=net, num_outputs=None, ...)

num_outputs=None，也就是跳过 pointwise（1x1）卷积，只进行 depthwise 卷积，输出通道数为 num_filters_in * depth_multiplier

num_outputs: The number of pointwise convolution output filters. If is None, then we skip the pointwise convolution stage.

depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier.

## 2

slim.separable_conv2d(inputs=net, num_outputs=None, kernel_size=conv_def.kernel, stride=conv_def.stride,
                      depth_multiplier=1.0, normalizer_fn=slim.batch_norm)

doc中原文

if normalizer_fn is None, it adds bias to the result, creating a variable called 'biases', otherwise, the normalizer_fn is applied.

09-15 1万+