在使用 slim 构建模型时,若 BN 层采用 normalizer_fn=slim.batch_norm,
在 is_training=False 时会出现精度较低或结果不正确的问题;
改用 tf.layers.batch_normalization 作为 BN 层即可解决。
模型构建代码如下:
# Set default arguments for all slim.conv2d layers via arg_scope.
# Per the note above this snippet, tf.layers.batch_normalization is used
# as the normalizer instead of slim.batch_norm, which reportedly gave
# low/incorrect accuracy at inference time (is_training=False).
with slim.arg_scope([slim.conv2d],
                    kernel_size=[3, 3],
                    padding='SAME',
                    stride=1,
                    weights_regularizer=slim.l2_regularizer(0.0005),
                    activation_fn=tf.nn.relu,
                    # tf.layers.batch_normalization takes `training`
                    # (not slim.batch_norm's `is_training`), so the
                    # normalizer_params key must be 'training'.
                    normalizer_fn=tf.layers.batch_normalization,
                    normalizer_params={'training': train}
                    ):
    pass  # build the network's conv layers here
保存模型时可以按如下方式设置:
# Train-and-save skeleton. With tf.layers.batch_normalization two things
# are easy to miss:
#   1. The moving-average update ops are collected in GraphKeys.UPDATE_OPS
#      and must run together with the train step — hence the
#      control_dependencies wrapper around the optimizer.
#   2. tf.trainable_variables() does NOT include the BN moving_mean /
#      moving_variance variables, so they must be added to the Saver's
#      var_list explicitly, or inference after restore will be wrong.
with tf.Graph().as_default():
    # ... build the model here, producing `loss` and `global_step` ...

    # Make the optimizer step depend on the BN moving-average updates.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Save trainable variables plus the BN moving statistics.
    var_list = tf.trainable_variables()
    g_list = tf.global_variables()
    bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
    bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
    var_list += bn_moving_vars
    saver = tf.train.Saver(var_list=var_list, max_to_keep=5)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # allocate GPU memory on demand
    with tf.Session(config=config) as sess:
        # ... initialize variables and run the training loop here ...
        saver.save(sess, PATH)