1.tf.constant
创建一个常量张量，传入 list 或者数值来填充
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]
# Constant 2-D tensor populated with scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
                                             [-1. -1. -1.]]
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
[4. 5. 6.]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
[9. 10.]
[11. 12.]]
# 3-D tensor `a`
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3]) => [[[ 1. 2. 3.]
[ 4. 5. 6.]],
[[ 7. 8. 9.]
[10. 11. 12.]]]
# 3-D tensor `b`
b = tf.constant(np.arange(13, 25, dtype=np.int32),
shape=[2, 3, 2]) => [[[13. 14.]
[15. 16.]
[17. 18.]],
[[19. 20.]
[21. 22.]
[23. 24.]]]
2.指数衰减的学习率
global_step = tf.Variable(0)
# 通过exponential_decay函数生成学习率。
learning_rate = tf.train.exponential_decay(
0.1, global_step, 100, 0.96, staircase=True)
# 使用指数衰减的学习率。在minimize函数中传入global_step将自动更新
# global_step参数,从而使得学习率也得到相应更新。
learning_step = tf.train.GradientDescentOptimizer(learning_rate)\
.minimize(my_loss, global_step=global_step)  # my_loss 为自定义的损失函数
3.防止过拟合的正则化
w = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w)
# 注意：lambda 是 Python 的保留字，不能作为变量名，这里用 lambda_ 表示正则化项的权重；
# 跨行表达式需要用 \ 续行（或整体加括号）
loss = tf.reduce_mean(tf.square(y_ - y)) + \
       tf.contrib.layers.l2_regularizer(lambda_)(w)
l1正则化和l2正则化
weights = tf.constant([[1.0, -2.0], [-3.0, 4.0]])
with tf.Session() as sess:
# 输出为(|1|+|-2|+|-3|+|4|)×0.5=5。其中0.5为正则化项的权重。
print(sess.run(tf.contrib.layers.l1_regularizer(.5)(weights)))
# 输出为(1+4+9+16)*(0.5/2)=7.5
print(sess.run(tf.contrib.layers.l2_regularizer(.5)(weights)))