Explanation of the Parameters and Usage of get_collection() in TensorFlow

tf.get_collection()

This function takes two arguments: key and scope.

tf.get_collection returns the list of all values stored under the given key (optionally filtered by scope).
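As a minimal sketch (TF 1.x graph mode; the collection name 'my_vars' and the variable names below are made up for illustration), key is simply the string name of a collection, and scope filters the returned items by matching their names against a regular expression:

import tensorflow as tf

with tf.variable_scope('block1'):
    a = tf.get_variable('a', shape=[2])
with tf.variable_scope('block2'):
    b = tf.get_variable('b', shape=[2])

# Put both variables into a custom collection keyed by an arbitrary string.
tf.add_to_collection('my_vars', a)
tf.add_to_collection('my_vars', b)

print(tf.get_collection('my_vars'))                  # [a, b]
print(tf.get_collection('my_vars', scope='block1'))  # [a] only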

tf.GraphKeys defines many standard collection keys that can follow the dot, for example VARIABLES (which contains all variables) and REGULARIZATION_LOSSES.
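For instance (a small sketch, assuming TF 1.x, where GLOBAL_VARIABLES is the current name of the old VARIABLES collection), any variable created with tf.get_variable is added to these standard collections automatically, so it can be retrieved without calling tf.add_to_collection yourself:

import tensorflow as tf

w = tf.get_variable('w', shape=[3, 3])

print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))     # contains w
print(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))  # contains w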

A concrete example of using tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES):

import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.examples.tutorials.mnist import input_data

# Example flag values (assumed here so the snippet is self-contained).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/tmp/mnist_data', 'Directory for the MNIST data.')
tf.app.flags.DEFINE_float('regu', 0.001, 'L2 regularization scale.')

def easier_network(x, reg):
    """ A simple fully connected network built with tf.contrib.layers, with input `x`. """
    with tf.variable_scope('EasyNet'):
        out = layers.flatten(x)
        out = layers.fully_connected(out,
                                     num_outputs=200,
                                     weights_initializer=layers.xavier_initializer(uniform=True),
                                     weights_regularizer=layers.l2_regularizer(scale=reg),
                                     activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out,
                                     num_outputs=200,
                                     weights_initializer=layers.xavier_initializer(uniform=True),
                                     weights_regularizer=layers.l2_regularizer(scale=reg),
                                     activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out,
                                     num_outputs=10,  # Because there are ten digits!
                                     weights_initializer=layers.xavier_initializer(uniform=True),
                                     weights_regularizer=layers.l2_regularizer(scale=reg),
                                     activation_fn=None)
        return out

def main(_):
    # Load MNIST data.
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Make a network with regularization.
    y_conv = easier_network(x, FLAGS.regu)

    # All trainable variables created under the 'EasyNet' scope.
    weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'EasyNet')
    print("")
    for w in weights:
        shp = w.get_shape().as_list()
        print("- {} shape:{} size:{}".format(w.name, shp, np.prod(shp)))
    print("")

    # All regularization losses created under the 'EasyNet' scope.
    reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'EasyNet')
    for w in reg_ws:
        shp = w.get_shape().as_list()
        print("- {} shape:{} size:{}".format(w.name, shp, np.prod(shp)))
    print("")

    # Make the loss function `loss_fn` with regularization.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    loss_fn = cross_entropy + tf.reduce_sum(reg_ws)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss_fn)

if __name__ == '__main__':
    tf.app.run(main=main)

>>>   - EasyNet/fully_connected/weights:0 shape:[784, 200] size:156800
      - EasyNet/fully_connected/biases:0 shape:[200] size:200
      - EasyNet/fully_connected_1/weights:0 shape:[200, 200] size:40000
      - EasyNet/fully_connected_1/biases:0 shape:[200] size:200
      - EasyNet/fully_connected_2/weights:0 shape:[200, 10] size:2000
      - EasyNet/fully_connected_2/biases:0 shape:[10] size:10

      - EasyNet/fully_connected/kernel/Regularizer/l2_regularizer:0 shape:[] size:1.0
      - EasyNet/fully_connected_1/kernel/Regularizer/l2_regularizer:0 shape:[] size:1.0
      - EasyNet/fully_connected_2/kernel/Regularizer/l2_regularizer:0 shape:[] size:1.0
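The same collection can also be read through the tf.losses helpers in TF 1.x; a minimal sketch of an equivalent way to build the loss above:

# tf.losses.get_regularization_loss sums tf.GraphKeys.REGULARIZATION_LOSSES,
# optionally filtered by scope, so this matches the tf.reduce_sum(reg_ws) above.
reg_loss = tf.losses.get_regularization_loss(scope='EasyNet')
loss_fn = cross_entropy + reg_loss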