TensorFlow: Error-Prone Functions

1. tf.assign() — function description:

def assign(ref, value, validate_shape=None, use_locking=None, name=None):
  """Update 'ref' by assigning 'value' to it.

  This operation outputs a Tensor that holds the new value of 'ref' after
    the value has been assigned. This makes it easier to chain operations
    that need to use the reset value.

  Args:
    ref: A mutable `Tensor`.
      Should be from a `Variable` node. May be uninitialized.
    value: A `Tensor`. Must have the same type as `ref`.
      The value to be assigned to the variable.
    validate_shape: An optional `bool`. Defaults to `True`.
      If true, the operation will validate that the shape
      of 'value' matches the shape of the Tensor being assigned to.  If false,
      'ref' will take on the shape of 'value'.
    use_locking: An optional `bool`. Defaults to `True`.
      If True, the assignment will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that will hold the new value of 'ref' after
      the assignment has completed.
  """
  if ref.dtype._is_ref_dtype:
    return gen_state_ops.assign(
        ref, value, use_locking=use_locking, name=name,
        validate_shape=validate_shape)
  return ref.assign(value)

Notes:

1. Only after the tf.assign() op has actually been run does the tensor hold the new value.

2. The parameter validate_shape defaults to True: the shape of the old value must match the shape of the new value, otherwise an error is raised. For example:

import tensorflow as tf

a = tf.Variable([10, 20])
b = tf.assign(a, [20, 30, 1])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("run a : ", sess.run(a))
    print("run b : ", sess.run(b))
    print("run a again : ", sess.run(a))
# Because validate_shape defaults to True and the shape of [20, 30, 1] differs from that of [10, 20],
# building the assign op already fails at graph-construction time (shape inference), before the session runs:
# ValueError: Dimension 0 in both shapes must be equal, but are 2 and 3. Shapes are [2] and [3].
#   for 'Assign' (op: 'Assign') with input shapes: [2], [3].



import tensorflow as tf

a = tf.Variable([10, 20])
b = tf.assign(a, [20, 30, 1], validate_shape=False)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("run a : ", sess.run(a))
    print(a)
    print("run b : ", sess.run(b))
    print("run a again : ", sess.run(a))
out:
run a :  [10 20]
<tf.Variable 'Variable:0' shape=(2,) dtype=int32_ref>
run b :  [20 30  1]
run a again :  [20 30  1]
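
Note that even though the stored value now has three elements, the variable's static shape in the graph is still reported as (2,) (see the print(a) line above). With validate_shape=False the static shape no longer describes the actual value, so the real shape has to be queried at run time. A minimal standalone sketch of this, assuming the same variable setup as above:

import tensorflow as tf

a = tf.Variable([10, 20])
b = tf.assign(a, [20, 30, 1], validate_shape=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(b)                    # perform the shape-changing assignment
    print(a.get_shape())           # static shape recorded in the graph: (2,)
    print(sess.run(tf.shape(a)))   # dynamic shape of the stored value: [3]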



The next example shows that the assignment only takes effect once the assign op (or something that depends on it) is actually run:

import tensorflow as tf

a = tf.Variable([10, 20])
b = tf.assign(a, [20, 30])
c = b + [10, 20]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))  # => [10 20]
    print(sess.run(c))  # => [30 50]  running c also runs b, since c depends on b
    print(sess.run(a))  # => [20 30]

# As long as the tf.assign() op has not been executed, ref keeps its old value.
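
If a read of the variable must always see the assigned value, the read can be made to depend on the assign op explicitly. Below is a minimal sketch of that pattern using tf.control_dependencies and tf.identity (TF 1.x graph mode; the names assign_op and a_after_assign are just illustrative):

import tensorflow as tf

a = tf.Variable([10, 20])
assign_op = tf.assign(a, [20, 30])

# Force the assignment to run before the variable is read.
with tf.control_dependencies([assign_op]):
    a_after_assign = tf.identity(a)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a_after_assign))  # expected: [20 30]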


2. tf.slice()


import tensorflow as tf

sess = tf.Session()
input = tf.constant([[[1, 1, 1], [2, 2, 2]],
                     [[3, 3, 3], [4, 4, 4]],
                     [[5, 5, 5], [6, 6, 6]]])
data = tf.slice(input, [1, 0, 0], [-1, 1, 3])
print(sess.run(data))
print('+++++++++++++++++')

data = tf.slice(input, [1, 0, 0], [1, 2, 3])
print(sess.run(data))
print('+++++++++++++++++')

data = tf.slice(input, [1, 0, 0], [2, 1, 3])
print(sess.run(data))
print('+++++++++++++++++')

data = tf.slice(input, [1, 0, 0], [2, 2, 2])
print(sess.run(data))
print('+++++++++++++++++')

data = tf.slice(input, [1, 0, 0], [2, 2, 0])
print(sess.run(data))
print('+++++++++++++++++')

out:
[[[3 3 3]]

 [[5 5 5]]]
+++++++++++++++++
[[[3 3 3]
  [4 4 4]]]
+++++++++++++++++
[[[3 3 3]]

 [[5 5 5]]]
+++++++++++++++++
[[[3 3]
  [4 4]]

 [[5 5]
  [6 6]]]
+++++++++++++++++
[]
+++++++++++++++++
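
tf.slice(input_, begin, size) takes size[i] elements along dimension i, starting at index begin[i]; a size of -1 means "everything remaining in that dimension", and a size of 0 produces an empty result, which is why the last example prints []. As a rough sketch (reusing the same input constant as above), the first slice can also be written with Python-style indexing, which TensorFlow lowers to a strided slice:

import tensorflow as tf

input = tf.constant([[[1, 1, 1], [2, 2, 2]],
                     [[3, 3, 3], [4, 4, 4]],
                     [[5, 5, 5], [6, 6, 6]]])

a = tf.slice(input, [1, 0, 0], [-1, 1, 3])  # begin/size form
b = input[1:, 0:1, :]                       # equivalent indexing form

with tf.Session() as sess:
    print(sess.run(a))  # expected: [[[3 3 3]] [[5 5 5]]]
    print(sess.run(b))  # expected: the same values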

3. tf.reduce_max(), tf.transpose()

import tensorflow as tf

word_mat = tf.constant([
                        [[0,0,0],[4,1,4],[7,3,9]],
                        [[1,2,3],[30,5,6],[7,8,9]],
                        [[1,3,3],[7,5,6],[2,8,9]]
                       ],dtype=tf.float32)
# returns the maximum over all elements of the tensor
o = tf.reduce_max(word_mat)
o1 = tf.reduce_max(word_mat,axis=0)
o2 = tf.transpose(word_mat,[0,2,1])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(o))

    print(sess.run(o1))

    print(sess.run(o2))


out:
30.0
[[ 1.  3.  3.]
 [30.  5.  6.]
 [ 7.  8.  9.]]
[[[ 0.  4.  7.]
  [ 0.  1.  3.]
  [ 0.  4.  9.]]

 [[ 1. 30.  7.]
  [ 2.  5.  8.]
  [ 3.  6.  9.]]

 [[ 1.  7.  2.]
  [ 3.  5.  8.]
  [ 3.  6.  9.]]]
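
tf.reduce_max with no axis reduces over every element (hence the scalar 30.0), axis=0 takes the element-wise maximum across the first dimension, and tf.transpose(word_mat, [0, 2, 1]) keeps the first axis and swaps the last two. A small sketch of reducing over the last axis instead, assuming the same word_mat as above (keepdims is called keep_dims in older 1.x releases):

import tensorflow as tf

word_mat = tf.constant([
                        [[0, 0, 0], [4, 1, 4], [7, 3, 9]],
                        [[1, 2, 3], [30, 5, 6], [7, 8, 9]],
                        [[1, 3, 3], [7, 5, 6], [2, 8, 9]]
                       ], dtype=tf.float32)

row_max = tf.reduce_max(word_mat, axis=2)                      # shape (3, 3)
row_max_keep = tf.reduce_max(word_mat, axis=2, keepdims=True)  # shape (3, 3, 1)

with tf.Session() as sess:
    print(sess.run(row_max))
    # expected:
    # [[ 0.  4.  9.]
    #  [ 3. 30.  9.]
    #  [ 3.  7.  9.]]
    print(sess.run(row_max_keep).shape)  # expected: (3, 3, 1)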

4. tf.tile()

word_mat = tf.constant([
                        [[0,0,0]],
                        [[1,2,3]],
                        [[1,3,3]]
                       ],dtype=tf.float32)

o = tf.tile(word_mat,[1,3,2])


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(o))

out:
[[[0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0.]
  [0. 0. 0. 0. 0. 0.]]

 [[1. 2. 3. 1. 2. 3.]
  [1. 2. 3. 1. 2. 3.]
  [1. 2. 3. 1. 2. 3.]]

 [[1. 3. 3. 1. 3. 3.]
  [1. 3. 3. 1. 3. 3.]
  [1. 3. 3. 1. 3. 3.]]]
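
tf.tile(input_, multiples) repeats the tensor multiples[i] times along each dimension i, so the output shape is the element-wise product of the input shape and multiples; here a (3, 1, 3) tensor tiled by [1, 3, 2] becomes (3, 3, 6). A short shape check, assuming the same word_mat as above:

import tensorflow as tf

word_mat = tf.constant([
                        [[0, 0, 0]],
                        [[1, 2, 3]],
                        [[1, 3, 3]]
                       ], dtype=tf.float32)  # shape (3, 1, 3)

tiled = tf.tile(word_mat, [1, 3, 2])

print(word_mat.get_shape())   # (3, 1, 3)
print(tiled.get_shape())      # (3, 3, 6)

with tf.Session() as sess:
    print(sess.run(tiled).shape)  # expected: (3, 3, 6)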

 
