# TensorFlow 2.0: broadcasting and math operations
import tensorflow as tf
import numpy as np
print(tf.__version__)

x = tf.random.normal([4, 32, 32, 3])
# The + operator broadcasts implicitly: shapes are aligned from the right,
# and each aligned dimension must either match or be 1.
print((x + tf.random.normal([3])).shape)
print((x + tf.random.normal([32, 32, 1])).shape)
print((x + tf.random.normal([4, 1, 1, 1])).shape)
# This one raises an error: aligned from the right, [1, 4, 1, 1] puts 4
# against 32, which neither matches nor is 1.
# print((x + tf.random.normal([1, 4, 1, 1])).shape)
# Explicit broadcasting with tf.broadcast_to
b = tf.broadcast_to(tf.random.normal([4, 1, 1, 1]), [4, 32, 32, 3])
print(b.shape)
# broadcast_to vs tile:
# broadcast_to does not materialize new memory for the repeated data,
# while tile allocates a real copy for every repetition.
a = tf.ones([3, 4])
a1 = tf.broadcast_to(a, [2, 3, 4])
print(a1)
a2 = tf.tile(tf.expand_dims(a, axis=0), [2, 1, 1])
print(a2)
# Math operations
# Logarithms
a = tf.fill([2, 2], 2.0)
b = tf.ones([2, 2])
# tf.math.log is the natural log (base e)
print(tf.math.log(b))
print(tf.exp(b))
# log base 2 via change of base: log2(x) = ln(x) / ln(2)
print(tf.math.log(8.0) / tf.math.log(2.0))
# log base 10 via change of base: log10(x) = ln(x) / ln(10)
print(tf.math.log(100.0) / tf.math.log(10.0))
# Matrix multiplication: the last two dimensions must be compatible,
# e.g. [4, 2, 3] @ [4, 3, 5] -> [4, 2, 5], and [4, 2] @ [2, 1] -> [4, 1].
# Leading (batch) dimensions must match.
print(tf.matmul(b, a))
print(b @ a)
a = tf.ones([4, 2, 3])
b = tf.fill([4, 3, 5], 2.0)
print(a @ b)
# A linear layer: Y = X @ W + b, where the scalar bias b broadcasts
# over every element of the [4, 1] product.
x = tf.ones([4, 2])
W = tf.ones([2, 1])
b = tf.constant(0.1)
Y = x @ W + b
print(Y)