31. Summary of Common TensorFlow 2 Functions

1. Creating tensors

import tensorflow as tf
a = tf.constant([1,5],dtype=tf.int64)
print(a)
Output:
tf.Tensor([1 5], shape=(2,), dtype=int64)
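If dtype is omitted, tf.constant infers it from the Python literals; a quick sketch:
print(tf.constant([1, 5]))    # dtype=int32 is inferred from the integer literals
print(tf.constant([1., 5.]))  # dtype=float32 is inferred from the float literals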

2. Converting NumPy data to tensors

import numpy as np
a = np.arange(0,5)
b = tf.convert_to_tensor(a,dtype=tf.int64)
print(a)
print(b)
Output:
[0 1 2 3 4]
tf.Tensor([0 1 2 3 4], shape=(5,), dtype=int64)
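The reverse conversion also works in eager mode: calling .numpy() on a tensor returns the underlying NumPy array. A minimal sketch reusing b from above:
c = b.numpy()   # back to a NumPy ndarray
print(type(c))  # <class 'numpy.ndarray'>
print(c)        # [0 1 2 3 4]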

3. Common tensor-creation functions

# Create a tensor of all zeros
print(tf.zeros([2,3]))
Output:
tf.Tensor(
[[0. 0. 0.]
 [0. 0. 0.]], shape=(2, 3), dtype=float32)
# Create a tensor of all ones
print(tf.ones(4,dtype=tf.int64))
Output:
tf.Tensor([1 1 1 1], shape=(4,), dtype=int64)
# Create a tensor filled with a specified value
print(tf.fill([2,3],9))
Output:
tf.Tensor(
[[9 9 9]
 [9 9 9]], shape=(2, 3), dtype=int32)
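When a new tensor should match the shape of an existing one, tf.zeros_like and tf.ones_like avoid spelling the shape out; a quick sketch:
a = tf.constant([[1,2,3],[4,5,6]])
print(tf.zeros_like(a))  # 2x3 zeros with the same dtype as a (int32)
print(tf.ones_like(a))   # 2x3 ones with the same dtype as a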

4. Generating random numbers

# Draw normally distributed random numbers; the defaults are mean 0 and stddev 1
print(tf.random.normal([2,2],mean=0,stddev=1))
Output:
tf.Tensor(
[[ 0.7842968  -0.7647454 ]
 [ 0.97283685  0.2644472 ]], shape=(2, 2), dtype=float32)
# Truncated normal distribution: samples more than two standard deviations from the mean (a window four stddevs wide) are discarded and re-drawn
print(tf.random.truncated_normal([2,2],mean=10,stddev=5))
Output:
tf.Tensor(
[[12.032915   6.0511527]
 [ 5.826352  10.884016 ]], shape=(2, 2), dtype=float32)
# Uniform distribution over [5, 10): minval=5, maxval=10
print(tf.random.uniform([3,5],minval=5,maxval=10))
Output:
tf.Tensor(
[[5.6967125 7.247097  6.047154  8.359208  6.3673706]
 [7.9319816 8.97218   7.324269  6.9389033 7.510229 ]
 [6.3872385 9.273575  7.911994  6.5568457 9.686001 ]], shape=(3, 5), dtype=float32)
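All three draws change from run to run; fixing the global seed with tf.random.set_seed makes a program's random sequence repeatable. A minimal sketch (the seed value 116 is arbitrary):
tf.random.set_seed(116)         # any fixed seed works
print(tf.random.normal([2,2]))  # the same values each time the program is rerun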

5. Data type conversion

# Convert float to int
a = tf.constant([1.,2.])
b = tf.cast(a,dtype=tf.int64)
print(a)
print(b)
Output:
tf.Tensor([1. 2.], shape=(2,), dtype=float32)
tf.Tensor([1 2], shape=(2,), dtype=int64)
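Note that a float-to-int cast truncates toward zero instead of rounding; a quick sketch:
c = tf.constant([-1.7,1.7])
print(tf.cast(c,dtype=tf.int32))  # tf.Tensor([-1  1], ...): truncated, not rounded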

6. Row and column reductions

a = tf.random.normal([3,5])
print(a)
print('Global min',tf.reduce_min(a))
print('Row-wise min',tf.reduce_min(a,axis=1))# minimum of each row
print('Row-wise max',tf.reduce_max(a,axis=1))# maximum of each row
print('Column-wise min',tf.reduce_min(a,axis=0))# minimum of each column
print('Row sums',tf.reduce_sum(a,axis=1))# sum of each row
print('Row means',tf.reduce_mean(a,axis=1))# mean of each row
Output:
tf.Tensor(
[[-0.22129555 -1.6673905  -0.1440103  -0.05698334 -0.04073694]
 [-0.30187613  0.72861683 -0.7404691  -0.7892406   1.9739453 ]
 [-2.031113    0.53566146 -0.53305024 -1.7855076  -0.5870117 ]], shape=(3, 5), dtype=float32)
Global min tf.Tensor(-2.031113, shape=(), dtype=float32)
Row-wise min tf.Tensor([-1.6673905 -0.7892406 -2.031113 ], shape=(3,), dtype=float32)
Row-wise max tf.Tensor([-0.04073694  1.9739453   0.53566146], shape=(3,), dtype=float32)
Column-wise min tf.Tensor([-2.031113  -1.6673905 -0.7404691 -1.7855076 -0.5870117], shape=(5,), dtype=float32)
Row sums tf.Tensor([-2.1304166  0.8709763 -4.401021 ], shape=(3,), dtype=float32)
Row means tf.Tensor([-0.42608333  0.17419526 -0.8802042 ], shape=(3,), dtype=float32)
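By default the reduced axis disappears from the shape; passing keepdims=True keeps it with length 1, which is convenient for broadcasting. A minimal sketch that centers each row of a:
m = tf.reduce_mean(a,axis=1,keepdims=True)  # shape (3, 1) instead of (3,)
print(a - m)  # broadcasting subtracts each row's mean from that row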

7. Trainable parameters

# tf.Variable marks a value as a variable, i.e. a trainable quantity
a = tf.random.normal([2,2])
b=tf.Variable(tf.random.normal([2,2],mean=0,stddev=1))
print(a)
print(b)
Output:
tf.Tensor(
[[ 2.1720138  -1.2562056 ]
 [ 2.2804182   0.19345297]], shape=(2, 2), dtype=float32)
<tf.Variable 'Variable:0' shape=(2, 2) dtype=float32, numpy=
array([[ 0.46369234, -1.604106  ],
       [ 0.1444198 ,  2.4719782 ]], dtype=float32)>
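The point of marking a tensor as a tf.Variable is that tf.GradientTape tracks it automatically during differentiation; a minimal sketch with a toy loss:
w = tf.Variable(tf.constant(3.0))
with tf.GradientTape() as tape:
    loss = tf.square(w)        # toy loss: w squared
grad = tape.gradient(loss,w)   # d(loss)/dw = 2w
print(grad)                    # tf.Tensor(6.0, shape=(), dtype=float32)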

8. Arithmetic operations

a = tf.ones([1,3])
b = tf.fill([1,3],3.)
print(a)
print(b)
Output:
tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float32)
tf.Tensor([[3. 3. 3.]], shape=(1, 3), dtype=float32)
The four basic arithmetic operations:
print(tf.add(a,b))
print(tf.subtract(a,b))
print(tf.multiply(a,b))
print(tf.divide(a,b))
Output:
tf.Tensor([[4. 4. 4.]], shape=(1, 3), dtype=float32)
tf.Tensor([[-2. -2. -2.]], shape=(1, 3), dtype=float32)
tf.Tensor([[3. 3. 3.]], shape=(1, 3), dtype=float32)
tf.Tensor([[0.33333334 0.33333334 0.33333334]], shape=(1, 3), dtype=float32)
# Square, power, and square root
print(tf.square(b))
print(tf.pow(b,3))
print(tf.sqrt(b))
Output:
tf.Tensor([[9. 9. 9.]], shape=(1, 3), dtype=float32)
tf.Tensor([[27. 27. 27.]], shape=(1, 3), dtype=float32)
tf.Tensor([[1.7320508 1.7320508 1.7320508]], shape=(1, 3), dtype=float32)
# Matrix multiplication
a = tf.ones([3,2])
b = tf.fill([2,3],5.)
print(tf.matmul(a,b))
Output:
tf.Tensor(
[[10. 10. 10.]
 [10. 10. 10.]
 [10. 10. 10.]], shape=(3, 3), dtype=float32)
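The Python operators +, -, *, and / are overloaded to the same element-wise operations, and both operands must share a dtype (cast first if they differ); a quick sketch:
a = tf.ones([1,3])
b = tf.fill([1,3],3.)
print(a + b)  # same as tf.add(a,b)
print(a / b)  # same as tf.divide(a,b)
# a + tf.constant([1,2,3]) would raise an error: float32 vs int32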

9. Pairing features with labels

# Pair each feature with its label
features=tf.constant([1,2,3])
labels = tf.constant([0,1,0])
dataset = tf.data.Dataset.from_tensor_slices((features,labels))
for element in dataset:
    print(element)
Output:
(<tf.Tensor: shape=(), dtype=int32, numpy=1>, <tf.Tensor: shape=(), dtype=int32, numpy=0>)
(<tf.Tensor: shape=(), dtype=int32, numpy=2>, <tf.Tensor: shape=(), dtype=int32, numpy=1>)
(<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(), dtype=int32, numpy=0>)

10. One-hot encoding

labels = tf.constant([1,0,2])
one_hot = tf.one_hot(labels,depth=5)
print(one_hot)
Output:
tf.Tensor(
[[0. 1. 0. 0. 0.]
 [1. 0. 0. 0. 0.]
 [0. 0. 1. 0. 0.]], shape=(3, 5), dtype=float32)
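Going the other way, tf.argmax recovers the class index from each one-hot row; a quick sketch reusing one_hot from above:
print(tf.argmax(one_hot,axis=1))  # tf.Tensor([1 0 2], ...): the original labels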

11. In-place subtraction

a = tf.Variable(4)
a.assign_sub(1)
print(a)
Output:
<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=3>
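The counterparts assign_add and assign increment and overwrite a variable in place; a quick sketch:
a = tf.Variable(4)
a.assign_add(2)  # a is now 6
a.assign(10)     # a is now 10
print(a)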

12. Index of the max/min along a given axis

test = np.array([[1,2,3],[2,3,4],[4,5,6],[8,2,10]])
print(test)
print(tf.argmax(test,axis=0))# index of the max in each column
print(tf.argmax(test,axis=1))# index of the max in each row
Output:
[[ 1  2  3]
 [ 2  3  4]
 [ 4  5  6]
 [ 8  2 10]]
tf.Tensor([3 2 3], shape=(3,), dtype=int64)
tf.Tensor([2 2 2 2], shape=(4,), dtype=int64)
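tf.argmin works the same way for minima; a quick sketch on the same array:
print(tf.argmin(test,axis=1))  # index of the min in each row: [0 0 0 1]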

13. Element-wise comparison

# Returns the element-wise comparison: True where a is greater than b, False otherwise
import tensorflow as tf
a=tf.constant([5,4,6])
b=tf.constant([5,2,5])
tf.greater(a,b)
Output:
<tf.Tensor: shape=(3,), dtype=bool, numpy=array([False,  True,  True])>
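A common use of element-wise comparison is computing classification accuracy by combining tf.equal, tf.cast, and tf.reduce_mean; a minimal sketch with made-up predictions and labels:
pred = tf.constant([0,1,2,1])   # hypothetical predicted classes
label = tf.constant([0,1,1,1])  # hypothetical true classes
correct = tf.cast(tf.equal(pred,label),tf.float32)
print(tf.reduce_mean(correct))  # 0.75: 3 of the 4 predictions match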

14. Ternary selection (tf.where)

# Return the larger of the corresponding elements of a and b
a = tf.constant([1,1,1,0,0,0])
b = tf.constant([0,0,0,2,2,2])
c = tf.where(tf.greater(a,b),a,b)# take a where a > b, otherwise b
print(c)
Output:
tf.Tensor([1 1 1 2 2 2], shape=(6,), dtype=int32)
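Called with only a condition, tf.where instead returns the indices of the True entries; a quick sketch:
mask = tf.constant([True,False,True,False])
print(tf.where(mask))  # indices of the True entries: [[0], [2]]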

15. Generating random numbers with NumPy

rdm = np.random.RandomState(seed=1)  # seeded generator
a = rdm.rand()  # a single float in [0, 1)
b = rdm.rand()  # the next float in the sequence
c = rdm.rand(2,3)  # a 2x3 array of floats in [0, 1)
print(a)
print(b)
print(c)
Output:
0.417022004702574
0.7203244934421581
[[1.14374817e-04 3.02332573e-01 1.46755891e-01]
 [9.23385948e-02 1.86260211e-01 3.45560727e-01]]
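np.random.RandomState is NumPy's legacy generator interface; newer code typically uses np.random.default_rng, which is seeded the same way. A minimal sketch:
rng = np.random.default_rng(seed=1)
print(rng.random())       # one float in [0, 1)
print(rng.random((2,3)))  # a 2x3 array of floats in [0, 1)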

16. Stacking two arrays vertically

a = np.array([1,2,3])
b = np.array([4,5,6])
c = np.vstack((a,b))
print(c)
Output:
[[1 2 3]
 [4 5 6]]
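The horizontal counterpart is np.hstack; a quick sketch:
print(np.hstack((a,b)))  # [1 2 3 4 5 6]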

17. Generating grid data

# Ranges are half-open [start, stop); x increases down the rows and y increases across the columns, so the first range sets the row count and the second sets the column count
# 1:3:1 yields the 2 values [1, 2] and 2:4:0.5 yields the 4 values [2, 2.5, 3, 3.5], so both matrices are 2 rows by 4 columns
x,y=np.mgrid[1:3:1,2:4:0.5]
print(x)
print(y)
Output:
[[1. 1. 1. 1.]
 [2. 2. 2. 2.]]
[[2.  2.5 3.  3.5]
 [2.  2.5 3.  3.5]]
# Flatten the data
print(x.ravel())
print(y.ravel())
Output:
[1. 1. 1. 1. 2. 2. 2. 2.]
[2.  2.5 3.  3.5 2.  2.5 3.  3.5]
# Pair the coordinates
grid = np.c_[x.ravel(),y.ravel()]
print(grid)
Output:
[[1.  2. ]
 [1.  2.5]
 [1.  3. ]
 [1.  3.5]
 [2.  2. ]
 [2.  2.5]
 [2.  3. ]
 [2.  3.5]]
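The same grid can also be built with np.meshgrid; note that its default indexing is 'xy', so indexing='ij' is needed to match np.mgrid. A sketch of the equivalent call:
x2,y2 = np.meshgrid(np.arange(1,3,1),np.arange(2,4,0.5),indexing='ij')
print(np.array_equal(x2,x),np.array_equal(y2,y))  # True True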

18. Shuffling data

# Without a seed
import numpy as np
import tensorflow as tf
a = [1,2,3,4,5,6,7,8]
b = [1,2,3,4,5,6,7,8]
np.random.shuffle(a)
np.random.shuffle(b)
print(a)
print(b)
Output:
[7, 4, 5, 6, 3, 8, 2, 1]
[6, 2, 7, 3, 8, 5, 1, 4]
# With a seed: resetting the same seed before each shuffle produces the same permutation
a = [1,2,3,4,5,6,7,8]
b = [1,2,3,4,5,6,7,8]
np.random.seed(10)
np.random.shuffle(a)
np.random.seed(10)
np.random.shuffle(b)
print(a)
print(b)
Output:
[3, 4, 7, 8, 1, 5, 6, 2]
[3, 4, 7, 8, 1, 5, 6, 2]
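Seeding twice keeps a and b aligned, but on NumPy arrays a single shared permutation index does the same job in one step; a minimal sketch:
a = np.array([1,2,3,4,5,6,7,8])
b = np.array([1,2,3,4,5,6,7,8])
p = np.random.permutation(len(a))  # one shared shuffle order
print(a[p])
print(b[p])  # same permutation, so the pairing is preserved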

19. Slicing with tf.data.Dataset.from_tensor_slices

# Slice along the first axis into feature-label pairs
# Produces 6 dataset elements, one sample each
features = np.random.sample((6,5))
labels = np.random.sample((6,1))
train_db = tf.data.Dataset.from_tensor_slices((features,labels))
print(features)
print(labels)
for i in train_db:
    print(i)
Output:
[[0.45661253 0.73088713 0.36371891 0.16227399 0.16129951]
 [0.53947244 0.68344813 0.31147118 0.62940532 0.84393026]
 [0.25169372 0.02836878 0.22408333 0.2408455  0.93215049]
 [0.9317588  0.55643935 0.91575506 0.50259018 0.22844811]
 [0.48104321 0.18557152 0.83875204 0.53576027 0.78292049]
 [0.68293397 0.02872328 0.30017315 0.88867609 0.9309101 ]]
[[0.86872256]
 [0.18945586]
 [0.06195475]
 [0.59439588]
 [0.39764456]
 [0.26805615]]
(<tf.Tensor: shape=(5,), dtype=float64, numpy=array([0.45661253, 0.73088713, 0.36371891, 0.16227399, 0.16129951])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.86872256])>)
(<tf.Tensor: shape=(5,), dtype=float64, numpy=array([0.53947244, 0.68344813, 0.31147118, 0.62940532, 0.84393026])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.18945586])>)
(<tf.Tensor: shape=(5,), dtype=float64, numpy=array([0.25169372, 0.02836878, 0.22408333, 0.2408455 , 0.93215049])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.06195475])>)
(<tf.Tensor: shape=(5,), dtype=float64, numpy=array([0.9317588 , 0.55643935, 0.91575506, 0.50259018, 0.22844811])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.59439588])>)
(<tf.Tensor: shape=(5,), dtype=float64, numpy=array([0.48104321, 0.18557152, 0.83875204, 0.53576027, 0.78292049])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.39764456])>)
(<tf.Tensor: shape=(5,), dtype=float64, numpy=array([0.68293397, 0.02872328, 0.30017315, 0.88867609, 0.9309101 ])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.26805615])>)
# With batch(2): 3 batches of 2 samples each
features = np.random.sample((6,5))
labels = np.random.sample((6,1))
train_db = tf.data.Dataset.from_tensor_slices((features,labels)).batch(2)
print(features)
print(labels)
for i in train_db:
    print(i)
Output:
[[0.60191582 0.08312388 0.481893   0.74766266 0.1624255 ]
 [0.09834761 0.35715288 0.45644732 0.53083289 0.83756026]
 [0.07014468 0.96738409 0.35012738 0.00645945 0.30420116]
 [0.666946   0.96222472 0.13848844 0.77630396 0.12717365]
 [0.52043713 0.90597127 0.8873814  0.14441039 0.66881629]
 [0.73593185 0.72052657 0.03192619 0.05156881 0.08208447]]
[[0.00954747]
 [0.79503142]
 [0.59393121]
 [0.42451152]
 [0.11958024]
 [0.51100995]]
(<tf.Tensor: shape=(2, 5), dtype=float64, numpy=
array([[0.60191582, 0.08312388, 0.481893  , 0.74766266, 0.1624255 ],
       [0.09834761, 0.35715288, 0.45644732, 0.53083289, 0.83756026]])>, <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
array([[0.00954747],
       [0.79503142]])>)
(<tf.Tensor: shape=(2, 5), dtype=float64, numpy=
array([[0.07014468, 0.96738409, 0.35012738, 0.00645945, 0.30420116],
       [0.666946  , 0.96222472, 0.13848844, 0.77630396, 0.12717365]])>, <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
array([[0.59393121],
       [0.42451152]])>)
(<tf.Tensor: shape=(2, 5), dtype=float64, numpy=
array([[0.52043713, 0.90597127, 0.8873814 , 0.14441039, 0.66881629],
       [0.73593185, 0.72052657, 0.03192619, 0.05156881, 0.08208447]])>, <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
array([[0.11958024],
       [0.51100995]])>)
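In training code these dataset transformations are usually chained; a minimal sketch of a typical input pipeline (the buffer and batch sizes here are illustrative):
train_db = (tf.data.Dataset.from_tensor_slices((features,labels))
            .shuffle(buffer_size=6)  # shuffle within a 6-element buffer
            .batch(2))               # then group into batches of 2
for x,y in train_db:
    print(x.shape,y.shape)  # (2, 5) (2, 1) for each batch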