TensorFlow (1)

1. Regression Problem

1. Define the loss function
2. Set up the gradient computation

  • out = relu(relu(relu(X@W1 + b1)@W2 + b2)@W3 + b3)  Note: the model's forward pass
  • pred = argmax(out)  Note: argmax returns the position (the x) at which the output is largest
  • loss = MSE(out, label)

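A minimal sketch of this forward pass in TF2 (the batch size and layer widths here are illustrative, not from the original):

import tensorflow as tf
x = tf.random.normal([4, 784])               # a hypothetical batch of 4 samples
w1, b1 = tf.Variable(tf.random.normal([784, 256])), tf.Variable(tf.zeros([256]))
w2, b2 = tf.Variable(tf.random.normal([256, 128])), tf.Variable(tf.zeros([128]))
w3, b3 = tf.Variable(tf.random.normal([128, 10])), tf.Variable(tf.zeros([10]))
out = tf.nn.relu(tf.nn.relu(tf.nn.relu(x @ w1 + b1) @ w2 + b2) @ w3 + b3)
pred = tf.argmax(out, axis=1)                # argmax recovers the predicted class
label = tf.one_hot(tf.range(4), depth=10)    # hypothetical one-hot labels
loss = tf.reduce_mean(tf.keras.losses.mse(label, out))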

import numpy as np
import matplotlib.pyplot as plt

# y = w*x + b
# define the loss function
def compute_error_for_line_given_points(b, w, points):
    totalError = 0
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # accumulate the squared error
        totalError += (y - (w * x + b)) ** 2
        plt.scatter(x, y, marker='o')
    # average loss over all points (return after the loop, not inside it)
    return totalError / float(len(points))
        
# set up the gradient computation
def step_gradient(b_current, w_current, points, learningRate):
    b_gradient = 0
    w_gradient = 0
    N = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # grad_b = 2(wx+b-y)
        b_gradient += (2 / N) * ((w_current * x + b_current) - y)
        # grad_w = 2(wx+b-y)*x
        w_gradient += (2 / N) * x * ((w_current * x + b_current) - y)
    # update b' and w' once per step, after accumulating over all points
    new_b = b_current - (learningRate * b_gradient)
    new_w = w_current - (learningRate * w_gradient)
    return [new_b, new_w]
        
# run gradient descent: update (b, w) repeatedly
def gradient_descent_runner(points,starting_b,starting_w,learning_rate,num_iterations):
    b=starting_b
    w=starting_w
    # update for several times
    for i in range(num_iterations):
        b,w=step_gradient(b,w,np.array(points),learning_rate)
    return [b,w]
    
def run():
    points = np.genfromtxt("C:/Users/86134/desktop/工作簿1.csv", delimiter=",")
    print(points)
    learning_rate = 0.0001
    initial_b = 0   # initial y-intercept guess
    initial_w = 0   # initial slope guess
    num_iterations = 1000   # iterate 1000 times
    print("Starting gradient descent at b = {0}, w = {1}, error = {2}"
          .format(initial_b, initial_w,
                  compute_error_for_line_given_points(initial_b, initial_w, points)))
    print("Running...")
    [b, w] = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)
    print("After {0} iterations, b = {1}, w = {2}, error = {3}"
          .format(num_iterations, b, w,
                  compute_error_for_line_given_points(b, w, points)))
    x = np.arange(0, 100)
    y = w * x + b
    plt.plot(x, y)
    plt.show()
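As a sanity check (a sketch, assuming points is the (N, 2) array loaded above), the fitted (w, b) can be compared with numpy's closed-form least-squares solution:

# slope and intercept from ordinary least squares
w_ls, b_ls = np.polyfit(points[:, 0], points[:, 1], 1)
# with a small enough learning rate and enough iterations,
# gradient descent's (w, b) should approach (w_ls, b_ls)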

2. Creating Tensors and Their Uses

  • scalar: 1.1
  • vector: [1.1],[1.1,2.2, …]
  • matrix: [[1.1, 2.2], [3.3, 4.4],[5.5, 6.6]]
  • tensor: rank > 2

2.2 Functions

1. tf.constant(value, shape, dtype=None, name=None)
Purpose: create a constant tensor

  • value: the value
  • shape: the shape of the data
  • dtype: the data type
  • name: an optional name
import tensorflow as tf

tf.constant(1)
tf.constant(1.1, dtype=tf.double)
tf.constant([True, False])
tf.constant('hello,world.')

c = tf.constant(-1, shape=[2, 3], dtype=tf.float32, name=None)
# under TF2 eager execution the value can be printed directly
print(c.numpy())
(out:
[[-1. -1. -1.]
 [-1. -1. -1.]])

2. a.device: shows which device the tensor is placed on

with tf.device("gpu"):
    b = tf.range(4)
b.device
b.numpy()   (out: array([0, 1, 2, 3]))
b.shape   (out: TensorShape([4]))

3. ndim: the number of dimensions of the tensor

b.ndim   (out: 1)

4. tf.is_tensor: checks whether an object is a tensor

tf.is_tensor(b)

5. np.arange: creates an array of evenly spaced values
np.arange([start,] stop[, step,], dtype=None)

  • start: start value, defaults to 0
  • stop: end value, not included in the result
  • step: step size, defaults to 1
  • dtype: the element data type; optional, inferred from the other inputs by default
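A few example calls (a small sketch, values chosen for illustration):

import numpy as np
np.arange(5)             # (out: array([0, 1, 2, 3, 4]))
np.arange(2, 10, 2)      # (out: array([2, 4, 6, 8]))
np.arange(0., 1., 0.25)  # (out: array([0., 0.25, 0.5, 0.75])) — dtype inferred as float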

6. tf.convert_to_tensor: converts data (e.g. a numpy array or list) to a tensor

import tensorflow as tf
import numpy as np
# convert an existing array a to a tensor
aa = tf.convert_to_tensor(a)
tf.convert_to_tensor(np.ones([2,3]))
tf.convert_to_tensor(np.zeros([2,3]))
tf.convert_to_tensor([1,2])
tf.convert_to_tensor([1,2.])
tf.convert_to_tensor([[1],[2.]])

7. tf.cast(): converts the dtype of a tensor
tf.cast(tensor, dtype=target_dtype, name=optional)

tf.cast(aa, dtype=tf.float32)
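For example, casting through bool and back (a small sketch, values chosen for illustration):

import tensorflow as tf
b = tf.constant([0, 1, 2])
bb = tf.cast(b, dtype=tf.bool)   # (out: [False, True, True]) — non-zero becomes True
tf.cast(bb, dtype=tf.int32)      # (out: [0, 1, 1]) — the original 2 does not come back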

8. tf.Variable()

# a Variable is a tensor intended to be updated during training
a = tf.range(5)
b = tf.Variable(a)
b.dtype
b.name
b = tf.Variable(a, name='input_data')
b.trainable   # whether it can be updated by training (out: True)
isinstance(b, tf.Tensor)     # checks whether b is a tf.Tensor (out: False)
isinstance(b, tf.Variable)   # checks whether b is a tf.Variable (out: True)
tf.is_tensor(b)   (out: True)
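The trainable flag matters because tf.GradientTape watches Variables automatically; a minimal sketch:

import tensorflow as tf
w = tf.Variable(2.)
with tf.GradientTape() as tape:
    y = w * w
tape.gradient(y, w)   # (out: 4.0) — dy/dw = 2w, tracked without tape.watch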

9. tf.gather()

import tensorflow as tf
idx=tf.range(10)
idx=tf.random.shuffle(idx)   # shuffle the order
a=tf.random.normal([10,784])
b=tf.random.uniform([10],maxval=10,dtype=tf.int32)
# gathering a and b with the same shuffled idx keeps samples and labels aligned
a=tf.gather(a,idx)
b=tf.gather(b,idx)

10. tf.random.uniform: draws random values from a uniform distribution
Steps for computing the loss:

out = tf.random.uniform([4, 10])  # simulate the output of the last layer

y = tf.range(4)  # simulate the ground-truth y, 4 classes
y = tf.one_hot(y, depth=10)  # one-hot encode y
loss = tf.keras.losses.mse(y, out)  # squared error per sample
loss = tf.reduce_mean(loss)  # the mean squared error

print(out, y, loss, sep='\n***************\n')

tf.Tensor(
[[0.95495164 0.42851162 0.63940954 0.7457049  0.863574   0.4137305
  0.19719076 0.6347679  0.3381462  0.21893883]
 [0.9869051  0.64940274 0.92230356 0.02006686 0.9132643  0.97518027
  0.7332783  0.60538805 0.16506422 0.9878422 ]
 [0.7512604  0.5606184  0.90563107 0.24399471 0.5856639  0.8866366
  0.13660061 0.19366539 0.7147602  0.05145013]
 [0.61691034 0.43103004 0.8596388  0.3836639  0.9826486  0.07203913
  0.38422346 0.36048484 0.8623512  0.0403564 ]], shape=(4, 10), dtype=float32)
***************
tf.Tensor(
[[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]], shape=(4, 10), dtype=float32)
***************
tf.Tensor(0.36591613, shape=(), dtype=float32)
from tensorflow.keras import datasets, layers, optimizers
net = layers.Dense(10)
net
net.kernel   # corresponds to w
net.bias     # corresponds to b
x = tf.random.normal([4, 784])
net = layers.Dense(10)
net(x).shape   (out: TensorShape([4, 10]))
net.kernel.shape   (out: TensorShape([784, 10]))
net.bias.shape   (out: TensorShape([10]))
from tensorflow import keras
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=10000)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=80)
x_train.shape
from tensorflow.keras.layers import Embedding
# the layer must be instantiated before it is called; output_dim=100 is an illustrative choice
emb = Embedding(input_dim=10000, output_dim=100)(x_train)
emb.shape
x = tf.random.normal((4, 32, 32, 3))
net = layers.Conv2D(16, kernel_size=3)
net(x)

3. Indexing and Slicing

3.1 Basic indexing

import tensorflow as tf
a=tf.ones([1,5,5,3])
a[0][0]
<tf.Tensor: shape=(5, 3), dtype=float32, numpy=
array([[1., 1., 1.],
       [1., 1., 1.],
       [1., 1., 1.],
       [1., 1., 1.],
       [1., 1., 1.]], dtype=float32)>
a[0][0][0]
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([1., 1., 1.], dtype=float32)>
a[0][0][0][2]
<tf.Tensor: shape=(), dtype=float32, numpy=1.0>

3.2 Numpy-style indexing

a[A, B, …]

a=tf.random.normal([4,28,28,3])
a[1,2].shape
(out:TensorShape([28, 3]))
a[1,2,3].shape
(out:TensorShape([3]))
a[1,2,3,2].shape
(out:TensorShape([]))

3.2.1 start:end

a[A:B]

  • includes A, but not B

3.2.2 Indexing with :

a[0].shape
(out:TensorShape([28, 28, 3]))
a[0,:,:,:].shape
(out:TensorShape([28, 28, 3]))
a[0,1,:,:].shape
(out:TensorShape([28, 3]))

3.2.3 start:end:step

3.2.4 ::step

3.2.5 ::-1 reverses the order

3.2.6 … (ellipsis)

a[0,…]=a[0,:,:,:]
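A short sketch of the step and ellipsis forms above (a minimal example, values chosen for illustration):

import tensorflow as tf
a = tf.range(10)
a[2:8:2]    # (out: [2, 4, 6]) — start:end:step
a[::2]      # (out: [0, 2, 4, 6, 8]) — every second element
a[::-1]     # (out: [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) — reversed
b = tf.random.normal([4, 28, 28, 3])
b[0, ...].shape   # (out: TensorShape([28, 28, 3])), the same as b[0, :, :, :]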

3.3 Selective Indexing

3.3.1 tf.gather(a,axis,indices)

  • data:[classes,students,subjects]
  • [4,35,8]
a=tf.random.normal([4,35,8])
tf.gather(a,axis=0,indices=[2,3]).shape
(out:TensorShape([2, 35, 8]))
tf.gather(a,axis=1,indices=[1,2,5,6,8,9]).shape
(out:TensorShape([4, 6, 8]))

3.3.2 tf.gather_nd

import tensorflow as tf
a=tf.random.normal([4,35,8])
tf.gather_nd(a,[[0,0],[1,1]]).shape
(out:TensorShape([2, 8]))
tf.gather_nd(a,[[0,0],[1,1],[2,2]]).shape
(out:TensorShape([3, 8]))
tf.gather_nd(a,[[0,0,0],[1,1,1],[2,2,2]]).shape
(out:TensorShape([3]))
tf.gather_nd(a,[[[0,0,0],[1,1,1],[2,2,2]]]).shape
(out:TensorShape([1, 3]))

3.3.3 tf.boolean_mask

a=tf.random.normal([4,28,28,3])
tf.boolean_mask(a,mask=[True,False,True,False]).shape
(out:TensorShape([2, 28, 28, 3]))
tf.boolean_mask(a,mask=[True,False,True],axis=3).shape
(out:TensorShape([4, 28, 28, 2]))
a=tf.ones([2,3,4])
tf.boolean_mask(a,mask=[[True,False,True],[True,True,False]])   # the mask covers the first two dims [2,3]; four entries are True
(out:<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 1., 1., 1.],
       [1., 1., 1., 1.],
       [1., 1., 1., 1.],
       [1., 1., 1., 1.]], dtype=float32)>)

4. Dimension Transformations

4.1 tf.reshape(tensor, shape, name=None)

  • tensor: the input Tensor
  • shape: a tensor defining the output shape; elements are int32 or int64
  • name: optional, the operation name
  • purpose: redefine the view of the data
a=tf.random.normal([4,28,28,3])
tf.reshape(tf.reshape(a,[4,-1]),[4,28,28,3]).shape
(out:TensorShape([4, 28, 28, 3]))

4.2 tf.transpose(tensor, perm)

  • perm: the permutation of the dimensions
  • purpose: transpose
a=tf.random.normal([4,3,2,1])
tf.transpose(a).shape
(out:TensorShape([1, 2, 3, 4]))
tf.transpose(a,perm=[0,3,2,1]).shape
(out:TensorShape([4, 1, 2, 3]))

4.3 tf.expand_dims(tensor, axis=, name)

  • purpose: insert a dimension of size 1
a=tf.random.normal([3,4,8])
tf.expand_dims(a,3).shape
(out:TensorShape([3, 4, 8, 1]))

4.4 squeeze

  • removes only dimensions of size 1
  • tf.squeeze(tensor, axis=)
a=tf.random.normal([3,4,8,1])
tf.squeeze(a,axis=3).shape
(out:TensorShape([3, 4, 8]))

5. Broadcasting

5.1 broadcast

  • expands dimensions to a common shape without copying data, saving memory
a=tf.ones([3,4])
tf.broadcast_to(a,[2,3,4])
(out:<tf.Tensor: shape=(2, 3, 4), dtype=float32, numpy=
array([[[1., 1., 1., 1.],
        [1., 1., 1., 1.],
        [1., 1., 1., 1.]],

       [[1., 1., 1., 1.],
        [1., 1., 1., 1.],
        [1., 1., 1., 1.]]], dtype=float32)>)

5.2 tf.tile

  • also expands dimensions to a common shape, but physically copies the data, using more memory
a2 =tf.expand_dims(a,axis=0)
tf.tile(a2,[2,1,1])
(out:<tf.Tensor: shape=(2, 3, 4), dtype=float32, numpy=
array([[[1., 1., 1., 1.],
        [1., 1., 1., 1.],
        [1., 1., 1., 1.]],

       [[1., 1., 1., 1.],
        [1., 1., 1., 1.],
        [1., 1., 1., 1.]]], dtype=float32)>)

6. Operators

  • +, -, *, /
  • **, pow, square
  • sqrt
  • //, %
  • exp(n) means e^n
  • log: tf.math.log is the natural logarithm (base e); other bases need a change of base (see the sketch below)
  • @, tf.matmul
  • linear layer
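A small sketch of these operators in TF2 (values chosen for illustration):

import tensorflow as tf
a = tf.fill([2, 2], 2.)
a ** 3, tf.pow(a, 3), tf.square(a)    # (out: 8s, 8s, 4s)
tf.sqrt(a)
tf.exp(a)                             # e ** 2
# tf.math.log is base e; divide for other bases
tf.math.log(a) / tf.math.log(2.)      # (out: log2(2) = 1)
# a linear layer is just x @ w + b
x = tf.ones([4, 2]); w = tf.ones([2, 1]); b = tf.constant(0.1)
x @ w + b                             # (out: shape [4, 1]); b is broadcast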

7. Concatenation and Splitting

  • tf.concat
import tensorflow as tf
a=tf.random.normal([8,5,7])
b=tf.random.normal([5,5,7])
tf.concat([a,b],axis=0).shape
(out:TensorShape([13, 5, 7]))
  • tf.split
c=tf.split(a,axis=0,num_or_size_splits=2)
c[0].shape
(out:TensorShape([4, 5, 7]))
d=tf.split(a,axis=2,num_or_size_splits=[2,2,3])
d[0].shape,d[1].shape,d[2].shape
(out:(TensorShape([8, 5, 2]), TensorShape([8, 5, 2]), TensorShape([8, 5, 3])))
  • tf.stack
g=tf.random.normal([4,35,8])
h=tf.random.normal([4,35,8])
tf.stack([g,h],axis=0).shape
(out:TensorShape([2, 4, 35, 8]))
  • tf.unstack
j=tf.constant([2,4,35,8])   # e.g. the shape of the stacked tensor above
k=tf.unstack(j,axis=0)
k[1].shape
(out:TensorShape([]))
k[2].shape
(out:TensorShape([]))
k
(out:[<tf.Tensor: shape=(), dtype=int32, numpy=2>,
 <tf.Tensor: shape=(), dtype=int32, numpy=4>,
 <tf.Tensor: shape=(), dtype=int32, numpy=35>,
 <tf.Tensor: shape=(), dtype=int32, numpy=8>])

8. Data Statistics

  • tf.norm
import tensorflow as tf
b=tf.ones([2,2])
tf.norm(b)
(out:<tf.Tensor: shape=(), dtype=float32, numpy=2.0>)
tf.norm(b,ord=2,axis=1)
(out:<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.4142135, 1.4142135], dtype=float32)>)
tf.norm(b,ord=1)
(out:<tf.Tensor: shape=(), dtype=float32, numpy=4.0>)
tf.norm(b,ord=1,axis=0)
(out:<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 2.], dtype=float32)>)
tf.norm(b,ord=1,axis=1)
(out:<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 2.], dtype=float32)>)
  • tf.reduce_min/max
import tensorflow as tf
a=tf.random.normal([4,10])
tf.reduce_min(a),tf.reduce_max(a),tf.reduce_mean(a)
(out:(<tf.Tensor: shape=(), dtype=float32, numpy=-2.2508128>,
 <tf.Tensor: shape=(), dtype=float32, numpy=1.731733>,
 <tf.Tensor: shape=(), dtype=float32, numpy=-0.14553194>))
tf.reduce_min(a,axis=1) ,tf.reduce_max(a,axis=1),tf.reduce_mean(a,axis=1)
(out:(<tf.Tensor: shape=(4,), dtype=float32, numpy=array([-2.0692022, -2.0198808, -2.2508128, -1.9293032], dtype=float32)>,
 <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.40593663, 1.731733  , 1.2540325 , 0.9654629 ], dtype=float32)>,
 <tf.Tensor: shape=(4,), dtype=float32, numpy=array([-0.7926154 ,  0.4226243 ,  0.05149287, -0.2636295 ], dtype=float32)>))
  • tf.argmax/argmin — operate along axis 0 (down the columns) by default
a.shape
(out:TensorShape([4, 10]))
tf.argmax(a).shape
(out:TensorShape([10]))
tf.argmin(a)
(out:<tf.Tensor: shape=(10,), dtype=int64, numpy=array([1, 2, 2, 1, 1, 0, 2, 3, 3, 3], dtype=int64)>)
  • tf.equal: can be used to compute accuracy
import tensorflow as tf
a=tf.constant([1,2,3,2,5])
b=tf.range(5)
tf.equal(a,b)
(out:<tf.Tensor: shape=(5,), dtype=bool, numpy=array([False, False, False, False, False])>)
res=tf.equal(a,b)
tf.reduce_sum(tf.cast(res,dtype=tf.int32))
(out:<tf.Tensor: shape=(), dtype=int32, numpy=0>)
  • tf.unique (see the sketch below)
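tf.unique returns the distinct values together with an index map back into them; a minimal sketch:

import tensorflow as tf
a = tf.constant([4, 2, 2, 4, 3])
unique, idx = tf.unique(a)
# unique -> [4, 2, 3]; idx -> [0, 1, 1, 0, 2]
tf.gather(unique, idx)   # reconstructs the original a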

9. Tensor Sorting

9.1 tf.sort(tensor, direction=)

  • ascending order by default
  • use direction='DESCENDING' for descending order
  • use tf.argsort(a) to get the sorted indices
import tensorflow as tf
a=tf.random.uniform([3,3],maxval=10,dtype=tf.int32)
a
(out:<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[0, 4, 5],
       [8, 2, 9],
       [6, 2, 5]])>)

# ascending
tf.sort(a)
(out:<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[0, 4, 5],
       [2, 8, 9],
       [2, 5, 6]])>)

# descending
tf.sort(a, direction='DESCENDING')
(out:<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[5, 4, 0],
       [9, 8, 2],
       [6, 5, 2]])>)

# get the sorted indices
idx=tf.argsort(a)
idx
(out:<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[0, 1, 2],
       [1, 0, 2],
       [1, 2, 0]])>)

9.2 top-k accuracy

import tensorflow as tf
prob =tf.constant([[0.1 ,0.2, 0.7],[0.2 ,0.7,0.1]])
target=tf.constant([2,0])
# indices of the predicted classes, ordered from most to least likely
k_b = tf.math.top_k(prob, 3).indices
k_b
(out:<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[2, 1, 0],
       [1, 0, 2]])>)

# transpose: row 1 is the most likely guess (top-1), row 2 the top-2, ...
k_b = tf.transpose(k_b, [1, 0])
k_b
(out:<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[2, 1],
       [1, 0],
       [0, 2]])>)

target =tf.broadcast_to(target,[3,2])
target
(out:<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[2, 0],
       [2, 0],
       [2, 0]])>)

For example:

def accuracy(output, target, topk=(1,)):
    maxk = max(topk)
    batch_size = target.shape[0]

    # indices of the top-k predictions, transposed so row k is the k-th guess
    pred = tf.math.top_k(output, maxk).indices
    pred = tf.transpose(pred, perm=[1, 0])
    target_ = tf.broadcast_to(target, pred.shape)
    correct = tf.equal(pred, target_)

    res = []
    for k in topk:
        correct_k = tf.reduce_sum(tf.cast(tf.reshape(correct[:k], [-1]), dtype=tf.float32))
        acc = float(correct_k / batch_size)
        res.append(acc)

    return res

import tensorflow as tf
# predicted scores for each of the 6 classes
output = tf.random.normal([10, 6])
# softmax so the six class probabilities sum to 1
output = tf.math.softmax(output, axis=1)
# ground-truth labels
target = tf.random.uniform([10], maxval=6, dtype=tf.int32)
output

target

pred = tf.argmax(output,axis=1)
pred

acc = accuracy(output, target, topk=(1, 2, 3, 4, 5, 6))

acc

Output:

<tf.Tensor: shape=(10, 6), dtype=float32, numpy=
array([[0.16145979, 0.14574745, 0.42606267, 0.0455263 , 0.1105589 ,
        0.11064494],
       [0.20319663, 0.0706072 , 0.08809059, 0.0842905 , 0.29325873,
        0.2605564 ],
       [0.3292493 , 0.25139844, 0.08191928, 0.03777492, 0.25868928,
        0.04096882],
       [0.16093852, 0.30602935, 0.19835263, 0.15266834, 0.03588389,
        0.14612718],
       [0.16465606, 0.1777486 , 0.07736219, 0.1610233 , 0.23022442,
        0.18898545],
       [0.4143168 , 0.35243836, 0.06659414, 0.08967362, 0.04634555,
        0.03063143],
       [0.02091111, 0.23389657, 0.20749617, 0.04623798, 0.12367996,
        0.3677782 ],
       [0.07771271, 0.23782305, 0.0270989 , 0.1069003 , 0.3576419 ,
        0.19282314],
       [0.01407431, 0.16857031, 0.09763125, 0.12012959, 0.37992334,
        0.21967117],
       [0.08488531, 0.13993438, 0.19996493, 0.39498058, 0.11856262,
        0.06167207]], dtype=float32)>
        
<tf.Tensor: shape=(10,), dtype=int64, numpy=array([2, 4, 0, 1, 4, 0, 5, 4, 4, 3], dtype=int64)>

<tf.Tensor: shape=(10,), dtype=int32, numpy=array([1, 0, 1, 5, 1, 3, 5, 3, 1, 0])>

[0.10000000149011612,
 0.10000000149011612,
 0.699999988079071,
 0.800000011920929,
 1.0,
 1.0]

10. Padding and Tiling

10.1 pad: pads with zeros

2-D case:

tf.pad(tensor, [[a, b], [c, d]])

  • [a, b] refers to the rows, [c, d] to the columns
  • a, b, c, d are how many rows/columns of zeros to add at the top, bottom, left, and right
import tensorflow as tf
a=tf.reshape(tf.range(9),[3,3])
(out:<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[0, 1, 2],
       [3, 4, 5],
       [6, 7, 8]])>)

tf.pad(a,[[0,0],[0,0]])      
(out:<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[0, 1, 2],
       [3, 4, 5],
       [6, 7, 8]])>
)

tf.pad(a,[[1,0],[0,0]])
(out:<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[0, 0, 0],
       [0, 1, 2],
       [3, 4, 5],
       [6, 7, 8]])>)

tf.pad(a,[[1,1],[1,0]])
(out:<tf.Tensor: shape=(5, 4), dtype=int32, numpy=
array([[0, 0, 0, 0],
       [0, 0, 1, 2],
       [0, 3, 4, 5],
       [0, 6, 7, 8],
       [0, 0, 0, 0]])>)

Higher-dimensional case:

a=tf.random.normal([4,28,28,3])
b=tf.pad(a,[[0,0],[2,2],[2,2],[0,0]])
b.shape
(out:TensorShape([4, 32, 32, 3]))

10.2 tile

import tensorflow as tf
a = tf.reshape(tf.range(9), (3, 3))
# triple the columns
tf.tile(a, [1, 3])
(out:<tf.Tensor: shape=(3, 9), dtype=int32, numpy=
array([[0, 1, 2, 0, 1, 2, 0, 1, 2],
       [3, 4, 5, 3, 4, 5, 3, 4, 5],
       [6, 7, 8, 6, 7, 8, 6, 7, 8]])>)

# double the rows
tf.tile(a, [2, 1])
(out:<tf.Tensor: shape=(6, 3), dtype=int32, numpy=
array([[0, 1, 2],
       [3, 4, 5],
       [6, 7, 8],
       [0, 1, 2],
       [3, 4, 5],
       [6, 7, 8]])>)

# double both the rows and the columns
tf.tile(a,[2,2])
(out:<tf.Tensor: shape=(6, 6), dtype=int32, numpy=
array([[0, 1, 2, 0, 1, 2],
       [3, 4, 5, 3, 4, 5],
       [6, 7, 8, 6, 7, 8],
       [0, 1, 2, 0, 1, 2],
       [3, 4, 5, 3, 4, 5],
       [6, 7, 8, 6, 7, 8]])>)

10.3 broadcast_to

aa=tf.expand_dims(a,axis=0)
tf.tile(aa,[2,2,1]),tf.broadcast_to(aa,[2,3,3])
(out:(<tf.Tensor: shape=(2, 6, 3), dtype=int32, numpy=
 array([[[0, 1, 2],
         [3, 4, 5],
         [6, 7, 8],
         [0, 1, 2],
         [3, 4, 5],
         [6, 7, 8]],
 
        [[0, 1, 2],
         [3, 4, 5],
         [6, 7, 8],
         [0, 1, 2],
         [3, 4, 5],
         [6, 7, 8]]])>,
 <tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=
 array([[[0, 1, 2],
         [3, 4, 5],
         [6, 7, 8]],
 
        [[0, 1, 2],
         [3, 4, 5],
         [6, 7, 8]]])>))

11. Tensor Clipping

11.1 clip_by_value

import tensorflow as tf
a=tf.range(9)

# elements satisfying a > 2 keep their value; smaller ones become 2
tf.maximum(a,2)
(out:<tf.Tensor: shape=(9,), dtype=int32, numpy=array([2, 2, 2, 3, 4, 5, 6, 7, 8])>)

# elements satisfying a < 8 keep their value; larger ones become 8
tf.minimum(a,8)
(out: <tf.Tensor: shape=(9,), dtype=int32, numpy=array([0, 1, 2, 3, 4, 5, 6, 7, 8])>)

# elements in [2, 7] are kept; values outside are clamped to the nearest bound
tf.clip_by_value(a,2,7)
(out:<tf.Tensor: shape=(9,), dtype=int32, numpy=array([2, 2, 2, 3, 4, 5, 6, 7, 7])>)

11.2 relu

  • tf.nn.relu(a)=tf.maximum(a,0)
import tensorflow as tf
a=tf.range(9)
a=a-5
tf.nn.relu(a)
(out:<tf.Tensor: shape=(9,), dtype=int32, numpy=array([0, 0, 0, 0, 0, 0, 1, 2, 3])>)
tf.maximum(a,0)
(out:
<tf.Tensor: shape=(9,), dtype=int32, numpy=array([0, 0, 0, 0, 0, 0, 1, 2, 3])>)

11.3 clip_by_norm

  • the direction of the gradient is unchanged; only the norm of the vector is scaled
a=tf.random.normal([2,2],mean=10)
tf.norm(a)
(out:<tf.Tensor: shape=(), dtype=float32, numpy=20.31215>)

aa=tf.clip_by_norm(a,15)
tf.norm(aa)
(out:<tf.Tensor: shape=(), dtype=float32, numpy=15.000001>)

11.4 gradient clipping

  • tf.clip_by_global_norm(grads, clip_norm)
    • scales every gradient by the same factor, keeping directions unchanged (see the sketch below)
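A minimal sketch of global-norm clipping (the gradient tensors here are hypothetical):

import tensorflow as tf
g1 = tf.fill([2, 2], 10.)
g2 = tf.fill([3], 10.)
clipped, global_norm = tf.clip_by_global_norm([g1, g2], clip_norm=5.)
# each tensor is scaled by clip_norm / global_norm, so directions are preserved
print(global_norm)                       # the norm before clipping
print(tf.linalg.global_norm(clipped))    # ≈ 5 after clipping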

12. Advanced Operations

12.1 where

  • with a single argument, it returns the coordinates of the True entries
a = tf.random.normal([3, 3])
mask = a > 0
mask
(out:<tf.Tensor: shape=(3, 3), dtype=bool, numpy=
array([[False,  True,  True],
       [False, False, False],
       [ True,  True, False]])>)

# take the values where mask is True
tf.boolean_mask(a, mask)
# find the coordinates that are True
indices = tf.where(mask)
# fetch the values at those coordinates
tf.gather_nd(a, indices)

  • where(cond, A, B)
  • picks from A where cond is True, and from B where cond is False
mask
(out:<tf.Tensor: shape=(3, 3), dtype=bool, numpy=
array([[False,  True,  True],
       [False, False, False],
       [ True,  True, False]])>)
A = tf.ones([3, 3])
B = tf.zeros([3, 3])
tf.where(mask, A, B)
(out:<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[0., 1., 1.],
       [0., 0., 0.],
       [1., 1., 0.]], dtype=float32)>)

12.2 tf.scatter_nd(indices, updates, shape)

  • indices: the positions to update
  • updates: the values to write
  • shape: the shape of the output tensor (initialized to zeros)
indices = tf.constant([[4], [3], [1], [7]])
updates = [4, 3, 5, 7]
shape = tf.constant([8])
tf.scatter_nd(indices, updates, shape)
(out:<tf.Tensor: shape=(8,), dtype=int32, numpy=array([0, 5, 0, 3, 4, 0, 0, 7])>)
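scatter_nd also works at higher ranks; a small sketch (shapes chosen for illustration) that writes whole rows into a zero matrix:

indices = tf.constant([[0], [2]])    # target row indices
updates = tf.ones([2, 4])            # one row of values per index
tf.scatter_nd(indices, updates, shape=[4, 4])   # rows 0 and 2 become ones, the rest stay zero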


12.3 meshgrid

import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import matplotlib.pyplot as plt
import tensorflow as tf

def fun(x):
    z=tf.math.sin(x[...,0])+tf.math.sin(x[...,1])
    return z

x=tf.linspace(0.,2*3.14,500)
y=tf.linspace(0.,2*3.14,500)
points_x,points_y=tf.meshgrid(x,y)
points=tf.stack([points_x,points_y],axis=2)

z=fun(points)
print('z',z.shape)

plt.figure('plot 2d func value')
plt.imshow(z,origin='lower',interpolation='none')
plt.colorbar()

plt.figure('plot 2d func contour')
plt.contour(points_x,points_y,z)
plt.colorbar()
plt.show()