TensorFlow 2 Basics


HCIA-AI Huawei Certified AI Engineer Online Course
Course link

This lesson consists of 8 units covering the basic syntax of TensorFlow 2. The 8 units are:

1. Creating constants and variables

2. Slicing and indexing tensors

3. Changing tensor dimensions

4. Arithmetic operations on tensors

5. Concatenating and splitting tensors

6. Tensor sorting

7. Eager Execution

8. AutoGraph

After completing this lesson, you will understand the basic syntax of TensorFlow 2.
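
Before working through the examples, it can help to confirm that a TensorFlow 2.x build is installed and that eager execution is enabled by default. A minimal check, not part of the original course code:

import tensorflow as tf

print(tf.__version__)          # expect a 2.x version string
print(tf.executing_eagerly())  # True by default in TensorFlow 2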

Code:

# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 15:00:00 2021

HCIA-AI Huawei Certified AI Engineer Online Course: TensorFlow 2 Basics

@author: Administrator
"""
import tensorflow as tf
import numpy as np

# 1. Creating constants and variables
# The four values are reshaped into a 2x2 float32 matrix
const_a = tf.constant([[1,2,3,4]],shape=[2,2],dtype=tf.float32)
print(const_a)

zeros_b = tf.zeros(shape=[2,3],dtype=tf.int32)
print(zeros_b)

zeros_like_c = tf.zeros_like(const_a)
print(zeros_like_c)

fill_d = tf.fill([3,3],8)
print(fill_d.numpy())

random_e = tf.random.normal([5,5],mean=0,stddev=1.0)
print(random_e.numpy())

list_f = [1,2,3,4,5,6]
type(list_f)
tensor_f = tf.convert_to_tensor(list_f,dtype=tf.float32)
print(tensor_f)

var_l = tf.Variable(tf.ones([2,3]))
print(var_l)
var_value_l = [[1,2,3],[4,5,6]]
var_l.assign(var_value_l)
print(var_l)
var_l.assign_add(tf.ones([2,3]))
var_l.assign_add(tf.ones([2,3]))
print(var_l)

# 2. Slicing and indexing tensors
tensor_h = tf.random.normal([4,100,100,3])
#print(tensor_h)
print(tensor_h[0][19][39][1])
indices = [0,1,3]
#print(tf.gather(tensor_h,axis=0,indices=indices,batch_dims=1))
indices_nd = [[0,1,1,0],[1,2,2,0]]
#print(tf.gather_nd(tensor_h,indices=indices_nd))
#print(tensor_h[0,:,:,:])

# 3. Changing tensor dimensions
const_d_l = tf.constant([[1,2,3,4]],shape=[2,2],dtype=tf.float32)
print(const_d_l.shape)
print(const_d_l.get_shape())
print(tf.shape(const_d_l))

reshape_l = tf.constant([[1,2,3],[4,5,6]])
print(reshape_l)
print(tf.reshape(reshape_l,(3,2)))

expand_sample_l = tf.random.normal([100,100,3])
print(expand_sample_l.shape)
print(tf.expand_dims(expand_sample_l,axis=0).shape)
print(tf.expand_dims(expand_sample_l,axis=1).shape)
print(tf.expand_dims(expand_sample_l,axis=-1).shape)

squeeze_sample_l = tf.random.normal([1,100,100,3])
print(squeeze_sample_l.shape)
print(tf.squeeze(squeeze_sample_l,axis=0).shape)

trans_sample_1 = tf.constant([1,2,3,4,5,6],shape=[2,3])
print(trans_sample_1.shape)
print(tf.transpose(trans_sample_1).shape)
trans_sample_2 = tf.random.normal([4,100,200,3])
print(trans_sample_2.shape)
print(tf.transpose(trans_sample_2,[0,2,1,3]).shape)

broadcast_sample_1 = tf.constant([1,2,3,4,5,6])
print(broadcast_sample_1.numpy())
print(tf.broadcast_to(broadcast_sample_1,shape=[4,6]).numpy())

# 4. Arithmetic operations on tensors (addition broadcasts b across the rows of a)
a = tf.constant([[0,0,0],[10,10,10],[20,20,20],[30,30,30]])
b = tf.constant([1,2,3])
print(a+b)

a = tf.constant([[3,5],[4,8]])
b = tf.constant([[1,6],[2,9]])
print(tf.add(a,b))
print(tf.matmul(a, b))

argmax_sample_1 = tf.constant([[1,3],[2,5],[7,5]])
print("\r\nInput tensor:\r\n",argmax_sample_1.numpy(),"\r\nshape:",argmax_sample_1.shape,"\r\n")
print(tf.argmax(argmax_sample_1,axis=0).numpy())
print(tf.argmax(argmax_sample_1,axis=1).numpy())

reduce_sample_1 = tf.constant([1,2,3,4,5,6],shape=[2,3])
print("\r\nOriginal data:\r\n",reduce_sample_1.numpy(),"\r\nshape:",reduce_sample_1.shape,"\r\n")
print(tf.reduce_sum(reduce_sample_1,axis=None).numpy())
print(tf.reduce_sum(reduce_sample_1,axis=0).numpy())
print(tf.reduce_sum(reduce_sample_1,axis=1).numpy())

# 5. Concatenating and splitting tensors
concat_sample_1 = tf.random.normal([4,100,100,3])
concat_sample_2 = tf.random.normal([40,100,100,3])
print("\r\nShapes of the original tensors:\r\n",concat_sample_1.shape,"\r\n",concat_sample_2.shape)
print("Shape after concatenation:\r\n",tf.concat([concat_sample_1,concat_sample_2],axis=0).shape)

stack_sample_1 = tf.random.normal([100,100,3])
stack_sample_2 = tf.random.normal([100,100,3])
print("\r\nShapes of the original tensors:\r\n",stack_sample_1.shape,"\r\n",stack_sample_2.shape)
stacked_sample_1 = tf.stack([stack_sample_1,stack_sample_2],axis=0)
print("Shape after stacking:\r\n",stacked_sample_1.shape)

#print("Shapes after unstacking:\r\n",tf.unstack(stacked_sample_1,axis=0))

split_sample_1 = tf.random.normal([10,10,10,3])
print("\r\nOriginal shape of split_sample_1:\r\n",split_sample_1.shape)
splited_sample_1 = tf.split(split_sample_1,num_or_size_splits=5,axis=0)
print("With num_or_size_splits=5, shape after splitting:\r\n",np.shape(splited_sample_1))
splited_sample_2 = tf.split(split_sample_1,num_or_size_splits=[3,5,2],axis=0)
print("With num_or_size_splits=[3,5,2], shapes after splitting:\r\n",
      np.shape(splited_sample_2[0]),
      np.shape(splited_sample_2[1]),
      np.shape(splited_sample_2[2]),)

# 6. Tensor sorting
sort_sample_1 = tf.random.shuffle(tf.range(10))
print("\r\nInput tensor:",sort_sample_1.numpy())
sorted_sample_1 = tf.sort(sort_sample_1,direction="ASCENDING")
print("Tensor sorted in ascending order:",sorted_sample_1.numpy())
sorted_sample_2 = tf.argsort(sort_sample_1,direction="ASCENDING")
print("Indices of the elements in ascending order:",sorted_sample_2.numpy())

# tf.nn.top_k returns the k largest values and their indices
values,index = tf.nn.top_k(sort_sample_1,5)
print("Top 5 largest values:",values.numpy())
print("Indices of the top 5 largest values:",index.numpy())

# 7. Eager Execution: operations run immediately, no Session is needed
x = tf.ones((2,2),dtype=tf.dtypes.float32)
y = tf.constant([[1,2],[3,4]],dtype=tf.dtypes.float32)
z = tf.matmul(x,y)
print("\r\nTensorFlow 2 can compute directly without a Session:",z)

thre_1 = tf.random.uniform([],0,1)
x = tf.reshape(tf.range(0,4),[2,2])
print("\r\nThanks to eager execution, TensorFlow 2 can read the numpy value of thre_1 directly:\r\n",thre_1)
if thre_1.numpy() > 5:   # thre_1 lies in [0,1), so this is always False and the else branch runs
    y = tf.matmul(x,y)
else:
    y = tf.add(x,x)
print("So y can be computed with a plain Python conditional:\r\n",y)

# 8. AutoGraph: @tf.function traces the Python function into a static graph
@tf.function
def simple_nn_layer(w,x,b):
    print("\r\nInside the AutoGraph static graph, b is symbolic and its value cannot be inspected:",b)
    return tf.nn.relu(tf.matmul(w,x)+b)
w = tf.random.uniform((3,3))
x = tf.random.uniform((3,3))
b = tf.constant(0.5,dtype='float32')
simple_nn_layer(w,x,b)

import timeit
CNN_cell = tf.keras.layers.Conv2D(filters=100,kernel_size=2,strides=(1,1))
@tf.function
def CNN_fn(image):
    return CNN_cell(image)
image = tf.zeros([100,200,200,3])
# Warm up both versions: the first call to CNN_fn traces the graph
CNN_cell(image)
CNN_fn(image)
print("\r\nTime for one Conv2D layer in eager mode:\r\n",timeit.timeit(lambda:CNN_cell(image),number=10))
print("Time for one Conv2D layer in graph mode:\r\n",timeit.timeit(lambda:CNN_fn(image),number=10))

Output:

tf.Tensor(
[[1. 2.]
 [3. 4.]], shape=(2, 2), dtype=float32)
tf.Tensor(
[[0 0 0]
 [0 0 0]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[0. 0.]
 [0. 0.]], shape=(2, 2), dtype=float32)
[[8 8 8]
 [8 8 8]
 [8 8 8]]
[[-0.80290383 -1.5259775  -0.6541044   0.56509346  0.12168431]
 [-0.4033856  -0.52415013  1.8449188   0.420102   -2.3412702 ]
 [ 0.8241716   0.6353277   1.2247677   1.3111122   0.32647747]
 [-2.1685581   1.6500843   0.7036131  -0.79745334 -0.38055837]
 [ 0.15103035  0.84089184 -0.546232    1.2970096   0.3392236 ]]
tf.Tensor([1. 2. 3. 4. 5. 6.], shape=(6,), dtype=float32)
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=
array([[1., 1., 1.],
       [1., 1., 1.]], dtype=float32)>
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=
array([[1., 2., 3.],
       [4., 5., 6.]], dtype=float32)>
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=
array([[3., 4., 5.],
       [6., 7., 8.]], dtype=float32)>
tf.Tensor(-0.16037415, shape=(), dtype=float32)
(2, 2)
(2, 2)
tf.Tensor([2 2], shape=(2,), dtype=int32)
tf.Tensor(
[[1 2 3]
 [4 5 6]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[1 2]
 [3 4]
 [5 6]], shape=(3, 2), dtype=int32)
(100, 100, 3)
(1, 100, 100, 3)
(100, 1, 100, 3)
(100, 100, 3, 1)
(1, 100, 100, 3)
(100, 100, 3)
(2, 3)
(3, 2)
(4, 100, 200, 3)
(4, 200, 100, 3)
[1 2 3 4 5 6]
[[1 2 3 4 5 6]
 [1 2 3 4 5 6]
 [1 2 3 4 5 6]
 [1 2 3 4 5 6]]
tf.Tensor(
[[ 1  2  3]
 [11 12 13]
 [21 22 23]
 [31 32 33]], shape=(4, 3), dtype=int32)
tf.Tensor(
[[ 4 11]
 [ 6 17]], shape=(2, 2), dtype=int32)
tf.Tensor(
[[13 63]
 [20 96]], shape=(2, 2), dtype=int32)

Input tensor:
 [[1 3]
 [2 5]
 [7 5]] 
shape: (3, 2)

[2 1]
[1 1 0]

Original data:
 [[1 2 3]
 [4 5 6]] 
shape: (2, 3)

21
[5 7 9]
[ 6 15]

Shapes of the original tensors:
 (4, 100, 100, 3) 
 (40, 100, 100, 3)
Shape after concatenation:
 (44, 100, 100, 3)

Shapes of the original tensors:
 (100, 100, 3) 
 (100, 100, 3)
Shape after stacking:
 (2, 100, 100, 3)

Original shape of split_sample_1:
 (10, 10, 10, 3)
With num_or_size_splits=5, shape after splitting:
 (5, 2, 10, 10, 3)
With num_or_size_splits=[3,5,2], shapes after splitting:
 (3, 10, 10, 3) (5, 10, 10, 3) (2, 10, 10, 3)

Input tensor: [8 9 4 5 0 3 2 6 7 1]
Tensor sorted in ascending order: [0 1 2 3 4 5 6 7 8 9]
Indices of the elements in ascending order: [4 9 6 5 2 3 7 8 0 1]
Top 5 largest values: [9 8 7 6 5]
Indices of the top 5 largest values: [1 0 8 7 3]

TensorFlow 2 can compute directly without a Session: tf.Tensor(
[[4. 6.]
 [4. 6.]], shape=(2, 2), dtype=float32)

Thanks to eager execution, TensorFlow 2 can read the numpy value of thre_1 directly:
 tf.Tensor(0.52014256, shape=(), dtype=float32)
So y can be computed with a plain Python conditional:
 tf.Tensor(
[[0 2]
 [4 6]], shape=(2, 2), dtype=int32)

Inside the AutoGraph static graph, b is symbolic and its value cannot be inspected: Tensor("b:0", shape=(), dtype=float32)

Time for one Conv2D layer in eager mode:
 13.16959130000032
Time for one Conv2D layer in graph mode:
 7.5449627000016335
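
One caveat visible in the AutoGraph output above: a plain Python print inside a @tf.function runs only while the function is being traced, so it shows a symbolic Tensor rather than a value. To see runtime values inside a traced function, tf.print executes as part of the graph. A minimal sketch (the function name trace_demo is only illustrative, not from the course code):

@tf.function
def trace_demo(a, b):
    print("tracing, b is symbolic here:", b)   # executes only during tracing
    tf.print("runtime value of b:", b)         # executes on every call
    return tf.nn.relu(tf.matmul(a, a) + b)

a = tf.random.uniform((3, 3))
trace_demo(a, tf.constant(0.5))
trace_demo(a, tf.constant(1.5))  # same input signature: no retracing, only tf.print runs again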