TensorFlow 2.0 Study Notes: tf.function and AutoGraph

What tf.function offers:

1. Compiles a Python function into a TensorFlow graph.
2. Makes it easy to export a model as GraphDef + checkpoint or as a SavedModel.
3. Lets eager execution stay on by default while keeping graph performance available.
4. TensorFlow 1.x code can keep running in 2.0 by wrapping it in tf.function, replacing sessions (see the sketch below).
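
As a quick illustration of the last point (a minimal, self-contained sketch, not from the original notes): a tf.function-decorated function plays the role that a graph plus a Session played in 1.x.

import tensorflow as tf

@tf.function
def multiply(a, b):
    # Traced into a graph on the first call; later calls reuse that graph.
    return tf.matmul(a, b)

x = tf.ones((2, 2))
print(multiply(x, x))  # runs the traced graph; no Session needed
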
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf


from tensorflow import keras
print(tf.__version__)
2.0.0
#tf.function and auto-graph

def scaled_elu(z, scale=1.0, alpha=1.0):
    # z >= 0: scale * z;  z < 0: scale * alpha * tf.nn.elu(z)
    is_positive = tf.greater_equal(z, 0.0)
    return scale * tf.where(is_positive, z, alpha * tf.nn.elu(z))

print(scaled_elu(tf.constant(-3.)))
print(scaled_elu(tf.constant([-3., -2.5])))

scaled_elu_tf = tf.function(scaled_elu)
print(scaled_elu_tf(tf.constant(-3.)))
print(scaled_elu_tf(tf.constant([-3., -2.5])))
print(scaled_elu_tf.python_function is scaled_elu)  # .python_function recovers the original Python function
tf.Tensor(-0.95021296, shape=(), dtype=float32)
tf.Tensor([-0.95021296 -0.917915  ], shape=(2,), dtype=float32)
tf.Tensor(-0.95021296, shape=(), dtype=float32)
tf.Tensor([-0.95021296 -0.917915  ], shape=(2,), dtype=float32)
True
# Graph execution is typically faster than eager execution
%timeit scaled_elu(tf.random.normal((1000,1000)))
%timeit scaled_elu_tf(tf.random.normal((1000,1000)))
19.8 ms ± 1.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
15.7 ms ± 255 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
# 1 + 1/2 + 1/2^2 + ...
@tf.function  # the decorator converts the function into a graph
def converge_to_2(n_iters):
    total = tf.constant(0.)
    increment = tf.constant(1.)
    for _ in range(n_iters):
        total += increment
        increment /= 2.0
    return total

print(converge_to_2(20))

tf.Tensor(1.9999981, shape=(), dtype=float32)
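
A side note (my addition, not in the original): n_iters is a plain Python int, and tf.function traces a separate graph for each distinct Python value it receives, so prefer Tensor arguments for values that vary a lot.

print(converge_to_2.get_concrete_function(20) is
      converge_to_2.get_concrete_function(30))  # False: two separate traces
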
def display_tf_code(func):
    code = tf.autograph.to_code(func)  # source code generated by AutoGraph
    from IPython.display import display, Markdown
    display(Markdown('```python\n{}\n```'.format(code)))

display_tf_code(scaled_elu)  # show the AutoGraph-generated code
def tf__scaled_elu(z, scale=None, alpha=None):
  do_return = False
  retval_ = ag__.UndefinedReturnValue()
  with ag__.FunctionScope('scaled_elu', 'scaled_elu_scope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as scaled_elu_scope:
    is_positive = ag__.converted_call(tf.greater_equal, scaled_elu_scope.callopts, (z, 0.0), None, scaled_elu_scope)
    do_return = True
    retval_ = scaled_elu_scope.mark_return_value(scale * ag__.converted_call(tf.where, scaled_elu_scope.callopts, (is_positive, z, alpha * ag__.converted_call(tf.nn.elu, scaled_elu_scope.callopts, (z,), None, scaled_elu_scope)), None, scaled_elu_scope))
  do_return,
  return ag__.retval(retval_)

# def converge_to_2(n_iters):
#     total = tf.constant(0.)
#     increment = tf.constant(1.)
#     for _ in range(n_iters):
#         total += increment
#         increment /= 2.0
#     return total
# display_tf_code(converge_to_2)
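
The commented-out call above does not work as-is: converge_to_2 is already a tf.function object rather than a plain Python function, so tf.autograph.to_code cannot convert it directly. A working variant (my assumption, not in the original) passes the underlying Python function instead:

display_tf_code(converge_to_2.python_function)
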
# Variables must be created and initialized outside the @tf.function
var = tf.Variable(0.)

@tf.function
def add_21():
    return var.assign_add(21)  # use assign_add; a bare += would not update the variable in place

print(add_21())
tf.Tensor(21.0, shape=(), dtype=float32)
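
To see why the variable must live outside (my addition, hedged): if the tf.Variable were created inside the function, the traced function would try to create a fresh variable on later calls, which tf.function rejects with a ValueError. A sketch of the failure:

@tf.function
def bad_add_21():
    v = tf.Variable(0.)      # raises ValueError: the decorated function
    return v.assign_add(21)  # tried to create variables on a non-first call
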
# Constrain the input: an input_signature makes the accepted dtype and shape explicit
@tf.function(input_signature=[tf.TensorSpec([None], tf.int32, name='x')])
def cube(z):
    return tf.pow(z, 3)

try:
    print(cube(tf.constant([1., 2., 3.])))
except ValueError as ex:
    print(ex)
print(cube(tf.constant([1, 2, 3])))
Python inputs incompatible with input_signature:
  inputs: (
    tf.Tensor([1. 2. 3.], shape=(3,), dtype=float32))
  input_signature: (
    TensorSpec(shape=(None,), dtype=tf.int32, name='x'))
tf.Tensor([ 1  8 27], shape=(3,), dtype=int32)
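
For contrast (my addition, not in the original), a tf.function without an input_signature accepts any compatible input and simply traces a new graph per distinct input signature:

@tf.function
def cube_any(z):
    return tf.pow(z, 3)

print(cube_any(tf.constant([1., 2., 3.])))  # float input: one trace
print(cube_any(tf.constant([1, 2, 3])))     # int input: a second trace
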
# @tf.function : python function -> graph
# get_concrete_function -> add input signature -> SavedModel

cube_func_int32 = cube.get_concrete_function(tf.TensorSpec([None], tf.int32))  # get a concrete, savable graph
print(cube_func_int32)
<tensorflow.python.eager.function.ConcreteFunction object at 0x0000027BC5041748>
print(cube_func_int32 is cube.get_concrete_function(
        tf.TensorSpec([5],tf.int32)))

print(cube_func_int32 is cube.get_concrete_function(
        tf.constant([1,2,3])))
True
True
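
To close the loop on the comment above (a minimal sketch, assuming a writable directory './cube_model'; not from the original notes), a function carrying an input_signature can be exported as a SavedModel via a tf.Module wrapper:

class CubeModule(tf.Module):
    @tf.function(input_signature=[tf.TensorSpec([None], tf.int32, name='x')])
    def cube(self, z):
        return tf.pow(z, 3)

tf.saved_model.save(CubeModule(), './cube_model')
loaded = tf.saved_model.load('./cube_model')
print(loaded.cube(tf.constant([1, 2, 3])))
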
cube_func_int32.graph
<tensorflow.python.framework.func_graph.FuncGraph at 0x27bc5309108>
cube_func_int32.graph.get_operations()
[<tf.Operation 'x' type=Placeholder>,
 <tf.Operation 'Pow/y' type=Const>,
 <tf.Operation 'Pow' type=Pow>,
 <tf.Operation 'Identity' type=Identity>]
pow_op = cube_func_int32.graph.get_operations()[2]
print(pow_op)
name: "Pow"
op: "Pow"
input: "x"
input: "Pow/y"
attr {
  key: "T"
  value {
    type: DT_INT32
  }
}
print(list(pow_op.inputs))
print(list(pow_op.outputs))
[<tf.Tensor 'x:0' shape=(None,) dtype=int32>, <tf.Tensor 'Pow/y:0' shape=() dtype=int32>]
[<tf.Tensor 'Pow:0' shape=(None,) dtype=int32>]
cube_func_int32.graph.get_operation_by_name('x')  # the Placeholder op that receives the input
<tf.Operation 'x' type=Placeholder>
cube_func_int32.graph.get_tensor_by_name('x:0')  # tensor names carry an output index, conventionally ':0'
<tf.Tensor 'x:0' shape=(None,) dtype=int32>
# cube_func_int32.graph.as_graph_def()  # dump the full GraphDef
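
One final sketch (my addition): a ConcreteFunction is itself callable with matching inputs, which is a handy sanity check on the graph:

print(cube_func_int32(tf.constant([1, 2, 3])))  # tf.Tensor([ 1  8 27], shape=(3,), dtype=int32)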