tf底层的算子定义在tensorflow-1.15.0\tensorflow\core\ops\ops.pbtxt
raw_ops调用必须采用a = tf.raw_ops.Add(x=x, y=y)的形式,x=不能不写,可以采用下面这种方式解决:
name和data关联可以采用**args
inputs = dict(zip(input_names, input_data))
result_tf = tf_op(**inputs, **attr_setting)
example 1 简单加法
# Example 1: simple addition through tf.raw_ops.
# raw_ops calls REQUIRE keyword arguments (x=, y=); the exact keyword names
# are listed on each op's reference page (e.g. tensorflow::ops::Add).
import tensorflow as tf

x = 10
y = 20
a = tf.raw_ops.Add(x=x, y=y)

# TF 1.x-style execution: build the graph above, run it in a Session.
with tf.Session() as sess:
    print(sess.run(a))
注意要使用关键字参数,即形如Add(x=x, y=y),具体的关键字参数是什么,要去算子介绍页面查看,例如tensorflow::ops::Add介绍页。
example 2 调用反向梯度优化op
# Example 2: calling the gradient-descent update op directly.
# ApplyGradientDescent performs: var <- var - alpha * delta.
import tensorflow as tf

shape = (1, 8)
var = tf.Variable(tf.ones(shape=shape), name='var')
alpha = 0.5  # learning rate (scalar)
delta = tf.Variable(tf.ones(shape=shape), name='delta')
# Keyword arguments are mandatory for raw_ops calls.
output = tf.raw_ops.ApplyGradientDescent(var=var, alpha=alpha, delta=delta)

with tf.Session() as sess:
    # Variables must be initialized before the update op can run.
    sess.run(tf.global_variables_initializer())
    print(sess.run(output))
example 3 调用ApplyMomentum 反向梯度momentum优化op
# Example 3: ApplyMomentum — the momentum-based gradient update op.
# Update rule: accum <- accum * momentum + grad; var <- var - lr * accum.
import tensorflow as tf

shape = (1, 8)
lr = 1.0
momentum = 0.9
var = tf.Variable(tf.ones(shape=shape), name='var')
# Bug fix: both of these were named 'delta' (copy-paste), which made TF
# silently uniquify the second to 'delta_1'; name them after their roles.
grad = tf.Variable(tf.ones(shape=shape), name='grad')
accum = tf.Variable(tf.ones(shape=shape), name='accum')
output = tf.raw_ops.ApplyMomentum(
    var=var,
    accum=accum,
    lr=lr,
    grad=grad,
    momentum=momentum)

with tf.Session() as sess:
    # Initialize all Variables before applying the update.
    sess.run(tf.global_variables_initializer())
    print(sess.run(output))  # op returns the updated var
    print(sess.run(var))
    print(sess.run(accum))
另一种简单的方法就是在TensorFlow的python包中找到算子定义包并导入调用,例如:
from tensorflow.python.training.gen_training_ops import apply_proximal_adagrad
example 4
# Example 4: chain several raw ops (Exp -> ConcatV2 -> SplitV), feed the
# placeholders with random data, then dump the graph as a GraphDef .pb file.
import tensorflow as tf
import numpy as np

tf.reset_default_graph()

# Four float32 placeholders with a dynamic batch dimension.
pld1 = tf.placeholder(dtype="float32", shape=[None, 32])
pld2 = tf.placeholder(dtype="float32", shape=[None, 64])
pld3 = tf.placeholder(dtype="float32", shape=[None, 32])
pld4 = tf.placeholder(dtype="float32", shape=[None, 128])
batch = 1

exp1 = tf.raw_ops.Exp(x=pld1)
exp2 = tf.raw_ops.Exp(x=pld2)
exp3 = tf.raw_ops.Exp(x=pld3)
exp4 = tf.raw_ops.Exp(x=pld4)
values = [exp1, exp2, exp3, exp4]

input1_np = np.random.randn(batch, 32)
input2_np = np.random.randn(batch, 64)
input3_np = np.random.randn(batch, 32)
input4_np = np.random.randn(batch, 128)
feed_dict = dict(zip([pld1, pld2, pld3, pld4],
                     [input1_np, input2_np, input3_np, input4_np]))

# Concatenate along the last axis: shape becomes [batch, 32+64+32+128].
concat1 = tf.raw_ops.ConcatV2(values=values, axis=-1)
# Split back into the original four widths along the same axis.
size_splits = tf.constant([32, 64, 32, 128], dtype="int32")
split1 = tf.raw_ops.SplitV(
    value=concat1, size_splits=size_splits, axis=-1, num_split=4)

with tf.Session() as sess:
    # No Variables in this graph, so no initializer is needed.
    print(sess.run(split1, feed_dict=feed_dict))

# Dump the graph as a protobuf for offline inspection (e.g. with Netron).
# Serializing the GraphDef does not require an active session.
graph_def = tf.get_default_graph().as_graph_def()
dump_graph_name = "dump_graph.pb"
with tf.gfile.GFile(dump_graph_name, "wb") as f:
    f.write(graph_def.SerializeToString())