一 TensorFlow 实例
直接实现神经网络理解起来有些困难,因此经过学习整理了以下几个 TensorFlow 的基础使用实例。
import tensorflow as tf
# Example 1: print a constant string tensor via a TF1-style session.
# tf.compat.v1.disable_eager_execution()
# message = tf.constant('hello world')
# with tf.compat.v1.Session() as sess:
# print(sess.run(message).decode())
# Example 2: element-wise addition of two constant tensors.
# tf.compat.v1.disable_eager_execution()
# v1 = tf.constant([1, 2, 3, 4])
# v2 = tf.constant([2, 1, 5, 3])
# v_add = tf.add(v1, v2)
# with tf.compat.v1.Session() as sess:
# print(sess.run(v_add))
# Alternatively, replace the `with` form with the explicit form below: such a
# session must be closed explicitly via close(), whereas the `with` form
# closes the session implicitly when the block ends.
# sess = tf.compat.v1.Session()
# print(sess.run(v_add))
# sess.close()
# Example 3: same result as Example 2.
# With an InteractiveSession, eval() can run a tensor directly without
# naming the session explicitly.
# sess = tf.compat.v1.InteractiveSession()
# v1 = tf.constant([1, 2, 3, 4])
# v2 = tf.constant([2, 1, 5, 3])
# v_add = tf.add(v1, v2)
# print(v_add.eval())
# sess.close()
# Example 4: using placeholders (values fed at run time via feed_dict).
# tf.compat.v1.disable_eager_execution()
# x = tf.compat.v1.placeholder("float")
# y = 2 * x
# NOTE(review): here 10 is passed as minval while maxval keeps its float
# default (1.0), giving an empty/invalid range — most likely maxval=10 was
# intended, i.e. random_uniform([4, 5], maxval=10); verify before running.
# data = tf.compat.v1.random_uniform([4, 5], 10)
# with tf.compat.v1.Session() as sess:
# x_data = sess.run(data)
# print(sess.run(y, feed_dict={x: x_data}))
# Example 5: arithmetic operations (add / subtract / multiply / divide).
# tf.compat.v1.disable_eager_execution()
# sess = tf.compat.v1.InteractiveSession()
# # tf.eye builds an identity matrix.
# I_matrix = tf.eye(5)
# print(I_matrix.eval())
# x = tf.compat.v1.Variable(tf.eye(10))
# x.initializer.run()
# print(x.eval())
# # tf.compat.v1.random_normal draws random values from a normal distribution.
# A = tf.compat.v1.Variable(tf.compat.v1.random_normal([5, 10]))
# A.initializer.run()
# print(A.eval())
# product = tf.matmul(A, x)
# print(product.eval())
# # random_uniform samples a uniform distribution over [0, 2): the upper
# # bound 2 is excluded, and dtype=tf.int32 makes the samples integers
# # (so each entry is 0 or 1).
# b = tf.compat.v1.Variable(tf.compat.v1.random_uniform([5, 10], 0, 2, dtype=tf.int32))
# b.initializer.run()
# print(b.eval())
# b_new = tf.cast(b, dtype=tf.float32)
# print("b =======\n", b_new.eval())
# t_sum = tf.add(product, b_new)
# t_sub = product - b_new
# print("A*x _b\n", t_sum.eval())
# print("A*x - b\n", t_sub.eval())
# a = tf.compat.v1.Variable(tf.compat.v1.random_normal([4, 5], stddev=2))
# b = tf.compat.v1.Variable(tf.compat.v1.random_normal([4, 5], stddev=2))
# A = a * b
# B = tf.scalar_mul(2, A)
# C = tf.compat.v1.div(a, b)
# D = tf.compat.v1.mod(a, b)
# init_op = tf.compat.v1.global_variables_initializer()
# with tf.compat.v1.Session() as sess2:
# sess2.run(init_op)
# writer = tf.compat.v1.summary.FileWriter('graphs', sess2.graph)
# a, b, A_R, B_R, C_R, D_R = sess2.run([a, b, A, B, C, D])
# print("a\n", a, "\nb\n", b, "\na*b\n", A_R, "\n2*a*b\n", B_R, "\na/b\n", C_R, "\na%b\n", D_R)
# writer.close()
# Example 6: TensorBoard — write the graph so it can be visualized.
# tf.compat.v1.disable_eager_execution()
# with tf.name_scope("input"):
# input1 = tf.constant(3.0, name="A")
# input2 = tf.constant(4.0, name="B")
# input3 = tf.constant(5.0, name="C")
# with tf.name_scope("op"):
# add = tf.add(input2, input3)
# mul = tf.multiply(input1, add)
# with tf.compat.v1.Session() as sess:
# writer = tf.compat.v1.summary.FileWriter('logs/', sess.graph)
# result = sess.run([mul, add])
# print(result)
# Run in a cmd console: tensorboard --logdir=C:\xxx\logs
# Then open http://localhost:6006 in a browser to see the graph.
逐一打开注释可以看到每个例子的运行结果。
二 总结
以上列出了一些基础用法,更多 API 以及函数用法可以参考以下链接:
- https://www.tensorflow.org/api_docs/python/tf/all_symbols
- https://www.w3cschool.cn/tensorflow_python/tensorflow_python-led42j40.html