03-1 Graphs, Tensors, and Sessions
1. Defining two different graphs
import tensorflow as tf

g1 = tf.Graph()
with g1.as_default():
    v = tf.get_variable("v", [1], initializer=tf.zeros_initializer())  # initial value 0

g2 = tf.Graph()
with g2.as_default():
    v = tf.get_variable("v", [1], initializer=tf.ones_initializer())  # initial value 1

# Each graph holds its own variable named "v"; read each one in its own session.
with tf.Session(graph=g1) as sess:
    tf.global_variables_initializer().run()
    with tf.variable_scope("", reuse=True):
        print(sess.run(tf.get_variable("v")))

with tf.Session(graph=g2) as sess:
    tf.global_variables_initializer().run()
    with tf.variable_scope("", reuse=True):
        print(sess.run(tf.get_variable("v")))
[0.]
[1.]
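Besides isolating tensors and variables, a graph can also pin its operations to a device through tf.Graph.device. A minimal sketch; the device string '/gpu:0' is an assumption about the local machine:

g = tf.Graph()
with g.as_default():
    # Run this addition on the first GPU (assumed to exist).
    with g.device('/gpu:0'):
        result = tf.constant(1.0) + tf.constant(2.0)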
2. The concept of tensors
import tensorflow as tf

a = tf.constant([1.0, 2.0], name="a")
b = tf.constant([2.0, 3.0], name="b")
result = a + b
print(result)  # prints the tensor's metadata, not its value

sess = tf.InteractiveSession()
print(result.eval())  # evaluates the tensor to obtain its value
sess.close()
Tensor("add:0", shape=(2,), dtype=float32)
[3. 5.]
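A tensor's name has the form "node:output_index": result is the 0th output of the node named add. Its name, shape, and dtype can be inspected directly; a small sketch:

print(result.name)         # add:0
print(result.get_shape())  # (2,)
print(result.dtype)        # <dtype: 'float32'>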
3. Using sessions
3.1 Creating and closing a session
# Create a session.
sess = tf.Session()
# Use the session to obtain the result computed above.
print(sess.run(result))
# Close the session so that the resources used in this run can be released.
sess.close()
[3. 5.]
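If the program raises an exception before sess.close() is reached, the session's resources are never released. A try/finally guard avoids this, which is exactly what the with statement in 3.2 automates:

sess = tf.Session()
try:
    print(sess.run(result))
finally:
    # Close the session even if run() raises.
    sess.close()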
3.2 Creating a session with a with statement
with tf.Session() as sess:
    print(sess.run(result))
[3. 5.]
3.3 Specifying a default session
sess = tf.Session()
with sess.as_default():
    print(result.eval())
[3. 5.]
sess = tf.Session()
# The two statements below do the same thing.
print(sess.run(result))
print(result.eval(session=sess))
[3. 5.]
[3. 5.]
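result.eval() works here because sess.as_default() registers sess as the default session, and eval() looks that session up; tf.get_default_session() exposes the same lookup. A small sketch:

sess = tf.Session()
with sess.as_default():
    # eval() uses the session that as_default() registered.
    assert tf.get_default_session() is sess
    print(result.eval())
sess.close()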
4. Building a session with tf.InteractiveSession
# InteractiveSession registers itself as the default session on construction,
# so eval() needs no explicit session argument.
sess = tf.InteractiveSession()
print(result.eval())
sess.close()
[3. 5.]
5. Configuring a session with ConfigProto
# allow_soft_placement=True lets an op fall back to the CPU when it cannot run
# on its assigned device; log_device_placement=True logs each node's placement.
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
sess1 = tf.InteractiveSession(config=config)
sess2 = tf.Session(config=config)
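ConfigProto carries further knobs as well; for example, a sketch that also stops TensorFlow from reserving all GPU memory up front (assuming a GPU is present):

config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
# Grow GPU memory on demand instead of grabbing it all at once.
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)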
03-2 Forward Propagation in a Simple Three-Layer Neural Network
#1. A simple three-layer neural network
import tensorflow as tf

#1.1 Define the variables.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
x = tf.constant([[0.7, 0.9]])

#1.2 Define the forward propagation.
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

#1.3 Run a session to output the result.
sess = tf.Session()
sess.run(w1.initializer)
sess.run(w2.initializer)
print(sess.run(y))
sess.close()
[[3.957578]]
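The network above is just two matrix multiplications. The same forward pass in plain NumPy, with hypothetical weights for illustration (TensorFlow's seeded random_normal values are not reproduced here):

import numpy as np

w1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])  # hypothetical weights
w2 = np.array([[0.7], [0.8], [0.9]])
x = np.array([[0.7, 0.9]])
a = x.dot(w1)  # hidden layer, shape (1, 3)
y = a.dot(w2)  # output layer, shape (1, 1)
print(y)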
#2. Using a placeholder
x = tf.placeholder(tf.float32, shape=(1, 2), name="input")
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)

print(sess.run(y, feed_dict={x: [[0.7, 0.9]]}))
[[3.957578]]
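A placeholder holds no value of its own; evaluating y without feeding x fails. A quick check:

try:
    sess.run(y)  # x was never fed
except tf.errors.InvalidArgumentError:
    print("y cannot be evaluated without feeding the placeholder x")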
#3. Feeding multiple examples at once
x = tf.placeholder(tf.float32, shape=(3, 2), name="input")
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

sess = tf.Session()
# Use tf.global_variables_initializer() to initialize all variables.
init_op = tf.global_variables_initializer()
sess.run(init_op)

print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))
[[3.957578 ]
[1.1537654]
[3.1674924]]
03-3 A Complete Neural Network Example Program
import tensorflow as tf
from numpy.random import RandomState

#1. Define the network parameters and the input/output nodes.
batch_size = 8
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
y_ = tf.placeholder(tf.float32, shape=(None, 1), name="y-input")

#2. Define forward propagation, the loss function, and the backpropagation algorithm.
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
y = tf.sigmoid(y)
# clip_by_value keeps predictions inside [1e-10, 1.0] so tf.log never receives 0.
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
                                + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
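For one sample with label y_ = 1 and prediction y = 0.8, only the first term of this cross entropy survives and the loss is -log(0.8) ≈ 0.223. A quick numeric check:

import numpy as np
print(-np.log(0.8))  # ≈ 0.2231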
#3. Generate a simulated dataset.
rdm = RandomState(1)
X = rdm.rand(128, 2)
# Samples with x1 + x2 < 1 are labeled positive (1); the rest negative (0).
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]
#4. Create a session to run the TensorFlow program.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Print the current (untrained) parameter values.
    print(sess.run(w1))
    print(sess.run(w2))
    print("\n")

    # Train the model.
    STEPS = 5000
    for i in range(STEPS):
        # Take batch_size samples per step, cycling through the 128 samples.
        start = (i * batch_size) % 128
        end = (i * batch_size) % 128 + batch_size
        sess.run([train_step, y, y_], feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
            print("After %d training step(s), cross entropy on all data is %g" % (i, total_cross_entropy))

    # Print the parameter values after training.
    print("\n")
    print(sess.run(w1))
    print(sess.run(w2))
Output:
[[-0.8113182 1.4845988 0.06532937]
[-2.4427042 0.0992484 0.5912243 ]]
[[-0.8113182 ]
[ 1.4845988 ]
[ 0.06532937]]
After 0 training step(s), cross entropy on all data is 1.89805
After 1000 training step(s), cross entropy on all data is 0.655075
After 2000 training step(s), cross entropy on all data is 0.626172
After 3000 training step(s), cross entropy on all data is 0.615096
After 4000 training step(s), cross entropy on all data is 0.610309
[[ 0.02476969 0.5694868 1.6921943 ]
[-2.1977348 -0.23668927 1.1143894 ]]
[[-0.45544696]
[ 0.49110925]
[-0.98110336]]
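To check what the trained network learned, its sigmoid outputs can be thresholded and compared against the labels. A minimal evaluation sketch, meant to run inside the same session right after the training loop; the 0.5 decision threshold is an assumption, not part of the original program:

import numpy as np

preds = sess.run(y, feed_dict={x: X})  # sigmoid outputs in (0, 1)
labels = (preds > 0.5).astype(int)     # hypothetical 0.5 cutoff
print("accuracy on the training data:", (labels == np.array(Y)).mean())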