在学习Tensorflow过程中,总是无法动态显示红色拟合曲线。测试代码如下(莫烦大佬):
# coding:UTF-8
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # 忽略不必要的警告
# Hidden-layer builder.
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Build one fully connected layer: ``inputs @ W + b``, optionally activated.

    Args:
        inputs: 2-D tensor of shape [batch, in_size].
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional callable such as ``tf.nn.relu``;
            when None the raw affine output is returned unchanged.

    Returns:
        Tensor of shape [batch, out_size].
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases start at 0.1 instead of 0, per the common recommendation.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    affine = tf.matmul(inputs, weights) + biases
    return affine if activation_function is None else activation_function(affine)
# Training data: y = x^2 plus Gaussian noise, x sampled uniformly on [-1, 1].
x_data = np.linspace(-1, 1, 250)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = x_data ** 2 + noise

# --- build the graph ---
xs = tf.placeholder(dtype=tf.float32, shape=[None, 1])
ys = tf.placeholder(dtype=tf.float32, shape=[None, 1])
# One hidden layer (ReLU) followed by a linear output layer.
l_1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
outputs = add_layer(l_1, 10, 1, activation_function=None)
# Mean squared error: sum over features, then average over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - outputs), reduction_indices=[1]))
# Plain gradient descent; a learning rate that is too large diverges in practice.
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
# Variable initializer — must be run before any training step.
init = tf.global_variables_initializer()
# --- graph complete ---
# Train the network and animate the fitted curve in red.
# Fixes vs. the original:
#   * `lines` was referenced before assignment on the first iteration and
#     silenced with a broad `except Exception: pass`; we track the previous
#     curve explicitly instead.
#   * `ax.lines.remove(lines[0])` mutates Axes.lines, which is deprecated
#     (removed in Matplotlib >= 3.5); `Line2D.remove()` is the supported API.
#   * The Session is now closed deterministically via a context manager.
with tf.Session() as sess:
    sess.run(init)

    # Scatter the raw data once; the red prediction curve is redrawn on top.
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)  # single subplot
    ax.scatter(x_data, y_data)
    plt.ion()  # interactive mode so plt.pause() redraws without blocking

    fitted_line = None  # handle of the previously drawn prediction curve
    for step in range(1000):
        sess.run(train, feed_dict={xs: x_data, ys: y_data})
        if step % 100 == 0:
            # Current network prediction over the full input range.
            prediction = sess.run(outputs, feed_dict={xs: x_data})
            # Remove the previous curve, if any, before drawing the new one.
            if fitted_line is not None:
                fitted_line.remove()
            # 'r-' = solid red line, lw = line width.
            fitted_line = ax.plot(x_data, prediction, 'r-', lw=5)[0]
            print('i =', step, sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
            plt.pause(0.3)

plt.ioff()
plt.show()
原以为是版本问题或代码问题,尝试了多种方法后才发现:PyCharm 默认把图像显示在工具窗口(tool window)内,导致动态图无法刷新,需要让图像在单独弹出的窗口中显示。具体设置方法如下:
File -> Settings -> Tools -> Python Scientific -> 取消勾选 "Show plots in tool window"
完美解决!