import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Training data: 300 samples of y = x^2 - 0.5 plus small Gaussian noise,
# shaped (300, 1) to match the network's (batch, features) placeholder layout.
x_data = np.linspace(-1, 1, 300).reshape(-1, 1)
print(x_data.shape)
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
def add_layer(input, in_size, out_size, activation_function=None):
    """Add one fully connected layer to the graph and return its output tensor.

    Args:
        input: 2-D tensor of shape (batch, in_size) feeding this layer.
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional callable applied to W*x + b;
            if None the layer output is linear.

    Returns:
        Output tensor of shape (batch, out_size).
    """
    # NOTE(review): the parameter name `input` shadows the builtin; it is kept
    # unchanged so any existing keyword-argument callers remain compatible.
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='w')
        with tf.name_scope('biases'):
            # Small positive initial bias helps avoid dead ReLU units at start.
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(input, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs
# Build the computation graph: inputs -> hidden layer -> prediction -> loss -> train op.
with tf.name_scope("inputs"):
    # Placeholders for (x, y) batches; None in the shape allows any batch size.
    xs = tf.placeholder(tf.float32, [None, 1], name="x_input")
    ys = tf.placeholder(tf.float32, [None, 1], name="y_input")

# Hidden layer: 1 input feature -> 10 ReLU units.
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer: 10 hidden units -> 1 linear output (regression).
prediction = add_layer(l1, 10, 1, activation_function=None)

with tf.name_scope("loss"):
    # Mean squared error: sum over the feature axis, mean over the batch.
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))

with tf.name_scope("train"):
    # Plain gradient descent with learning rate 0.1.
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# TF1 requires explicit variable initialization inside a session.
# tf.initialize_all_variables() is deprecated; use global_variables_initializer().
init = tf.global_variables_initializer()
sess = tf.Session()
# Dump the graph definition to "logs/" so TensorBoard can visualize it.
writer = tf.summary.FileWriter("logs/", sess.graph)
sess.run(init)
# Scatter the raw data once; the fitted curve is redrawn as training progresses.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()  # interactive mode: plot updates without blocking the loop
plt.show()

lines = None  # handle to the previously drawn prediction curve, if any
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        if lines is not None:
            # Remove the stale curve before drawing the updated one.
            ax.lines.remove(lines[0])
        prediction_value = sess.run(prediction, feed_dict={xs: x_data, ys: y_data})
        lines = plt.plot(x_data, prediction_value, 'r-', lw=5)
        plt.show()
        plt.pause(0.1)
# Running the code above writes TensorBoard event files under the default
# directory ("logs/"). To view the graph: open a terminal (cmd), cd into the
# script's directory, and run:  tensorboard --logdir=logs
# If the URL printed by that command does not open in your browser, use the
# alternative address it prints (e.g. http://localhost:6006) instead.