import tensorflow as tf
import numpy as np
# Build the training set: 300 evenly spaced inputs in [-1, 1] as a
# column vector; targets follow y = x^2 - 0.5 corrupted by Gaussian noise.
x_data = np.linspace(-1, 1, 300).reshape(300, 1)  # shape (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)   # sigma = 0.05 jitter
y_data = x_data ** 2 - 0.5 + noise
def add_layer(inputs, in_size, out_size, activation_fuction=None):
    """Append one fully connected layer to the graph.

    Args:
        inputs: 2-D tensor of shape (batch, in_size).
        in_size: number of input features.
        out_size: number of neurons in this layer.
        activation_fuction: optional activation op (e.g. tf.nn.relu);
            None yields a purely linear layer.  NOTE: the misspelled
            name is kept because callers pass it as a keyword argument.

    Returns:
        2-D tensor of shape (batch, out_size).
    """
    # Weights: an in_size x out_size matrix, random-normal initialized.
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases: a 1 x out_size row, started slightly positive (0.1) so
    # ReLU units are not dead at the start of training.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Affine transform; the bias row broadcasts over the batch dimension.
    Wx_plus_b = tf.matmul(inputs, weights) + biases
    if activation_fuction is None:
        return Wx_plus_b
    return activation_fuction(Wx_plus_b)
# Placeholders for the network inputs/targets.  Fix: xs and ys were
# referenced but never defined in the original script (NameError).
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# Hidden layer: 20 ReLU units over the single input feature.
h1 = add_layer(xs, 1, 20, activation_fuction=tf.nn.relu)
# Output layer: one linear neuron producing the prediction.
prediction = add_layer(h1, 20, 1, activation_fuction=None)

# Loss: mean over the batch of the per-example squared error.
loss = tf.reduce_mean(
    tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Train the model; the with-block guarantees the session is closed.
init = tf.global_variables_initializer()  # initialize all variables
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:  # report the current loss every 50 steps
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
# Sample output (loss printed every 50 steps):
# 0.47076234
# 0.0053485185
# 0.0035443406
# 0.0032393
# 0.003112197
# 0.0030527024
# 0.00301335
# 0.0029667937
# 0.0029171768
# 0.0028694505
# 0.002820269
# 0.0027770796
# 0.0027455268
# 0.0027258783
# 0.002712878
# 0.002696983
# 0.002684594
# 0.0026757205
# 0.0026678413
# 0.0026608207