本次神经网络的搭建需要三层,第一层为输入层,第二层为隐藏层,第三层为输出层。以下贴出代码:
import tensorflow as tf
import numpy as np
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add one fully connected layer to the graph.

    Args:
        inputs: input tensor of shape [batch, in_size].
        in_size: number of input units of this layer.
        out_size: number of output units of this layer.
        activation_function: optional activation applied to the affine
            output; if None, the raw affine output is returned (linear layer).

    Returns:
        The layer's output tensor of shape [batch, out_size].
    """
    # Weight matrix initialized from TensorFlow's normal random generator.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Bias row vector initialized to 0.1 (small positive start value).
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Affine transform: inputs x Weights + biases.
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # If no activation is given, the layer is linear; otherwise apply
    # the activation to the affine output.  (The original comment said
    # "if the activation is zero" — the actual check is `is None`.)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
# Training inputs: 300 evenly spaced points on [-1, 1], as a column vector.
x_data = np.linspace(-1, 1, 300).reshape(-1, 1)
# Gaussian noise (mean 0, std 0.05) with the same shape as the inputs.
noise = np.random.normal(0, 0.05, x_data.shape)
# Targets: a noisy quadratic, y = x^2 - 0.5 + noise.
y_data = x_data ** 2 - 0.5 + noise
# Placeholders for mini-batches of inputs and targets
# (any batch size, one feature per example).
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# Hidden layer: 1 -> 10 units with ReLU activation.
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer: 10 -> 1 units, linear (no activation).
prediction = add_layer(l1, 10, 1, activation_function=None)
# Loss: sum of squared residuals per example, averaged over the batch.
squared_error = tf.square(ys - prediction)
loss = tf.reduce_mean(tf.reduce_sum(squared_error, reduction_indices=[1]))
# Plain gradient descent with learning rate 0.1, minimizing the loss.
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_step = optimizer.minimize(loss)
# Initialize all graph variables and create a session.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Train for 1000 steps on the full data set, printing the loss every
# 50 steps.  (The loop body was unindented in the original, which is a
# SyntaxError in Python.)
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
# Release the session's resources when training is done.
sess.close()