TensorFlow实现含一个隐藏层的BP神经网络。使用TensorFlow时，只需对最后一层的输出定义损失函数，梯度即可自动计算，非常方便。
import tensorflow as tf
import numpy as np
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully-connected layer to the graph and return its output tensor.

    Args:
        inputs: Input tensor of shape (batch, in_size).
        in_size: Number of input features.
        out_size: Number of neurons (output features) in this layer.
        activation_function: Optional callable (e.g. tf.nn.relu) applied to the
            affine output; if None the layer is linear.

    Returns:
        Output tensor of shape (batch, out_size).
    """
    # Weights drawn from a standard normal; biases start at a small positive
    # value (0.1) rather than zero, which helps ReLU units activate early.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # Identity comparison with None is the idiomatic (and safer) test.
    if activation_function is None:
        return Wx_plus_b
    return activation_function(Wx_plus_b)
# Build training data for the regression target y = x^2 - 0.5 + noise.
x_data=np.linspace(-1,1,300)[:,np.newaxis] # input X: 300 points in [-1, 1], reshaped to (300, 1)
noise=np.random.normal(0,0.05,x_data.shape)# Gaussian noise, mean 0, stddev 0.05, same shape as X
y_data=np.square(x_data)-0.5+noise# target Y = x^2 - 0.5 plus noise
xs=tf.placeholder(tf.float32,[None,1])# placeholder for X (any batch size, 1 feature)
ys=tf.placeholder(tf.float32,[None,1])# placeholder for Y (any batch size, 1 value)
# Hidden layer with 10 neurons and ReLU activation.
l1=add_layer(xs,1,10,activation_function=tf.nn.relu)
# Output layer: 1 linear unit (no activation).
prediction=add_layer(l1,10,1,activation_function=None)
loss=tf.reduce_mean(