import tensorflow as tf
import numpy as np
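
# NOTE: written against the TensorFlow 1.x graph API (placeholders + Session);
# under TensorFlow 2.x the same graph can be built through tf.compat.v1
# (with eager execution disabled).
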
def add_layer(inputs, in_size, out_size, activation=None):
    # One fully connected layer: output = activation(inputs * W + b).
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases start slightly positive so ReLU units are active from the start.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation is None:
        output = Wx_plus_b
    else:
        output = activation(Wx_plus_b)
    return output
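
# Toy regression data: 300 samples of y = x^2 - 0.5 with Gaussian noise (std 0.05).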
x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
xs = tf.placeholder(tf.float32, [None, 1])  # None means xs can take any number of rows (samples)
ys = tf.placeholder(tf.float32, [None, 1])
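
# Network: 1 input -> 10 hidden ReLU units -> 1 linear output.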
L1 = add_layer(xs, in_size=1, out_size=10, activation=tf.nn.relu)
prediction = add_layer(L1, in_size=10, out_size=1, activation=None)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))  # reduction_indices=[1] sums across columns for each sample; y has only one column here, so the sum changes nothing
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
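# minimize() builds both the gradient ops and the SGD update (learning rate 0.1)
# for every trainable variable.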
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
for i in range(1000):
    session.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # Report the current loss every 50 training steps.
        print(i, session.run(loss, feed_dict={xs: x_data, ys: y_data}))
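
# A minimal follow-up sketch (not part of the original script): fetch the
# fitted values from the trained graph, then release the session.
fitted = session.run(prediction, feed_dict={xs: x_data})
print("first five predictions:", fitted[:5].ravel())
session.close()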