import tensorflow as tf
import numpy as np
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Build one fully connected layer: inputs @ Weights + biases, optionally activated.

    Args:
        inputs: input tensor, expected shape (batch, in_size) — TODO confirm with callers.
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional callable applied element-wise to the
            pre-activation; None means a linear (identity) output.

    Returns:
        The layer's output tensor.
    """
    # Weights drawn from a standard normal; biases initialized to 0.1.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        # Linear output
        outputs = Wx_plus_b
    else:
        # Non-linear output
        outputs = activation_function(Wx_plus_b)
    return outputs
# 300 evenly spaced samples in [-1, 1], shaped as a column vector (batch, 1).
# NOTE(review): the original comment said 300 samples but the code used
# linspace(-1, 1, 2) — only 2 points, which cannot fit the quadratic target;
# fixed to 300 to match the stated intent.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
# Gaussian noise, std 0.05, same shape as x_data
noise = np.random.normal(0, 0.05, x_data.shape)
# Target: y = x^2 - 0.5 + noise
y_data = np.square(x_data) - 0.5 + noise
# Placeholders for feeding training data (x_data has 1 feature)
xs = tf.placeholder(tf.float32)
ys = tf.placeholder(tf.float32)
# Hidden layer: 1 input feature -> 10 ReLU units
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer: 10 hidden units -> 1 linear output
prediction = add_layer(l1, 10, 1, activation_function=None)
# Mean squared error: sum the squared residual per sample along axis 1
# (reduction_indices is the TF1 alias of axis), then average over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
# Plain gradient descent with learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Variables must be initialized before the session uses them
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(10):
        # One gradient-descent step on the full dataset
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        print("x_data=" + str(x_data) + "y_data=" + str(y_data))
        if(i%5 ==0):
            # Report the current training loss every 5 steps
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
# Reference: https://www.tensorflow.org/api_docs/python/tf/reduce_mean