# Runs on TensorFlow 2.3.0 using the v1 compatibility API
# (graph mode, placeholders, sessions).
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tfc

tfc.disable_v2_behavior()
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add one fully connected layer and return its output tensor."""
    Weights = tf.Variable(tf.random.normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # small positive bias
    Wx_plus_b = tf.matmul(inputs, Weights) + biases      # y = x*W + b
    if activation_function is None:
        outputs = Wx_plus_b  # linear layer
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
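# Shape check (illustrative, not part of the original script): a [None, 1]
# input mapped through add_layer(..., 1, 10) yields a [None, 10] tensor,
# since Weights is [1, 10] and the [1, 10] biases broadcast across rows.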
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # 300 samples, shape (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)  # Gaussian noise (mean, std, shape)
y_data = np.square(x_data) - 0.5 + noise         # target: x^2 - 0.5 plus noise
# Build the network.
# Input and output layers get one neuron per feature (one each here);
# the hidden layer uses 10 neurons.
xs = tfc.placeholder(tf.float32, [None, 1])
ys = tfc.placeholder(tf.float32, [None, 1])
# Hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer (linear)
prediction = add_layer(l1, 10, 1, activation_function=None)
# Loss: mean squared error over the batch
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))
# Training op: one gradient-descent step on the loss (learning rate 0.1)
train_step = tfc.train.GradientDescentOptimizer(0.1).minimize(loss)
# Initialize variables (required before running the graph)
init = tfc.global_variables_initializer()
sess = tfc.Session()
sess.run(init)
# Train for 1000 steps, printing the loss every 50 steps
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
Output (loss every 50 steps; exact values vary with the random initialization and noise):
0.6188716
0.009656661
0.00711929
0.006185833
0.0054736366
0.0049568852
0.004566364
0.0042579058
0.0040368345
0.0038626604
0.0037317642
0.0036194357
0.0035345475
0.0034664865
0.0034077906
0.0033406622
0.003278716
0.0032234853
0.0031807795
0.0031492526
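To inspect the fit after training, the existing `prediction` tensor can be
evaluated in the same session. A minimal sketch, not part of the original
script:

fitted = sess.run(prediction, feed_dict={xs: x_data})
print(fitted[:5])  # first few predicted y values, roughly x^2 - 0.5
sess.close()       # release the session's resources

For comparison, a minimal sketch of the same 1 -> 10 -> 1 network in native
TF2/Keras, reusing x_data and y_data from above. This is an assumed
equivalent, not the original author's code: it should run in a fresh script
without the disable_v2_behavior() shim, and it mirrors the layer sizes,
full-batch training, and 0.1 SGD learning rate of the graph code.

model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu', input_shape=(1,)),  # hidden layer
    tf.keras.layers.Dense(1),                                        # linear output
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')
model.fit(x_data, y_data, epochs=1000, batch_size=300, verbose=0)  # full batch, 1000 steps
print(model.evaluate(x_data, y_data, verbose=0))  # final MSE, comparable to the loss above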