import tensorflow as tf
import numpy as np
### Define add_layer: a helper that builds one fully connected layer
def add_layer(inputs, in_size, out_size, activation_function=None):  # inputs, input size, output size; activation defaults to None
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # randomly initialized weights
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # biases initialized to a small positive 0.1
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:  # no activation function: the layer stays linear
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)  # apply the activation to introduce non-linearity
    return outputs
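
### Quick shape check (an illustrative sketch we add here; demo_x/demo_h are hypothetical names, not part of the network below)
demo_x = tf.placeholder(tf.float32, [None, 3])
demo_h = add_layer(demo_x, 3, 5, activation_function=tf.nn.tanh)
print(demo_h.get_shape())  # (?, 5): unknown batch size, 5 units; static shapes need no session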
### Construct the samples
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # inputs x: 300 samples, each 1-dimensional
noise = np.random.normal(0, 0.05, x_data.shape)  # Gaussian noise: mean 0, standard deviation 0.05
y_data = np.square(x_data) - 0.5 + noise  # targets: y = x^2 - 0.5 plus noise
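# Sanity check (optional; our addition): all three arrays are (300, 1), matching the placeholders below
print(x_data.shape, noise.shape, y_data.shape)  # (300, 1) (300, 1) (300, 1)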
xs = tf.placeholder(tf.float32, [None, 1])  # None: any number of samples; 1: input dimension
ys = tf.placeholder(tf.float32, [None, 1])
inputLayer = add_layer(xs, 1, 10, activation_function=tf.nn.relu)  # first layer: 1 input -> 10 hidden units, ReLU activation
outputLayer = add_layer(inputLayer, 10, 1, activation_function=None)  # output layer: 10 hidden units -> 1 output, no activation
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - outputLayer), 1))  # squared error summed over axis 1 (one value per sample), then averaged over the batch
# axis=1 (reduction_indices=[1]) sums across each row; axis=0 would sum down each column
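# Axis demo (illustrative): tf.reduce_sum([[1., 2.], [3., 4.]], 1) evaluates to [3., 7.] (per-row sums)
#                           tf.reduce_sum([[1., 2.], [3., 4.]], 0) evaluates to [4., 6.] (per-column sums)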
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # plain gradient descent, learning rate 0.1 (should be < 1)
init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # feed the placeholders through feed_dict
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))  # every run call must feed the placeholders again
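
    ### After training: query the network for its fitted curve (our addition, a sketch; plotting is optional)
    prediction = sess.run(outputLayer, feed_dict={xs: x_data})  # shape (300, 1): the learned approximation of x^2 - 0.5
    # import matplotlib.pyplot as plt
    # plt.scatter(x_data, y_data, s=5); plt.plot(x_data, prediction, 'r-'); plt.show()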