1.2简单的tensorflow神经网络
前言
使用的版本为 TensorFlow 1.x(若使用 2.0 需改动部分语法)。用一个 1→10→1 的神经网络,以 200×1 的样本(隐藏层权重 1×10、输出层权重 10×1)拟合一个二次函数(抛物线)。
提示:以下是本篇文章正文内容,下面案例可供参考
一、以一个简单的 1→10→1 神经网络为例
输入 200×1 => 隐藏层权重 1×10 => 输出层权重 10×1 => 输出 200×1
二、代码和结果
代码如下(示例):
# Fit y = x^2 + noise with a 1 -> 10 -> 1 fully-connected network,
# using the TensorFlow 1.x graph API (placeholders + Session).
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Training data: 200 evenly spaced points in [-0.5, 0.5], reshaped to a
# (200, 1) column vector, with small Gaussian noise added to the targets.
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
print(x_data)
noise = np.random.normal(0, 0.02, x_data.shape)
y_data = np.square(x_data) + noise

# Placeholders for batches of column vectors; None allows any batch size.
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

# Hidden layer: 1 -> 10 units, tanh activation.
w_l1 = tf.Variable(tf.random.normal([1, 10]))
b_l1 = tf.Variable(tf.random.normal([1, 10]))
L1 = tf.nn.tanh(tf.matmul(x, w_l1) + b_l1)

# Output layer: 10 -> 1 unit, tanh activation.
# tanh keeps predictions in (-1, 1), which covers the target range here.
w_l2 = tf.Variable(tf.random.normal([10, 1]))
b_l2 = tf.Variable(tf.random.normal([1, 1]))
pred = tf.nn.tanh(tf.matmul(L1, w_l2) + b_l2)

# Mean-squared-error loss, minimized with plain gradient descent (lr=0.1).
loss = tf.reduce_mean(tf.square(y - pred))
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Full-batch training: feed the whole dataset each step.
    for i in range(1000):
        sess.run(train, feed_dict={x: x_data, y: y_data})
        print("第{0}次,loss = {1}".format(i, sess.run(loss, feed_dict={x: x_data, y: y_data})))
    # Evaluate the trained network and plot samples vs. fitted curve.
    pred_value = sess.run(pred, feed_dict={x: x_data})
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, pred_value, 'r-', lw=5)
    plt.show()