Forward propagation: code implementation
# Forward propagation, feeding the input through a placeholder
# Define the input and the parameters
import tensorflow as tf
x = tf.placeholder(tf.float32, shape=(1, 2))
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
a = tf.matmul(x, w1)  # hidden layer, shape (1, 3)
y = tf.matmul(a, w2)  # output layer, shape (1, 1)
print(a)  # prints the tensor's static description, not its value
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    print(sess.run(y, feed_dict={x: [[0.7, 0.5]]}))
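Note that print(a) before the session only shows the tensor's graph metadata; actual numbers appear once sess.run is called. To sanity-check the shapes flowing through the graph, the same two matrix multiplications can be reproduced in plain NumPy. This is a minimal illustrative sketch, not part of the original program; the random weights here are arbitrary, so its numeric output will differ from the TensorFlow run above.

import numpy as np

x = np.array([[0.7, 0.5]])  # input, shape (1, 2)
w1 = np.random.randn(2, 3)  # first weight matrix, shape (2, 3)
w2 = np.random.randn(3, 1)  # second weight matrix, shape (3, 1)
a = x @ w1                  # hidden layer, shape (1, 3)
y = a @ w2                  # network output, shape (1, 1)
print(y)                    # a single scalar wrapped in a (1, 1) array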
Backpropagation: code implementation
# Backpropagation: train the network on generated data
import tensorflow as tf
import numpy as np
BATCH_SIZE = 8
seed = 23455
# Generate a fixed synthetic dataset: 32 samples with 2 features each
rng = np.random.RandomState(seed)
X = rng.rand(32, 2)
# Label a sample 1 if its features sum to less than 1, else 0
Y = [[int(x0 + x1 < 1)] for (x0, x1) in X]
#print(X)
#print(Y)
x = tf.placeholder(tf.float32, shape=(None, 2))   # batch of inputs
y_ = tf.placeholder(tf.float32, shape=(None, 1))  # batch of labels
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)  # no activation, so the model is purely linear
# Define the loss function (mean squared error) and the training method
loss = tf.reduce_mean(tf.square(y - y_))
# train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
# train_step = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss)
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    print("Parameters before training:")
    print(sess.run(w1))
    print(sess.run(w2))
    STEPS = 3000
    for i in range(STEPS):
        # Cycle through the 32 samples in mini-batches of BATCH_SIZE
        start = (i * BATCH_SIZE) % 32
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 500 == 0:
            total_loss = sess.run(loss, feed_dict={x: X, y_: Y})
            print("After %d training steps, loss on all data is %g" % (i, total_loss))
    print("Parameters after training:")
    print(sess.run(w1))
    print(sess.run(w2))
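After training, it is worth checking how well the learned weights actually separate the two classes. The sketch below is illustrative and assumes it is placed inside the same with tf.Session() block, right after the loop above; the 0.5 threshold and the accuracy computation are additions for illustration, not part of the original program.

    # Sketch: training-set accuracy (run inside the Session above)
    preds = sess.run(y, feed_dict={x: X})           # raw linear outputs
    pred_labels = (preds > 0.5).astype(int)         # threshold at 0.5
    accuracy = np.mean(pred_labels == np.array(Y))  # fraction of correct labels
    print("training-set accuracy: %g" % accuracy)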