TensorFlow第六步: 继续挖坑 用tf重写BP并增加SGD

用 tf重写BP,并增加SGD:

# coding=utf-8
import os  
os.environ["TF_CPP_MIN_LOG_LEVEL"]='2' # suppress TF startup noise: show only warnings and errors 
 
import numpy as np
import tensorflow as tf
import random
#import matplotlib.pyplot as plt
 
# TensorBoard log directory and training hyperparameters.
logs_path=r'c:/temp/log_mnist_softmax'
learning_rate=5.0 # NOTE(review): original comment said error blows up when lr > 0.05, yet 5.0 is used - confirm intended
training_epochs=100
batch_size=3
 
# Training set: six 4-dimensional input vectors with one-hot 2-class targets.
trainData_in = np.array([
    [1.0, 1.0, 0.0, 0.0],
    [1.0, 0.0, 1.0, 0.0],
    [1.0, 0.0, 0.0, 1.0],
    [1.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 1.0],
])
trainData_out = np.array([
    [0.0, 1.0],
    [0.0, 1.0],
    [0.0, 1.0],
    [1.0, 0.0],
    [1.0, 0.0],
    [1.0, 0.0],
])

# Held-out test set in the same (input, one-hot target) format.
testData_in = np.array([
    [0.0, 0.0, 0.6, 0.8],
    [0.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 1.0],
    [0.0, 1.0, 1.0, 0.0],
    [0.0, 1.0, 0.0, 1.0],
    [0.0, 0.0, 1.0, 1.0],
])
testData_out = np.array([
    [0.0, 1.0],
    [1.0, 0.0],
    [1.0, 0.0],
    [0.0, 1.0],
    [0.0, 1.0],
    [0.0, 1.0],
])

# Sanity-check the dimensions: (6, 4) inputs and (6, 2) targets.
print(np.shape(trainData_in))
print(np.shape(trainData_out))

# Build the TF1 computation graph: a 4-3-2 MLP (sigmoid hidden layer,
# softmax output) plus hand-written backpropagation gradient nodes.
x_input=tf.placeholder(tf.float32, [None,4], name='x_input')
y_desired=tf.placeholder(tf.float32,[None,2],name='y_desired')
w1=tf.Variable(tf.truncated_normal([4,3],stddev=0.1),name='w1')  # hidden-layer weights, 4x3
b1=tf.Variable(tf.zeros([3]),name='b1')                          # hidden-layer biases
z1=tf.matmul(x_input,w1)+b1   # hidden pre-activation
y1=tf.nn.sigmoid(z1)          # hidden activation

w=tf.Variable(tf.truncated_normal([3,2],stddev=0.1),name='w')    # output-layer weights, 3x2
b=tf.Variable(tf.zeros([2]),name='b')                            # output-layer biases
z=tf.matmul(y1,w)+b
y_output=tf.nn.softmax(z,name='y_output')
lossFun_crossEntropy=-tf.reduce_mean(y_desired*tf.log(y_output)) # cross-entropy averaged over every element of the batch matrix

# Hand-coded backprop.  BP1-BP4 refer to the standard backpropagation
# equations (Nielsen, "Neural Networks and Deep Learning", ch. 2).
# NOTE: `delta` is deliberately reassigned below - the output-layer
# gradients must be built BEFORE it is propagated back to the hidden layer.
delta=tf.add(y_output,-y_desired)  # BP1: output-layer error (y - y_desired) for softmax + cross-entropy
nabla_b=tf.reduce_sum(delta,axis=0,name='nabla_b') # BP3: sum delta over the batch (column-wise) #BP3
nabla_w=tf.matmul(y1,delta,transpose_a=True,name='nabla_w') # BP4: y1^T . delta
dSigmod_z1=tf.nn.sigmoid(z1)*(1-tf.nn.sigmoid(z1))  # sigmoid'(z1)
delta=tf.matmul(delta,w,transpose_b=True)*dSigmod_z1 # BP2: back-propagate error to the hidden layer
nabla_b1=tf.reduce_sum(delta,axis=0,name='nabla_b1') # BP3 for the hidden layer
nabla_w1=tf.matmul(x_input,delta,transpose_a=True,name='nabla_w1')  # BP4: x^T . delta


# Feed dictionaries for evaluating on the full training / test sets.
feed_dict_trainData={x_input:trainData_in,y_desired:trainData_out}
feed_dict_testData={x_input:testData_in,y_desired:testData_out}

correct_prediction=tf.equal(tf.argmax(y_output,1),\
                             tf.argmax(y_desired,1)) # argmax over axis 1: one class index per row, predicted vs. desired
accuracy=tf.reduce_mean(tf.cast(correct_prediction,\
                                tf.float32)) # cast booleans to floats, then average -> accuracy
###
#train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(lossFun_crossEntropy)
###
# TensorBoard scalar summaries for loss and accuracy.
tf.summary.scalar('cost',lossFun_crossEntropy)
tf.summary.scalar('accuracy',accuracy)
summary_op=tf.summary.merge_all()

# --- SGD training loop -------------------------------------------------
# Build the four weight-update ops ONCE, before the session.  The original
# version called tf.assign() inside the batch loop, which adds new nodes to
# the graph on EVERY iteration - the graph grows without bound and each
# step gets slower (a classic TF1 leak).  It also fetched the current
# weights back to Python only to feed them into the assigns; here the
# update  v <- v - lr/(m*n) * nabla_v  is expressed purely in-graph.
# m*n is constant: batches are exact batch_size slices and targets have
# trainData_out.shape[1] columns.
m = batch_size                     # rows per mini-batch
n = trainData_out.shape[1]         # output dimension (2)
step = learning_rate / (m * n)     # per-element gradient scale
update_w1 = tf.assign(w1, w1 - step * nabla_w1)
update_b1 = tf.assign(b1, b1 - step * nabla_b1)
update_w = tf.assign(w, w - step * nabla_w)
update_b = tf.assign(b, b - step * nabla_b)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    logs_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        # SGD: reshuffle the (input, target) pairs each epoch, then sweep
        # them in mini-batches of batch_size.
        trainData = list(zip(trainData_in, trainData_out))
        random.shuffle(trainData)
        trainData_in, trainData_out = zip(*trainData)
        batch_count = len(trainData_in) // batch_size
        for i in range(batch_count):
            batch_x = trainData_in[batch_size * i:batch_size * (i + 1)]
            batch_y = trainData_out[batch_size * i:batch_size * (i + 1)]
            feed_dict_batch = {x_input: batch_x, y_desired: batch_y}
            # One gradient-descent step: the assign ops evaluate the
            # gradients on this batch and overwrite the variables.
            sess.run([update_w1, update_b1, update_w, update_b],
                     feed_dict=feed_dict_batch)

        # Log and report once per epoch.  (The original wrote a summary per
        # batch but always at step=epoch, so later batches of the same
        # epoch silently overwrote earlier points in TensorBoard.)
        summary = sess.run(summary_op, feed_dict=feed_dict_trainData)
        logs_writer.add_summary(summary, epoch)
        print('Epoch', epoch)
        print('Accuracy_trainData:',
              accuracy.eval(feed_dict=feed_dict_trainData))
        print('Accuracy_testData:',
              accuracy.eval(feed_dict=feed_dict_testData))
        print('Done')

    # Spot-check one test example: desired one-hot target vs. network output.
    try_input = testData_in[0]
    try_desired = testData_out[0]
    print(try_desired)
    print(y_output.eval(feed_dict={x_input: [try_input]}))

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值