tensorflow: SimpleNeuralNetwork, a simple neural network for MNIST handwritten digit recognition


#%% Network structure
def multilayer_perceptron(_X,_W,_b):
    layer_1=tf.nn.sigmoid(tf.add(tf.matmul(_X,_W['w1']),_b['b1'])) # each Wx+b is followed by a sigmoid activation
    layer_2=tf.nn.sigmoid(tf.add(tf.matmul(layer_1,_W['w2']),_b['b2']))
    return tf.add(tf.matmul(layer_2,_W['w_out']),_b['b_out'])


The network uses the sigmoid as its activation function and is two layers deep: two fully connected hidden layers, each followed by a sigmoid, plus a fully connected output layer that returns raw logits (the softmax is applied later, inside the loss).
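For comparison, here is a minimal sketch of the same topology in tf.keras (assuming TensorFlow 2.x; this is not part of the original script):

import tensorflow as tf

# same 784 -> 256 -> 128 -> 10 topology as multilayer_perceptron above
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation='sigmoid', input_shape=(784,)),  # hidden layer 1
    tf.keras.layers.Dense(128, activation='sigmoid'),                      # hidden layer 2
    tf.keras.layers.Dense(10)                                              # raw logits; softmax lives in the loss
])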

#%%
'network topology'
n_input=784 # number of input nodes (28x28 pixels per image)
n_hidden_1=256 # neurons in the first hidden layer
n_hidden_2=128 # neurons in the second hidden layer
n_classes=10 # number of classes, i.e. output nodes
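The 784 comes from flattening each 28x28 grayscale MNIST image into a single vector; a minimal NumPy illustration (the array here is hypothetical, not from the script):

import numpy as np

img = np.zeros((28, 28), dtype=np.float32)  # one MNIST image
flat = img.reshape(-1)                      # shape (784,), matching n_input
assert flat.shape == (28 * 28,)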

Full code:

# -*- coding: utf-8 -*-
"""
Created on Sat Jul  7 20:30:21 2018

@author: 
"""
import numpy as np
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import input_data
# load the MNIST data via input_data.py
mnist=input_data.read_data_sets('data/',one_hot=True)
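# Note: input_data is the legacy MNIST helper from the old TensorFlow tutorials.
# If it is unavailable, the same data can be loaded through tf.keras instead
# (a sketch, assuming tf.keras is present; the arrays then still need to be
# flattened to 784 values and the labels one-hot encoded):
#   (train_x,train_y),(test_x,test_y)=tf.keras.datasets.mnist.load_data()
#   train_x=train_x.reshape(-1,784).astype('float32')/255.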

#%%
'network topology'
n_input=784 # number of input nodes (28x28 pixels per image)
n_hidden_1=256 # neurons in the first hidden layer
n_hidden_2=128 # neurons in the second hidden layer
n_classes=10 # number of classes, i.e. output nodes

'inputs and outputs'
x=tf.placeholder("float",[None,n_input])
y=tf.placeholder("float",[None,n_classes])

'network parameters'
stddev=.1
weights={
        "w1":tf.Variable(tf.random_normal([n_input,n_hidden_1],stddev=stddev)),
        "w2":tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2],stddev=stddev)),
        "w_out":tf.Variable(tf.random_normal([n_hidden_2,n_classes],stddev=stddev))
        }
bias={
      "b1":tf.Variable(tf.random_normal([n_hidden_1],stddev=stddev)),
      "b2":tf.Variable(tf.random_normal([n_hidden_2],stddev=stddev)),
      "b_out":tf.Variable(tf.random_normal([n_classes],stddev=stddev))
      }
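# The weights start from N(0, stddev^2) with stddev=0.1; with sigmoid units a small
# stddev keeps the initial activations out of the saturated regions. An alternative
# (a sketch, not used in the original) is Xavier initialization via the TF 1.x idiom:
#   tf.get_variable("w1",shape=[n_input,n_hidden_1],
#                   initializer=tf.contrib.layers.xavier_initializer())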
print('network done')
#%% Network structure
def multilayer_perceptron(_X,_W,_b):
    layer_1=tf.nn.sigmoid(tf.add(tf.matmul(_X,_W['w1']),_b['b1'])) # each Wx+b is followed by a sigmoid activation
    layer_2=tf.nn.sigmoid(tf.add(tf.matmul(layer_1,_W['w2']),_b['b2']))
    return tf.add(tf.matmul(layer_2,_W['w_out']),_b['b_out'])
#%%
pred=multilayer_perceptron(x,weights,bias)
'loss function: unlike 0.x, the 1.x API of softmax_cross_entropy_with_logits requires the logits= and labels= keyword arguments'
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y))
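# softmax_cross_entropy_with_logits applies softmax to pred internally and then
# computes the cross-entropy against the one-hot labels; an equivalent but less
# numerically stable formulation, shown here only for clarity:
#   cost=tf.reduce_mean(-tf.reduce_sum(y*tf.log(tf.nn.softmax(pred)),axis=1))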
'run gradient descent'
optm=tf.train.GradientDescentOptimizer(learning_rate=.001).minimize(cost)
'returns True where the predicted class matches the label'
corr=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
'cast the booleans to float, then average them to get the accuracy'
accr=tf.reduce_mean(tf.cast(corr,"float"))
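# Example: if the predicted classes are [3,1,4] and the labels are [3,1,9], corr is
# [True,True,False], which casts to [1.,1.,0.], so accr evaluates to 2/3.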

'initialization'
init=tf.global_variables_initializer()
print('*'*10,'Function & Graph Ready','*'*10)
#%%
training_epochs=50 # number of full passes over the training set
batch_size=100 # images per mini-batch
display_step=2 # print progress every 2 epochs
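# With the standard 55,000-image training split and batch_size=100, each epoch runs
# total_batch=550 parameter updates, i.e. 27,500 updates over all 50 epochs.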

'launch the graph'
sess=tf.Session()
sess.run(init)

#optimize
for epoch in range(training_epochs):
    avg_cost=0.
    total_batch=int(mnist.train.num_examples/batch_size)
    for i in range(total_batch):
        batch_xs,batch_ys=mnist.train.next_batch(batch_size)
        feeds={x:batch_xs,y:batch_ys}
        sess.run(optm,feed_dict=feeds)
        avg_cost+=sess.run(cost,feed_dict=feeds)
    avg_cost=avg_cost/total_batch
    
    
    'DISPLAY'
    if (epoch+1)%display_step==0:
        print ("*************Epoch: %03d/%03d cost: %.9f****************" % (epoch, training_epochs, avg_cost))
        feeds_train={x:batch_xs,y:batch_ys}
        train_acc=sess.run(accr,feed_dict=feeds_train)
        
        batch_test_x,batch_test_y=mnist.test.next_batch(2*batch_size)
        feeds_test={x:batch_test_x,y:batch_test_y}
        test_acc=sess.run(accr,feed_dict=feeds_test)
        print('train_accuracy: %.3f'%train_acc,'test_accuracy: %.3f'%test_acc)
print('Optimization Done!')

#%%
        
'''
********** Function & Graph Ready **********
*********Epoch: 001/050    cost: 2.290258808 ********
train_accuracy: 0.220 test_accuracy: 0.150
*********Epoch: 003/050    cost: 2.263444160 ********
train_accuracy: 0.260 test_accuracy: 0.230
*********Epoch: 005/050    cost: 2.236716069 ********
train_accuracy: 0.290 test_accuracy: 0.340
*********Epoch: 007/050    cost: 2.209223012 ********
train_accuracy: 0.450 test_accuracy: 0.430
*********Epoch: 009/050    cost: 2.180447334 ********
train_accuracy: 0.410 test_accuracy: 0.540
*********Epoch: 011/050    cost: 2.150006443 ********
train_accuracy: 0.480 test_accuracy: 0.620
*********Epoch: 013/050    cost: 2.117547733 ********
train_accuracy: 0.580 test_accuracy: 0.490
*********Epoch: 015/050    cost: 2.082600254 ********
train_accuracy: 0.540 test_accuracy: 0.650
*********Epoch: 017/050    cost: 2.044873527 ********
train_accuracy: 0.610 test_accuracy: 0.630
*********Epoch: 019/050    cost: 2.004153874 ********
train_accuracy: 0.580 test_accuracy: 0.610
*********Epoch: 021/050    cost: 1.960296518 ********
train_accuracy: 0.660 test_accuracy: 0.610
*********Epoch: 023/050    cost: 1.913258088 ********
train_accuracy: 0.630 test_accuracy: 0.580
*********Epoch: 025/050    cost: 1.863089270 ********
train_accuracy: 0.720 test_accuracy: 0.710
*********Epoch: 027/050    cost: 1.810168548 ********
train_accuracy: 0.710 test_accuracy: 0.630
*********Epoch: 029/050    cost: 1.754891352 ********
train_accuracy: 0.640 test_accuracy: 0.710
*********Epoch: 031/050    cost: 1.697785769 ********
train_accuracy: 0.620 test_accuracy: 0.690
*********Epoch: 033/050    cost: 1.639589052 ********
train_accuracy: 0.570 test_accuracy: 0.750
*********Epoch: 035/050    cost: 1.581014476 ********
train_accuracy: 0.680 test_accuracy: 0.680
*********Epoch: 037/050    cost: 1.522843859 ********
train_accuracy: 0.800 test_accuracy: 0.690
*********Epoch: 039/050    cost: 1.465744308 ********
train_accuracy: 0.690 test_accuracy: 0.700
*********Epoch: 041/050    cost: 1.410335298 ********
train_accuracy: 0.750 test_accuracy: 0.730
*********Epoch: 043/050    cost: 1.357087436 ********
train_accuracy: 0.830 test_accuracy: 0.710
*********Epoch: 045/050    cost: 1.306355495 ********
train_accuracy: 0.720 test_accuracy: 0.750
*********Epoch: 047/050    cost: 1.258348243 ********
train_accuracy: 0.780 test_accuracy: 0.690
*********Epoch: 049/050    cost: 1.213152346 ********
train_accuracy: 0.760 test_accuracy: 0.700
Optimization Done!
'''
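With plain gradient descent at learning_rate=.001 and sigmoid activations, the network converges slowly: after 50 epochs the cost has only dropped from about 2.29 to 1.21 and test accuracy plateaus around 0.70. A common tweak (a sketch, not part of the original run) is to swap in the Adam optimizer, which on this topology typically pushes test accuracy well above 0.9 within a few epochs:

optm=tf.train.AdamOptimizer(learning_rate=.001).minimize(cost)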


 
