Building a Fully Connected Neural Network with TensorFlow

Reference notebook: https://github.com/sjchoi86/tensorflow-101/blob/master/notebooks/mlp_mnist_xavier.ipynb

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
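
# With one_hot=True, each label is a length-10 indicator vector rather than an
# integer, which is what the softmax cross-entropy loss below expects:
print(mnist.train.images.shape)  # (55000, 784): 28x28 images, flattened
print(mnist.train.labels.shape)  # (55000, 10): one-hot encoded digits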

# Xavier initialization
def xavier_init(n_inputs, n_outputs, uniform=True):
    if uniform:  # uniform distribution
        init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range)
    else:  # truncated normal distribution
        stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
        return tf.truncated_normal_initializer(stddev=stddev)
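
# Xavier initialization scales the initial weights by fan-in and fan-out so the
# activation variance stays roughly constant across layers; the uniform variant
# samples from [-r, r] with r = sqrt(6 / (n_inputs + n_outputs)). A minimal
# sanity check of the sampled statistics (a sketch in a throwaway graph;
# 'w_check' and the shape are illustrative, not part of the model):
with tf.Graph().as_default():
    w_check = tf.get_variable('w_check', shape=[784, 256],
                              initializer=xavier_init(784, 256, uniform=True))
    with tf.Session() as check_sess:
        check_sess.run(tf.global_variables_initializer())
        w_vals = check_sess.run(w_check)
        # For 784 -> 256, r = sqrt(6/1040), about 0.076
        print(w_vals.min(), w_vals.max(), w_vals.std())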
    
# Parameters
learning_rate = 0.001
training_epochs = 50
batch_size = 128
display_step = 1
# Network parameters
n_input = 784      # MNIST input: 28x28 pixels, flattened
n_hidden_1 = 256
n_hidden_2 = 256
n_hidden_3 = 256
n_hidden_4 = 256
n_classes = 10     # digits 0-9

# TF graph inputs
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
dropout_keep_prob = tf.placeholder("float")  # fed per run: < 1.0 when training, 1.0 when evaluating

# Build the model: four ReLU hidden layers with dropout, plus a linear output layer
def multilayer_perceptron(_X, _weights, _biases, _keep_prob):
    layer_1 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])), _keep_prob)
    layer_2 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])), _keep_prob)
    layer_3 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(layer_2, _weights['h3']), _biases['b3'])), _keep_prob)
    layer_4 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(layer_3, _weights['h4']), _biases['b4'])), _keep_prob)
    return tf.matmul(layer_4, _weights['out']) + _biases['out']  # raw logits; softmax is applied in the loss
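
# For comparison, the same architecture can be written more compactly with
# tf.layers.dense and TF's built-in Glorot (Xavier) initializer. This is an
# alternative sketch under TF 1.x, not the notebook's code; mlp_dense is a
# hypothetical name and is not used below.
def mlp_dense(_X, _keep_prob):
    h = _X
    for i in range(4):  # four hidden layers of 256 ReLU units
        h = tf.layers.dense(h, 256, activation=tf.nn.relu,
                            kernel_initializer=tf.glorot_uniform_initializer(),
                            name='dense_%d' % i)
        h = tf.nn.dropout(h, _keep_prob)
    return tf.layers.dense(h, n_classes, name='dense_out')  # raw logits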

# Initialize weights (Xavier) and biases
weights = {
    'h1': tf.get_variable('h1', shape=[n_input, n_hidden_1], initializer=xavier_init(n_input, n_hidden_1)),
    'h2': tf.get_variable('h2', shape=[n_hidden_1, n_hidden_2], initializer=xavier_init(n_hidden_1, n_hidden_2)),
    'h3': tf.get_variable('h3', shape=[n_hidden_2, n_hidden_3], initializer=xavier_init(n_hidden_2, n_hidden_3)),
    'h4': tf.get_variable('h4', shape=[n_hidden_3, n_hidden_4], initializer=xavier_init(n_hidden_3, n_hidden_4)),
    'out': tf.get_variable('out', shape=[n_hidden_4, n_classes], initializer=xavier_init(n_hidden_4, n_classes))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'b4': tf.Variable(tf.random_normal([n_hidden_4])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Assemble the model
pred = multilayer_perceptron(x, weights, biases, dropout_keep_prob)

# Define the loss function and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
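
# tf.nn.softmax_cross_entropy_with_logits fuses the softmax and the
# cross-entropy into one numerically stable op, which is why the model returns
# raw logits. Conceptually it computes the following (an illustrative, less
# stable equivalent; these extra ops are never run during training):
probs = tf.nn.softmax(pred)
manual_cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(probs + 1e-10), axis=1))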

# Compute accuracy
correct_prediction = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,"float"))
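
# tf.argmax(pred, 1) picks the index of the largest logit in each row, so
# comparing it with the argmax of the one-hot labels gives a boolean vector
# whose mean is the accuracy. A toy NumPy illustration with made-up numbers:
_logits = np.array([[2.0, 0.1], [0.3, 1.2], [0.9, 0.8]])
_labels = np.array([[1, 0], [0, 1], [0, 1]])  # one-hot, 2 classes for brevity
print((np.argmax(_logits, 1) == np.argmax(_labels, 1)).mean())  # 0.667: 2 of 3 correct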

# Initialize variables
init = tf.global_variables_initializer()
# Launch the graph
sess = tf.Session()
sess.run(init)

# Training
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, dropout_keep_prob: 0.7})
        # Accumulate the average loss; note the keep probability is 1 for evaluation
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, dropout_keep_prob: 1.}) / total_batch

    if epoch % display_step == 0:
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        # Accuracy on the last mini-batch only; a full-training-set alternative is sketched below
        train_acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, dropout_keep_prob: 1.})
        print("Training accuracy: %.3f" % (train_acc))
        
# Test-set accuracy
test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, dropout_keep_prob: 1.})
print("Testing accuracy: %.3f" % (test_acc))