Implementing a Fully-Connected Neural Network in TensorFlow

## Requires TensorFlow 1.x (placeholder/Session-style API)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


## Initialize the network parameters
def init_NN():
    mnist = input_data.read_data_sets('data/', one_hot=True)
    ## Layer sizes, input -> output
    n_input = 784     ## input layer (28x28 pixels, flattened)
    n_hidden_1 = 256  ## hidden layer 1
    n_hidden_2 = 128  ## hidden layer 2
    n_classes = 10    ## output layer (10 digit classes)

    x = tf.placeholder('float', [None, n_input])
    y = tf.placeholder('float', [None, n_classes])

    stddev = 0.1  ## standard deviation for weight initialization
    ## Initialize the weights of each layer
    weights = {
        'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
        'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
        'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev)),
    }
    ## Initialize the biases of each layer
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }
    return x, y, weights, biases, mnist
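
A fixed stddev of 0.1 works here, but Xavier/Glorot initialization is a common alternative that scales the variance to the layer's fan-in and fan-out. A minimal sketch for the first weight matrix, assuming TF 1.x (the variable name is hypothetical, not part of the original code):

    ## Hypothetical alternative (not in the original): Xavier/Glorot initialization
    w1_xavier = tf.get_variable('w1_xavier', shape=[784, 256],
                                initializer=tf.glorot_uniform_initializer())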


## Forward propagation
def multilayer_perceptron(_x, _weights, _biases):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_x, _weights['w1']), _biases['b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    ## Return raw logits; softmax is applied inside the loss function
    return tf.add(tf.matmul(layer_2, _weights['out']), _biases['out'])
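
For comparison, the same forward pass can be expressed with the higher-level tf.layers API of TF 1.x; a minimal sketch under that assumption (the function name is illustrative only):

    def multilayer_perceptron_dense(_x):
        ## Same 784 -> 256 -> 128 -> 10 topology via tf.layers.dense
        layer_1 = tf.layers.dense(_x, 256, activation=tf.nn.sigmoid)
        layer_2 = tf.layers.dense(layer_1, 128, activation=tf.nn.sigmoid)
        return tf.layers.dense(layer_2, 10, activation=None)  ## raw logits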


## Backpropagation: loss, optimizer, and accuracy
def back_propagation(_pred, _y):
    ## Learning rate
    learning_rate = 0.01
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=_y))
    optm = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
    corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(_y, 1))
    accu = tf.reduce_mean(tf.cast(corr, 'float'))
    return cost, optm, accu
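
tf.nn.softmax_cross_entropy_with_logits fuses the softmax and the cross-entropy for numerical stability. To make the underlying math explicit, here is a naive manual equivalent (illustration only; the helper name is made up and the fused op should be preferred):

    def manual_cross_entropy(logits, labels):
        ## Naive softmax cross-entropy; less numerically stable than the fused op
        probs = tf.nn.softmax(logits)
        return tf.reduce_mean(-tf.reduce_sum(labels * tf.log(probs + 1e-10), axis=1))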


## Training loop
def start_train(_x, _y, _cost, _optm, _accu, _mnist):
    training_epochs = 20
    batch_size = 100
    display_step = 4
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    for epoch in range(1, training_epochs + 1):
        avg_cost = 0
        total_batch = int(_mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = _mnist.train.next_batch(batch_size)
            feeds = {_x: batch_xs, _y: batch_ys}
            sess.run(_optm, feed_dict=feeds)
            avg_cost += sess.run(_cost, feed_dict=feeds)
        avg_cost = avg_cost / total_batch
        if epoch % display_step == 0:
            print("Epoch:%03d/%03d cost:%.9f" % (epoch, training_epochs, avg_cost))
            ## Accuracy on the last training batch of this epoch
            train_acc = sess.run(_accu, feed_dict=feeds)
            print("train_acc:%.3f" % train_acc)
            test_feeds = {_x: _mnist.test.images, _y: _mnist.test.labels}
            test_acc = sess.run(_accu, feed_dict=test_feeds)
            print("test_acc:%.3f" % test_acc)
    print('FINISH')
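
One idiomatic refinement in TF 1.x is to manage the session with a with block so it is closed automatically even if training raises an exception; a sketch of the same scaffolding:

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ## ... run the epoch/batch loop shown above ...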


if __name__ == '__main__':
    x, y, weights, biases, mnist = init_NN()
    ## Build the forward-propagation graph
    pred = multilayer_perceptron(x, weights, biases)
    ## Build the loss, optimizer, and accuracy ops
    cost, optm, accu = back_propagation(pred, y)
    ## Train the network
    start_train(x, y, cost, optm, accu, mnist)
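
Because the weights are drawn from random normals, the numbers below will vary from run to run. If reproducibility matters, the graph-level seed can be set before the graph is built (an optional addition, not in the original code):

    tf.set_random_seed(42)  ## graph-level seed; must be set before variables are created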

## Output

Epoch:004/020 cost:1.799211222
train_acc:0.570
test_acc:0.625
Epoch:008/020 cost:0.995448108
train_acc:0.830
test_acc:0.808
Epoch:012/020 cost:0.662830535
train_acc:0.790
test_acc:0.854
Epoch:016/020 cost:0.525997586
train_acc:0.870
test_acc:0.873
Epoch:020/020 cost:0.454229037
train_acc:0.890
test_acc:0.887
FINISH

Process finished with exit code 0
