DL5: A Simple Neural Network for Multi-Class Classification

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# MNIST helper from the TF 1.x tutorials (also distributed as a standalone input_data.py)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./data/MNIST/', one_hot=True)
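The reader returned by read_data_sets wraps the train, validation and test splits as flattened 28x28 images with one-hot labels, which is where the 784 inputs and 10 classes below come from. A quick shape check (a minimal sketch; these attributes come from the same TF 1.x helper):

print(mnist.train.images.shape)  # (55000, 784) -> n_input = 784
print(mnist.train.labels.shape)  # (55000, 10)  -> n_classes = 10
print(mnist.test.num_examples)   # 10000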

Network architecture

# NETWORK TOPOLOGIES
n_hidden_1 = 256
n_hidden_2 = 128
n_input = 784
n_classes = 10

# INPUTS AND OUTPUTS
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# NETWORK PARAMETERS
stddev = 0.1
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
    'w2': tf.Variable(
        tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
    'out': tf.Variable(
        tf.random_normal([n_hidden_2, n_classes], stddev=stddev))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
print("NETWORK READY")
NETWORK READY
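With hidden layers of 256 and 128 units, the model has roughly 235k trainable parameters. A quick check using the sizes defined above (a minimal sketch):

n_params = (n_input * n_hidden_1 + n_hidden_1) \
         + (n_hidden_1 * n_hidden_2 + n_hidden_2) \
         + (n_hidden_2 * n_classes + n_classes)
print(n_params)  # 235146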
# Forward pass: two sigmoid hidden layers, linear output layer (logits)
def multilayer_perceptron(_X, _weights, _biases):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    return tf.matmul(layer_2, _weights['out']) + _biases['out']
# PREDICTION
pred = multilayer_perceptron(x, weights, biases)

# LOSS AND OPTIMIZER
# softmax cross-entropy is computed from the raw logits returned by the network
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))  # per-example correctness
accr = tf.reduce_mean(tf.cast(corr, "float"))         # accuracy

# INITIALIZER
init = tf.global_variables_initializer()
print ("FUNCTIONS READY")
FUNCTIONS READY
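The network outputs raw logits and softmax is only applied inside the loss, so class probabilities or hard predictions for inference have to be derived from pred separately (a minimal sketch; probs and pred_label are names introduced here):

probs = tf.nn.softmax(pred)        # per-class probabilities
pred_label = tf.argmax(probs, 1)   # predicted digit per input row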
training_epochs = 400
batch_size      = 100
display_step    = 10
# LAUNCH THE GRAPH
sess = tf.Session()
sess.run(init)
# OPTIMIZE
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(mnist.train.num_examples/batch_size)
    # ITERATION
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        avg_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = avg_cost / total_batch
    # DISPLAY
    if epoch % display_step == 0:
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds)  # accuracy on the last mini-batch only
        print("TRAIN ACCURACY: %.3f" % (train_acc))
        feeds = {x: mnist.test.images, y: mnist.test.labels}
        test_acc = sess.run(accr, feed_dict=feeds)
        print("TEST ACCURACY: %.3f" % (test_acc))
print("OPTIMIZATION FINISHED")
Epoch: 000/400 cost: 2.452986904
TRAIN ACCURACY: 0.180
TEST ACCURACY: 0.172
Epoch: 010/400 cost: 2.185003601
TRAIN ACCURACY: 0.480
TEST ACCURACY: 0.434
Epoch: 020/400 cost: 2.042294119
TRAIN ACCURACY: 0.590
TEST ACCURACY: 0.580
Epoch: 030/400 cost: 1.831668826
TRAIN ACCURACY: 0.680
TEST ACCURACY: 0.656
Epoch: 040/400 cost: 1.570585941
TRAIN ACCURACY: 0.670
TEST ACCURACY: 0.706
Epoch: 050/400 cost: 1.320060859
TRAIN ACCURACY: 0.740
TEST ACCURACY: 0.743
Epoch: 060/400 cost: 1.119256323
TRAIN ACCURACY: 0.830
TEST ACCURACY: 0.773
Epoch: 070/400 cost: 0.969276139
TRAIN ACCURACY: 0.820
TEST ACCURACY: 0.797
Epoch: 080/400 cost: 0.857764270
TRAIN ACCURACY: 0.780
TEST ACCURACY: 0.816
Epoch: 090/400 cost: 0.773080944
TRAIN ACCURACY: 0.830
TEST ACCURACY: 0.828
Epoch: 100/400 cost: 0.707080016
TRAIN ACCURACY: 0.820
TEST ACCURACY: 0.839
Epoch: 110/400 cost: 0.654420159
TRAIN ACCURACY: 0.890
TEST ACCURACY: 0.848
Epoch: 120/400 cost: 0.611597223
TRAIN ACCURACY: 0.890
TEST ACCURACY: 0.854
Epoch: 130/400 cost: 0.576186454
TRAIN ACCURACY: 0.820
TEST ACCURACY: 0.861
Epoch: 140/400 cost: 0.546534869
TRAIN ACCURACY: 0.900
TEST ACCURACY: 0.867
Epoch: 150/400 cost: 0.521429843
TRAIN ACCURACY: 0.900
TEST ACCURACY: 0.871
Epoch: 160/400 cost: 0.499914402
TRAIN ACCURACY: 0.880
TEST ACCURACY: 0.874
Epoch: 170/400 cost: 0.481385442
TRAIN ACCURACY: 0.860
TEST ACCURACY: 0.878
Epoch: 180/400 cost: 0.465256707
TRAIN ACCURACY: 0.900
TEST ACCURACY: 0.882
Epoch: 190/400 cost: 0.451132424
TRAIN ACCURACY: 0.890
TEST ACCURACY: 0.885
Epoch: 200/400 cost: 0.438655051
TRAIN ACCURACY: 0.870
TEST ACCURACY: 0.887
Epoch: 210/400 cost: 0.427586571
TRAIN ACCURACY: 0.870
TEST ACCURACY: 0.889
Epoch: 220/400 cost: 0.417698937
TRAIN ACCURACY: 0.840
TEST ACCURACY: 0.892
Epoch: 230/400 cost: 0.408797454
TRAIN ACCURACY: 0.870
TEST ACCURACY: 0.893
Epoch: 240/400 cost: 0.400745667
TRAIN ACCURACY: 0.900
TEST ACCURACY: 0.895
Epoch: 250/400 cost: 0.393431878
TRAIN ACCURACY: 0.930
TEST ACCURACY: 0.896
Epoch: 260/400 cost: 0.386751812
TRAIN ACCURACY: 0.880
TEST ACCURACY: 0.897
Epoch: 270/400 cost: 0.380604704
TRAIN ACCURACY: 0.900
TEST ACCURACY: 0.899
Epoch: 280/400 cost: 0.374926474
TRAIN ACCURACY: 0.910
TEST ACCURACY: 0.900
Epoch: 290/400 cost: 0.369659365
TRAIN ACCURACY: 0.900
TEST ACCURACY: 0.901
Epoch: 300/400 cost: 0.364751811
TRAIN ACCURACY: 0.860
TEST ACCURACY: 0.902
Epoch: 310/400 cost: 0.360186188
TRAIN ACCURACY: 0.900
TEST ACCURACY: 0.903
Epoch: 320/400 cost: 0.355889409
TRAIN ACCURACY: 0.880
TEST ACCURACY: 0.904
Epoch: 330/400 cost: 0.351845439
TRAIN ACCURACY: 0.910
TEST ACCURACY: 0.905
Epoch: 340/400 cost: 0.348040061
TRAIN ACCURACY: 0.910
TEST ACCURACY: 0.906
Epoch: 350/400 cost: 0.344424021
TRAIN ACCURACY: 0.850
TEST ACCURACY: 0.906
Epoch: 360/400 cost: 0.340995019
TRAIN ACCURACY: 0.930
TEST ACCURACY: 0.907
Epoch: 370/400 cost: 0.337742235
TRAIN ACCURACY: 0.910
TEST ACCURACY: 0.908
Epoch: 380/400 cost: 0.334627790
TRAIN ACCURACY: 0.920
TEST ACCURACY: 0.909
Epoch: 390/400 cost: 0.331656928
TRAIN ACCURACY: 0.930
TEST ACCURACY: 0.909
OPTIMIZATION FINISHED
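After 400 epochs of plain SGD at learning rate 0.001, the test accuracy plateaus around 91%. matplotlib was imported above but never used; here is a minimal sketch for eyeballing a few test predictions with the still-open session (the variable names below are introduced here):

n_show = 5
imgs = mnist.test.images[:n_show]
preds = sess.run(tf.argmax(pred, 1), feed_dict={x: imgs})
for i in range(n_show):
    plt.subplot(1, n_show, i + 1)
    plt.imshow(imgs[i].reshape(28, 28), cmap='gray')
    plt.title("pred: %d" % preds[i])
    plt.axis('off')
plt.show()

Once evaluation is done, the session can be released with sess.close().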
