TensorFlow Neural Network Model Architecture
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
print ("packs loaded")
packs loaded
mnist = input_data.read_data_sets('data/', one_hot=True)
Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
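Note that input_data comes from the old TF 1.x tutorials package, which is no longer bundled with TensorFlow 2.x. If that module is missing, here is a minimal sketch of pulling the same data through tf.keras.datasets instead; it yields plain NumPy arrays rather than the DataSet object used below, so next_batch would need to be replaced by manual slicing:
# Alternative loading sketch (TF 2.x): NumPy arrays instead of a DataSet object
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype("float32") / 255.0  # flatten 28x28 -> 784
test_x = test_x.reshape(-1, 784).astype("float32") / 255.0
train_y = np.eye(10)[train_y]  # one-hot encode the labels
test_y = np.eye(10)[test_y]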
# graph mode is required for the placeholders and Session used below
tf.compat.v1.disable_eager_execution()
Neural Network Model
# NETWORK TOPOLOGIES
n_hidden_1 = 256 # number of neurons in the first hidden layer
n_hidden_2 = 128 # number of neurons in the second hidden layer
n_input = 784 # number of input pixels (28 x 28)
n_classes = 10 # number of output classes
# INPUTS AND OUTPUTS
x = tf.compat.v1.placeholder("float", [None, n_input])
y = tf.compat.v1.placeholder("float", [None, n_classes])
# NETWORK PARAMETERS
stddev = 0.1
weights = {
'w1': tf.Variable(tf.random.normal([n_input, n_hidden_1], stddev=stddev)),
'w2': tf.Variable(tf.random.normal([n_hidden_1, n_hidden_2], stddev=stddev)),
'out': tf.Variable(tf.random.normal([n_hidden_2, n_classes], stddev=stddev))
}
biases = {
'b1': tf.Variable(tf.random.normal([n_hidden_1])),
'b2': tf.Variable(tf.random.normal([n_hidden_2])),
'out': tf.Variable(tf.random.normal([n_classes]))
}
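As a quick sanity check on model size, the parameter counts implied by this topology can be computed directly from the variables defined above (a standalone sketch):
# Parameter-count check for the 784-256-128-10 topology
params_w1 = n_input * n_hidden_1 + n_hidden_1      # 784*256 + 256 = 200,960
params_w2 = n_hidden_1 * n_hidden_2 + n_hidden_2   # 256*128 + 128 = 32,896
params_out = n_hidden_2 * n_classes + n_classes    # 128*10 + 10 = 1,290
print("total parameters:", params_w1 + params_w2 + params_out)  # 235,146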
print ("NETWORK READY")
NETWORK READY
def multilayer_perceptron(_X, _weights, _biases):
    # two sigmoid hidden layers followed by a linear output layer (logits)
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    return tf.add(tf.matmul(layer_2, _weights['out']), _biases['out'])
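For readers more familiar with the TF 2.x Keras API, roughly the same 784-256-128-10 network can be sketched as follows; this is illustrative only, and the rest of this section keeps using the compat.v1 graph built above:
# Sketch: the same topology in tf.keras (illustrative equivalent)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation="sigmoid", input_shape=(784,)),
    tf.keras.layers.Dense(128, activation="sigmoid"),
    tf.keras.layers.Dense(10)  # linear logits; softmax is applied in the loss
])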
# PREDICTION
pred = multilayer_perceptron(x, weights, biases)
# LOSS AND OPTIMIZER
# loss: softmax cross-entropy - tf.nn.softmax_cross_entropy_with_logits();
# labels and logits must go to the correct keyword arguments
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
# gradient-descent optimizer
optm = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
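A quick sanity check on the loss, with made-up logits: the softmax cross-entropy of any prediction against a one-hot label is non-negative, so a cost that drifts below zero during training is a reliable sign that the labels and logits arguments were swapped:
# Sanity-check sketch with hypothetical values: cross-entropy of a softmax
# distribution against a one-hot label is always >= 0
logits_demo = tf.constant([[2.0, 1.0, 0.1]])
labels_demo = tf.constant([[1.0, 0.0, 0.0]])
loss_demo = tf.nn.softmax_cross_entropy_with_logits(labels=labels_demo, logits=logits_demo)
with tf.compat.v1.Session() as demo_sess:
    print(demo_sess.run(loss_demo))  # ~0.417, positive as expected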
# correctness: does the predicted class match the true class?
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# accuracy: mean of the correctness vector
accr = tf.reduce_mean(tf.cast(corr, "float"))
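On concrete values, the accuracy node behaves like this NumPy sketch with two hypothetical examples:
# Sketch: accuracy on two made-up examples, one right and one wrong
pred_demo = np.array([[0.1, 0.8, 0.1], [0.6, 0.3, 0.1]])   # predicted class 1, then 0
label_demo = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])  # true class 1, then 2
print(np.mean(np.argmax(pred_demo, 1) == np.argmax(label_demo, 1)))  # 0.5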
# INITIALIZER
init = tf.compat.v1.global_variables_initializer()
print ("FUNCTIONS READY")
FUNCTIONS READY
training_epochs = 20
batch_size = 100
display_step = 4
# LAUNCH THE GRAPH
sess = tf.compat.v1.Session()
sess.run(init)
# OPTIMIZE
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(mnist.train.num_examples/batch_size)
    # ITERATION
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        # one gradient-descent step; feed_dict supplies the mini-batch
        sess.run(optm, feed_dict=feeds)
        # accumulate the batch loss
        avg_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = avg_cost / total_batch
    # DISPLAY
    if (epoch+1) % display_step == 0:
        print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds)
        print ("TRAIN ACCURACY: %.3f" % (train_acc))
        feeds = {x: mnist.test.images, y: mnist.test.labels}
        test_acc = sess.run(accr, feed_dict=feeds)
        print ("TEST ACCURACY: %.3f" % (test_acc))
print ("OPTIMIZATION FINISHED")
Epoch: 003/020 cost: -137763.534588068
TRAIN ACCURACY: 0.100
TEST ACCURACY: 0.113
Epoch: 007/020 cost: -295983.534829545
TRAIN ACCURACY: 0.110
TEST ACCURACY: 0.113
Epoch: 011/020 cost: -454205.143522727
TRAIN ACCURACY: 0.100
TEST ACCURACY: 0.113
Epoch: 015/020 cost: -612429.083977273
TRAIN ACCURACY: 0.130
TEST ACCURACY: 0.113
Epoch: 019/020 cost: -770653.138977273
TRAIN ACCURACY: 0.170
TEST ACCURACY: 0.113
OPTIMIZATION FINISHED
Note on this log: the run above was executed with pred and y passed positionally to tf.nn.softmax_cross_entropy_with_logits, which places the logits in the labels slot. That is why the cost is negative and keeps falling while accuracy stays near chance (~11%). With the keyword form labels=y, logits=pred used above, the cross-entropy is non-negative and decreases as the network actually learns.
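With the session still open, the trained graph can be reused for inference; a small sketch that classifies the first test image using the pred, x, and sess defined above:
# Inference sketch: predict the class of the first test image
sample = mnist.test.images[:1]  # shape (1, 784)
predicted = sess.run(tf.argmax(pred, 1), feed_dict={x: sample})
print("predicted digit: %d, true digit: %d"
      % (predicted[0], np.argmax(mnist.test.labels[0])))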