# 神经网络 权重可视化

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

import pylab
import tensorflow.examples.tutorials.mnist.input_data as input_data
# Load MNIST (was missing entirely: `mnist` is referenced in the training
# loop below but never defined). one_hot=True matches the 10-way labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Placeholders: flattened 28*28 grayscale images and one-hot class labels.
x = tf.placeholder(tf.float32, [None, 784])
y_actual = tf.placeholder(tf.float32, shape=[None, 10])

W1 = tf.Variable(tf.random_normal([784, 10]))   # weights, randomly initialized
b1 = tf.Variable(tf.random_normal([10]))        # bias, randomly initialized

# Affine transform + sigmoid, then softmax to get per-class probabilities.
# NOTE(review): sigmoid-then-softmax squashes twice; kept as in the original
# design, but a plain linear layer into softmax is the usual formulation.
layer1 = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
y_predict = tf.nn.softmax(layer1)

# L2 weight decay on W1 with scale 0.05.
regularizer = tf.contrib.layers.l2_regularizer(0.05)
regloss = regularizer(W1)

# Cross-entropy loss. Clip the probabilities away from 0 so tf.log never
# produces -inf/NaN (the original would NaN out once any prob underflowed).
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_actual * tf.log(tf.clip_by_value(y_predict, 1e-10, 1.0)),
                   reduction_indices=1))
totalloss = cross_entropy + regloss

# Optimizer step (was missing: `train_step` is run in the session below).
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(totalloss)

# Test-time accuracy: fraction of samples whose argmax matches the label.
correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_actual, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# initialize_all_variables() is deprecated; use the modern initializer op.
init = tf.global_variables_initializer()

# Train, evaluate, then visualize the learned weights.
# (The original lost all block indentation — a guaranteed IndentationError;
# restored here.)
with tf.Session() as sess:
    sess.run(init)

    for step in range(10000):                    # train for 10000 iterations
        # Mini-batch of 100 examples per step.
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_actual: batch_ys})
        if step % 1000 == 0:                     # evaluate every 1000 steps
            print("accuracy:",
                  sess.run(accuracy,
                           feed_dict={x: mnist.test.images,
                                      y_actual: mnist.test.labels}))

    # Pull the learned weights out as a numpy array of shape (784, 10).
    W_1 = W1.eval()
    # Each of the 10 output units has 784 input weights; reshape each column
    # to 28x28 and render it as an image.
    for unit in range(10):
        im = W_1[:, unit].reshape(28, 28)
        plt.imshow(im)
        plt.show()

# (Web-page footer residue from the original blog post, preserved as a
#  comment so the file stays valid Python: "• 评论" [comments] / "• 上一篇"
#  [previous post])