import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import input_data

# Load the MNIST dataset with one-hot encoded labels; the helper downloads
# the files into "data/" on first use.
minist = input_data.read_data_sets("data/", one_hot=True)
trainimg = minist.train.images
trainlabel = minist.train.labels
testimg = minist.test.images
testlabel = minist.test.labels
# Sanity-check the array shapes (28x28 images flattened to 784 floats,
# 10-way one-hot labels).
print(trainimg.shape)    # expected: (55000, 784)
print(trainlabel.shape)  # expected: (55000, 10)
print(testimg.shape)     # expected: (10000, 784)
print(testlabel.shape)   # expected: (10000, 10)
# Placeholders for a mini-batch of flattened 28x28 images and one-hot labels.
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("float", [None, 10])
# Single-layer softmax regression: weight matrix (784 -> 10) and per-class bias.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))  # shape (10,)
# Per-sample probability distribution over the 10 digit classes.
actv = tf.nn.softmax(tf.matmul(x, W) + b)
# Cross-entropy loss averaged over the batch. Clip the softmax output away
# from zero so tf.log never produces -inf/NaN when a probability underflows.
loss = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(actv, 1e-10, 1.0)),
                   reduction_indices=1))
learning_rate = 0.01  # fixed step size for plain SGD
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# PREDICTION: compare the argmax of the predicted distribution with the
# argmax of the one-hot label for each sample.
pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1))
# ACCURACY: cast the boolean matches to 0/1 floats and average them.
accr = tf.reduce_mean(tf.cast(pred, "float"))
# INITIALIZER
init = tf.global_variables_initializer()
training_epochs = 10  # full passes over the training set
batch_size = 100      # samples consumed per gradient step
display_step = 5      # print metrics every N epochs

# SESSION
sess = tf.Session()
sess.run(init)  # run the variable-initialization op

# MINI-BATCH LEARNING
for epoch in range(training_epochs):
    avg_cost = 0.
    num_batch = int(minist.train.num_examples / batch_size)
    for i in range(num_batch):
        batch_xs, batch_ys = minist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        # Accumulate each batch's loss into the running epoch average.
        avg_cost += sess.run(loss, feed_dict=feeds) / num_batch
    # DISPLAY: every display_step epochs, report the loss plus accuracy on
    # the last training batch and on the full test set.
    if epoch % display_step == 0:
        feeds_train = {x: batch_xs, y: batch_ys}
        feeds_test = {x: minist.test.images, y: minist.test.labels}
        train_acc = sess.run(accr, feed_dict=feeds_train)
        test_acc = sess.run(accr, feed_dict=feeds_test)
        print("Epoch: %03d/%03d cost: %.9f train_acc: %.3f test_acc: %.3f"
              % (epoch, training_epochs, avg_cost, train_acc, test_acc))
print("DONE")