# 神经网络-tensorflow实现mnist手写数字识别

optimizer = tf.train.AdamOptimizer().minimize(cost)

https://www.tensorflow.org/versions/r0.12/get_started/index.html

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
minist = input_data.read_data_sets("/tmp/data/",one_hot = True)

0 = [1,0,0,0,0,0,0,0,0,0]

1 = [0,1,0,0,0,0,0,0,0,0]

2 = [0,0,1,0,0,0,0,0,0,0]

... (and so on for the remaining digits 2-9)

hidden_1layer = {'weights':tf.Variable(tf.random_normal([784,n_nodes_hdl1])),'biases':tf.Variable(tf.random_normal([n_nodes_hdl1]))}

output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hdl3,n_classes])),'biases':tf.Variable(tf.random_normal([n_classes]))}

l2 = tf.add(tf.matmul(l1,hidden_2layer['weights']) , hidden_2layer['biases'])
l2 = tf.nn.relu(l2)

output = tf.matmul(l3,output_layer['weights']) + output_layer['biases']

softmax_cross_entropy_with_logits 这个函数把softmax与交叉熵函数结合在一起，softmax函数的作用相当于把每个输出量化为一个概率，这里返回一个向量，需要求均值得出实际输出与期望输出的损失。

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

optimizer = tf.train.AdamOptimizer().minimize(cost)

# TensorFlow MNIST tutorial imports (TF r0.12-era API; these snippets also
# appear in the article text above).
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# one_hot=True encodes each digit label as a 10-dim indicator vector
# (e.g. 3 -> [0,0,0,1,0,0,0,0,0,0]). NOTE(review): "minist" is a typo for
# "mnist", kept as-is because the training code below refers to this name.
minist = input_data.read_data_sets("/tmp/data/",one_hot = True)

'''
input > weight >hidden layer 1 (activation function) > weights > hidden l 2..
cross entropy(how not fit of the moddel)
optimization function to minmize the cost

BP NN
'''
# 10 class ,0-9

'''
one_hot means
0 = [1,0,0,0,0,0,0,0,0,0]
1 = [0,1,0,0,0,0,0,0,0,0]
'''
# first build the number of neural
n_nodes_hdl1 = 500
n_nodes_hdl2 = 500
n_nodes_hdl3 = 500
#each hidden layer has 500 neural nodes
n_classes = 10
batch_size = 100 #to fit the weight about 100 times ???

x = tf.placeholder('float')#placeholder to define the pre-input data ,just give the type and shape,the 2nd paremeter is to leave something thire
y = tf.placeholder('float')

def neural_network_model(data):
    """Build a 3-hidden-layer fully connected network for MNIST.

    Args:
        data: float tensor of flattened 28x28 images, shape [batch, 784].

    Returns:
        Unnormalized logits of shape [batch, n_classes]. No softmax is
        applied here; softmax_cross_entropy_with_logits does that during
        training.
    """
    # Each layer is a dict holding a randomly-initialized weight matrix
    # and bias vector.
    hidden_1layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hdl1])),
                     'biases': tf.Variable(tf.random_normal([n_nodes_hdl1]))}
    hidden_2layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hdl1, n_nodes_hdl2])),
                     'biases': tf.Variable(tf.random_normal([n_nodes_hdl2]))}
    hidden_3layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hdl2, n_nodes_hdl3])),
                     'biases': tf.Variable(tf.random_normal([n_nodes_hdl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hdl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    # BUG FIX: the original body applied tf.nn.relu to l1/l2/l3 before ever
    # computing them (the matmul lines were lost from the script). Each
    # layer is (input @ W + b) followed by a ReLU activation.
    l1 = tf.add(tf.matmul(data, hidden_1layer['weights']), hidden_1layer['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hidden_2layer['weights']), hidden_2layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(tf.matmul(l2, hidden_3layer['weights']), hidden_3layer['biases'])
    l3 = tf.nn.relu(l3)

    # Output layer: linear (logits only), no activation.
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']

    return output

def train_neural_network(x):
    """Train the network on MNIST and print per-epoch loss and test accuracy.

    Args:
        x: the input placeholder defined at module level; batches of
           flattened images are fed through it.

    Side effects: runs a TF session, prints progress per epoch and the
    final test-set accuracy.
    """
    prediction = neural_network_model(x)
    # Use explicit keywords: the positional order was (logits, labels) in
    # TF r0.12, and later versions require keywords - this form works in both.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # BUG FIX: `optimizer` was referenced in sess.run below but never
    # defined inside the script (the definition existed only in the
    # article text).
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10  # number of full passes over the training set

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(minist.train.num_examples / batch_size)):
                epoch_x, epoch_y = minist.train.next_batch(batch_size)
                # One gradient step; c is this batch's cross-entropy loss.
                _, c = sess.run([optimizer, cost],
                                feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss', epoch_loss)

        # A prediction is correct when the argmax of the logits matches the
        # argmax of the one-hot label; accuracy is the mean correct rate.
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('accuracy', accuracy.eval({x: minist.test.images, y: minist.test.labels}))


train_neural_network(x)

• 本文已收录于以下专栏：

举报原因： 您举报文章：神经网络-tensorflow实现mnist手写数字识别 色情 政治 抄袭 广告 招聘 骂人 其他 (最多只允许输入30个字)