TensorFlow Learning Notes ---- TensorBoard, Part 2

This time we use a fully connected network to recognize MNIST. More name scopes are needed and the program is more flexible, but the basic functions are the same as before. The full script (written against the pre-1.0 TensorFlow summary API) follows:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')
flags.DEFINE_integer('max_steps', 1000, 'Number of steps to run trainer.')
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_float('dropout', 0.5, 'Keep probability for training dropout.')
flags.DEFINE_string('data_dir', '/tmp/data', 'Directory for storing data')
flags.DEFINE_string('summaries_dir', '/tmp/mnist_logs', 'Summaries directory')

# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True,
                                  fake_data=FLAGS.fake_data)
sess = tf.InteractiveSession()
# We can't initialize these variables to 0 - the network will get stuck.
def weight_variable(shape):
    """Create a weight variable with appropriate initialization."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    """Create a bias variable with appropriate initialization."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            # reduce_mean (not reduce_sum) gives the true standard deviation.
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    with tf.name_scope(layer_name):
        # This Variable will hold the state of the weights for the layer
        with tf.name_scope('weights'):
            weights = weight_variable([input_dim, output_dim])
            variable_summaries(weights, layer_name + '/weights')
        with tf.name_scope('biases'):
            biases = bias_variable([output_dim])
            variable_summaries(biases, layer_name + '/biases')
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.histogram_summary(layer_name + '/pre_activations', preactivate)
        activations = act(preactivate, 'activation')
        tf.histogram_summary(layer_name + '/activations', activations)
        return activations
# Input placeholders
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    keep_prob = tf.placeholder(tf.float32)
def feed_dict(train):
    """Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
    if train or FLAGS.fake_data:
        xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
        k = FLAGS.dropout
    else:
        xs, ys = mnist.test.images, mnist.test.labels
        k = 1.0
    return {x: xs, y_: ys, keep_prob: k}
def train():
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.image_summary('input', image_shaped_input, 10)

    hidden1 = nn_layer(x, 784, 500, 'layer1')
    with tf.name_scope('dropout1'):
        tf.scalar_summary('dropout_keep_probability1', keep_prob)
        dropped1 = tf.nn.dropout(hidden1, keep_prob)

    hidden2 = nn_layer(dropped1, 500, 300, 'layer2')
    with tf.name_scope('dropout2'):
        tf.scalar_summary('dropout_keep_probability2', keep_prob)
        dropped2 = tf.nn.dropout(hidden2, keep_prob)

    y = nn_layer(dropped2, 300, 10, 'layer3', act=tf.nn.softmax)

    with tf.name_scope('cross_entropy'):
        # NOTE: taking the log of an explicit softmax is numerically unstable;
        # tf.nn.softmax_cross_entropy_with_logits on the raw logits is safer.
        diff = y_ * tf.log(y)
        with tf.name_scope('total'):
            cross_entropy = -tf.reduce_mean(diff)
        tf.scalar_summary('cross entropy', cross_entropy)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
            cross_entropy)

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.scalar_summary('accuracy', accuracy)

    # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
    merged = tf.merge_all_summaries()
    train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
                                          sess.graph)
    test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
    tf.initialize_all_variables().run()

    for i in range(FLAGS.max_steps):
        if i % 10 == 0:  # Record summaries and test-set accuracy
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict=feed_dict(False))
            test_writer.add_summary(summary, i)
            print('Accuracy at step %s: %s' % (i, acc))
        else:  # Record train set summaries, and train
            summary, _ = sess.run([merged, train_step],
                                  feed_dict=feed_dict(True))
            train_writer.add_summary(summary, i)
def main(_):
    if tf.gfile.Exists(FLAGS.summaries_dir):
        tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
    tf.gfile.MakeDirs(FLAGS.summaries_dir)
    train()

if __name__ == '__main__':
    tf.app.run()
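
A note on versions: the listing uses the pre-1.0 summary API. On TensorFlow 1.x the same calls live under tf.summary: tf.scalar_summary becomes tf.summary.scalar, tf.histogram_summary becomes tf.summary.histogram, tf.image_summary becomes tf.summary.image, tf.merge_all_summaries becomes tf.summary.merge_all, tf.train.SummaryWriter becomes tf.summary.FileWriter, and tf.initialize_all_variables becomes tf.global_variables_initializer.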
These helper functions can be reused elsewhere. Point TensorBoard at the log directory /tmp/mnist_logs and each scalar chart can show two curves, one for training and one for testing.
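
To launch the dashboard, run the tensorboard command against the same log directory and open the URL it prints (http://localhost:6006 by default):

tensorboard --logdir=/tmp/mnist_logs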

Note: writing summary data too frequently will noticeably increase running time! Disk I/O is simply too slow, and even with an SSD there is no need to save everything (every bit of speed helps). The usual practice is to write summaries only every 100 to 1000 steps, which is plenty for the charts.
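As a minimal sketch of that advice (the summary_interval flag is my own addition; everything else follows the script above), the training loop could take the interval from a flag instead of the hard-coded 10, and skip the merged summary op entirely on ordinary steps so nothing is written to disk:

flags.DEFINE_integer('summary_interval', 100, 'Steps between summary writes.')

for i in range(FLAGS.max_steps):
    if i % FLAGS.summary_interval == 0:
        # Occasionally evaluate the merged summaries and test accuracy.
        summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
        test_writer.add_summary(summary, i)
        print('Accuracy at step %s: %s' % (i, acc))
    else:
        # On ordinary steps run only the train op - no summary is
        # computed or written, so the disk is never touched.
        sess.run(train_step, feed_dict=feed_dict(True))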

