with tf.name_scope('loss_value'):
    # Total loss = mean softmax cross-entropy over the batch plus all
    # regularization terms collected under the 'losses' collection key.
    loss = tf.reduce_mean(
        tf.losses.softmax_cross_entropy(onehot_labels=y_, logits=y)
    ) + tf.add_n(tf.get_collection('losses'))
# Track the loss value in TensorBoard.
tf.summary.scalar('loss_value', loss)

# Exponentially decaying learning rate: multiplied by LEARNING_RATE_DECAY
# every 1000 global steps (staircase=True -> discrete step-wise decay).
learning_rate = tf.train.exponential_decay(
    Cfg.LEARNING_RATE_BASE, global_step, 1000,
    Cfg.LEARNING_RATE_DECAY, staircase=True)
tf.summary.scalar("learningRate", learning_rate)

# Minimize the loss with plain SGD; `global_step` is incremented by the
# optimizer on every parameter update.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    loss, global_step=global_step)

# Back-propagation: group the gradient update with the moving-average
# update of the network parameters so that a single sess.run(train_op)
# performs both.
with tf.control_dependencies([train_step, variables_averages_op]):
    train_op = tf.no_op(name='train')

merged = tf.summary.merge_all()
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    # Writer for TensorBoard logs (graph, scalar summaries, run metadata).
    summary_writer = tf.summary.FileWriter('/path/to/log', sess.graph)
    try:
        # Train the network iteratively.
        for i in range(Cfg.TRAINING_STEPS):
            # Fetch one batch for this step and reshape to the network's
            # expected input: images -> NHWC tensor, labels -> one-hot rows.
            train_data, train_label = DATA.train_batch()
            xs = np.reshape(train_data, (Cfg.BATCH_SIZE, Cfg.IMAGE_SIZE,
                                         Cfg.IMAGE_SIZE, Cfg.NUM_CHANNELS))
            ys = np.reshape(train_label, [Cfg.BATCH_SIZE, Cfg.OUTPUT_NODE])

            if i % 50 == 0:
                # Full runtime tracing is expensive -- only request it on
                # the steps whose metadata is actually written to the log.
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, loss_value, step, summary = sess.run(
                    [train_op, loss, global_step, merged],
                    feed_dict={x: xs, y_: ys},
                    options=run_options, run_metadata=run_metadata)
                # Write profiling info and summaries to the log.
                summary_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                summary_writer.add_summary(summary, i)
            else:
                _, loss_value, step, summary = sess.run(
                    [train_op, loss, global_step, merged],
                    feed_dict={x: xs, y_: ys})
    finally:
        # Flush and close the writer so all pending events reach disk.
        summary_writer.close()
代码配置完成并运行后,打开命令行(cmd),输入以下命令启动 TensorBoard:
tensorboard --logdir=/path/to/log   ### 注意:该路径必须与代码中定义的日志保存路径一致
启动成功后打开浏览器,访问:
http://desktop-g63nqii:6006   ### 冒号前面为自己计算机的主机名(也可用 localhost)