Specify the device on which operations run (useful when multiple GPUs are present):
# log device placement; if the specified device does not exist, allow soft (automatic) placement
config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
config.gpu_options.allow_growth = True  # grow GPU memory allocation on demand
config.gpu_options.per_process_gpu_memory_fraction = 0.7  # cap the process at 70% of GPU memory
with tf.Session(config=config) as sess:
    with tf.device("/gpu:1"):
        a = tf.placeholder(tf.int32)
        b = tf.placeholder(tf.int32)
        add = tf.add(a, b)
    print(sess.run(add, feed_dict={a: 4, b: 6}))
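Before pinning ops to "/gpu:1", it helps to confirm which devices TensorFlow actually sees. A minimal sketch using device_lib (a TF 1.x internal module commonly used for this purpose):
from tensorflow.python.client import device_lib

# list every device visible to this process, e.g. /device:CPU:0, /device:GPU:0 ...
for dev in device_lib.list_local_devices():
    print(dev.name, dev.device_type)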
Save the model:
saver = tf.train.Saver()       # a Saver covers all savable variables by default
saver.save(sess, "a/myModel")  # writes the checkpoint files under ./a
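tf.train.Saver can also take an explicit name-to-variable dict, useful when only part of the graph should be checkpointed. A minimal sketch, assuming variables W and b exist in the graph:
saver_wb = tf.train.Saver({'weight': W, 'bias': b})  # save only W and b, under custom names
saver_wb.save(sess, "a/myModel_wb")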
Restore the model:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # optional here; restore() overwrites the values anyway
    saver.restore(sess, "a/myModel")
    # run a prediction with the restored model
    print(sess.run(z, feed_dict={X: 0.2}))
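If the graph-building code is not available at restore time, the graph itself can be reloaded from the .meta file that save() writes next to the checkpoint. A sketch, assuming the input placeholder and output were created under the names "X" and "z" (the tensor names here are hypothetical; adjust them to the actual graph):
with tf.Session() as sess:
    saver = tf.train.import_meta_graph("a/myModel.meta")  # rebuild the graph from the .meta file
    saver.restore(sess, "a/myModel")                      # then load the variable values
    g = tf.get_default_graph()
    X_restored = g.get_tensor_by_name("X:0")  # hypothetical names; depend on how the
    z_restored = g.get_tensor_by_name("z:0")  # graph was originally built
    print(sess.run(z_restored, feed_dict={X_restored: 0.2}))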
Print the checkpoint contents:
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
print_tensors_in_checkpoint_file('a/myModel', None, True)  # tensor_name=None + all_tensors=True dumps everything
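The same helper can dump a single variable instead of all of them; a short sketch, assuming the checkpoint contains a variable named 'W':
print_tensors_in_checkpoint_file('a/myModel', 'W', False)  # print only the tensor named 'W'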
Save the model every given number of iterations:
saver.save(sess,"a/myModel",global_step=epoch)
Restore the latest of the numbered checkpoints:
kpt = tf.train.latest_checkpoint('a')  # path of the newest checkpoint under ./a
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=2)  # when saving, keep only the two newest checkpoints
    saver.restore(sess, kpt)
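tf.train.latest_checkpoint returns None when the directory holds no checkpoint yet, so a guard avoids a confusing restore error on the first run:
kpt = tf.train.latest_checkpoint('a')
if kpt:  # None means nothing has been saved yet
    saver.restore(sess, kpt)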
Save the model on a timer:
global_step = tf.train.get_or_create_global_step()
step = tf.assign_add(global_step, 1)
with tf.train.MonitoredTrainingSession(checkpoint_dir='a', save_checkpoint_secs=2) as sess:
    print(sess.run([global_step]))  # resumes from the latest checkpoint in ./a, if one exists
    while not sess.should_stop():   # runs until a stop is requested
        i = sess.run(step)
        print(i)
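As written, the loop only ends when something requests a stop. A common way to bound it is tf.train.StopAtStepHook, which makes should_stop() return True once global_step reaches a limit; a sketch (last_step=100 is an arbitrary choice):
hooks = [tf.train.StopAtStepHook(last_step=100)]  # stop once global_step reaches 100
with tf.train.MonitoredTrainingSession(checkpoint_dir='a',
                                       save_checkpoint_secs=2,
                                       hooks=hooks) as sess:
    while not sess.should_stop():
        print(sess.run(step))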
Using TensorBoard:
z = tf.multiply(X, W) + b
# histogram summary of the model output
tf.summary.histogram('z', z)
cost = tf.reduce_mean(tf.square(Y - z))  # loss function (mean squared error)
# scalar summary of the loss
tf.summary.scalar('loss_function', cost)
with tf.Session() as sess:
    sess.run(init)
    merged_summary_op = tf.summary.merge_all()  # merge every summary op into one
    summary_writer = tf.summary.FileWriter('a/sum', sess.graph)
    pltdata = {'batchsize': [], 'loss': []}  # collect epoch indices and loss values for plotting
    saver = tf.train.Saver(max_to_keep=1)
    # feed the training data into the model
    for epoch in range(train_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
            summary_str = sess.run(merged_summary_op, feed_dict={X: x, Y: y})
            summary_writer.add_summary(summary_str, epoch)  # one summary per sample, tagged with the epoch
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch", epoch + 1, "cost", loss, "W=", sess.run(W), "b=", sess.run(b))
            if not np.isnan(loss):  # record only valid losses (assumes numpy as np; the original 'NA' string check never triggers)
                pltdata['batchsize'].append(epoch)
                pltdata['loss'].append(loss)
    saver.save(sess, "a/myModel")
    print('Done.')
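The pltdata collected above can be plotted after training to eyeball convergence; a minimal sketch with matplotlib:
import matplotlib.pyplot as plt

plt.plot(pltdata['batchsize'], pltdata['loss'], 'b-')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('training loss')
plt.show()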
From the log directory, run the command below, then open the printed address in a browser.
D:\py_project\p4\a\sum>tensorboard --logdir D:\py_project\p4\a\sum
Output like the following indicates a successful start:
(deepl) D:\py_project\p4\a\sum>tensorboard --logdir D:\py_project\p4\a\sum
TensorBoard 1.10.0 at http://LAPTOP-TJDALU36:6006 (Press CTRL+C to quit)
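TensorBoard serves on port 6006 by default; if that port is taken, the standard --port flag selects another one, e.g.:
tensorboard --logdir D:\py_project\p4\a\sum --port 6007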