import tensorflow as tf
import sys
import numpy as np
def main():
    """Linear-regression TensorBoard demo: fit _y = x*w + b to y = 2*x - 10.

    Trains with Adam for 100 steps, logging the loss and learning rate as
    merged scalar summaries to ./tmp/.  Returns None, which the __main__
    guard maps to exit code 0.
    """
    # (1) Define the network model.
    sess = tf.Session()
    x = tf.placeholder(tf.float32, shape=[None], name="input")
    y = tf.placeholder(tf.float32, shape=[None], name="output")
    lr = tf.placeholder(tf.float32, name="learning_rate")
    w = tf.Variable(0, dtype=tf.float32)
    b = tf.Variable(0, dtype=tf.float32)
    _y = x * w + b
    loss = tf.reduce_sum(tf.square(y - _y))
    step = tf.Variable(0, trainable=False)
    # (2) Define summaries: first argument is the tag name, second the Tensor.
    # NOTE(review): TF sanitizes the space in "Learning Rate" to an underscore
    # and emits a warning; tag kept as-is to preserve existing event files.
    loss_summary = tf.summary.scalar("Loss", loss)
    lr_summary = tf.summary.scalar("Learning Rate", lr)
    # (3) Merge all summaries and open a FileWriter that records to ./tmp/.
    all_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter("./tmp/", sess.graph)
    # Every run of train_op auto-increments `step` via global_step.
    train_op = tf.train.AdamOptimizer(lr).minimize(loss, global_step=step)
    sess.run(tf.global_variables_initializer())
    try:
        for i in range(100):
            x_data = np.random.random([40])
            y_data = x_data * 2 - 10
            feed_dict = {
                x: x_data,
                y: y_data,
                lr: i / 10000,  # linear warm-up; note lr is 0 on the first step
            }
            # (4) Fetch the merged summary together with the train op and hand
            # its value straight to writer.add_summary; the second argument is
            # the x-axis position (the global step).
            _, summary_value, current_step, wval, bval, loss_val = sess.run(
                [train_op, all_summary, step, w, b, loss], feed_dict=feed_dict
            )
            # BUG FIX: print the evaluated loss_val, not the `loss` Tensor
            # object (the original printed the Tensor and left loss_val unused).
            print("loss=%s w=%s b=%s summary=%s" % (loss_val, wval, bval, summary_value))
            writer.add_summary(summary_value, current_step)
    finally:
        # Flush pending events to disk even if training raises.
        writer.close()
    return
if __name__ == "__main__":
    # main() returns None on success; `or 0` maps that to exit code 0.
    exit_code = int(main() or 0)
    sys.exit(exit_code)
# How to log a plain float value directly, without obtaining it via run(summary):
import tensorflow as tf
import math
def scalar_logger(tf_writer, name, value, step):
    """Write a plain Python float as a scalar summary, bypassing sess.run().

    Builds the tf.Summary protobuf by hand (tag `name`, simple_value `value`)
    and passes it to `tf_writer` at horizontal position `step`, so no summary
    op or graph execution is required.
    """
    proto = tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=value)])
    tf_writer.add_summary(proto, step)
if __name__ == "__main__":
    # Demo: record sqrt(step) under the tag "arc" for 1000 steps.
    summary_writer = tf.summary.FileWriter("./", tf.Session().graph)
    for current_step in range(1000):
        scalar_logger(summary_writer, "arc", math.sqrt(current_step), current_step)