[TensorFlow] Learning TensorBoard

1. Using TensorBoard

1) Build the graph and define the tensors you want to track.

Method 1: tf.summary.histogram(name, values)

Records a tensor as a histogram summary, e.g. layer weights.

Method 2: tf.summary.scalar(name, value)

Records scalar data, e.g. the loss.
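
Both calls only attach summary ops to the graph; nothing is written to disk until the ops are run and their output is handed to a writer (steps 2 through 4 below). A minimal sketch, where weights and loss are stand-ins for your own tensors:

import tensorflow as tf

weights = tf.Variable(tf.truncated_normal([784, 10], stddev=0.1), name='weights')
loss = tf.reduce_mean(tf.square(weights))  # placeholder loss, for illustration only

tf.summary.histogram('weights', weights)  # distribution over time (HISTOGRAMS tab)
tf.summary.scalar('loss', loss)           # single curve (SCALARS tab)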

2) Merge the summaries defined above so they can all be run as one op.

Merge them by defining a single op:

merged=tf.summary.merge_all()

3) Define where the run's records and logs are stored, and create the writer (passing sess.graph also makes the graph browsable in TensorBoard's GRAPHS tab):

writer=tf.summary.FileWriter(log_save_path,sess.graph)

4) Start a Session, run merged, and record the result with the writer; the second argument is the global step, which becomes the x-axis of the scalar curves:

writer.add_summary(result,i)
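
Putting steps 2) through 4) together, a minimal training-loop sketch (train_op and feed are stand-ins for your own optimizer op and feed dict):

merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/tensorlogs/demo', sess.graph)
    for i in range(100):
        # run the merged summary op alongside the training op
        result, _ = sess.run([merged, train_op], feed_dict=feed)
        writer.add_summary(result, i)
    writer.close()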

5) After the program finishes, open a terminal in the directory from which the Python script was run and enter tensorboard --logdir=log_save_path (no quotes around the path), then open the URL it prints, by default http://localhost:6006.

Demo

The demo below builds a small CNN with a 30-dimensional regression output, records the first convolution layer's weights as a histogram and the RMSE loss as a scalar, and writes the logs to /tmp/tensorlogs/test.

#Build the CNN
import tensorflow as tf

#image width/height/channels; example values, replace with your data's shape
w,h,c=96,96,3

x=tf.placeholder(tf.float32,[None,w,h,c],name='x')
y_=tf.placeholder(tf.float32,[None,30],name='y_')
keep_prob=tf.placeholder(tf.float32,name='keep_prob')

def inference(input_tensor,train,regularizer):
    
    with tf.variable_scope('layer1-conv1'):
        conv1_weights=tf.get_variable('weight',[3,3,c,32],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases=tf.get_variable('bias',[32],initializer=tf.constant_initializer(0.0))
        conv1=tf.nn.conv2d(input_tensor,conv1_weights,strides=[1,1,1,1],padding='VALID')
      
        relu1=tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))
        
        #record this layer's weights as a histogram in TensorBoard
        tf.summary.histogram('layer1-conv1',conv1_weights)
        
    with tf.name_scope('layer2_pool1'):
        pool1=tf.nn.max_pool(relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
        
    with tf.variable_scope('layer3-conv2'):
        conv2_weights=tf.get_variable('weight',[2,2,32,64],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases=tf.get_variable('bias',[64],initializer=tf.constant_initializer(0.0))
        conv2=tf.nn.conv2d(pool1,conv2_weights,strides=[1,1,1,1],padding='VALID')
        relu2=tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))
       
    with tf.name_scope('layer4_pool2'):
        pool2=tf.nn.max_pool(relu2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
        
  
    with tf.variable_scope('layer5-conv3'):
 
        conv3_weights=tf.get_variable('weight',[2,2,64,128],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases=tf.get_variable('bias',[128],initializer=tf.constant_initializer(0.0))
        conv3=tf.nn.conv2d(pool2,conv3_weights,strides=[1,1,1,1],padding='VALID')
        relu3=tf.nn.relu(tf.nn.bias_add(conv3,conv3_biases))
        

    with tf.name_scope('layer6_pool3'):
        pool3=tf.nn.max_pool(relu3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
  
    #flatten the last pooling output into [batch,nodes] for the fully connected layers
    pool_shape=pool3.get_shape().as_list()
    nodes=pool_shape[1]*pool_shape[2]*pool_shape[3]
    reshaped=tf.reshape(pool3,[-1,nodes])
    
    with tf.variable_scope('layer7-fc1'):
        fc1_weights=tf.get_variable('weight',[nodes,500],initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc1_weights))
        fc1_biases=tf.get_variable('bias',[500],initializer=tf.constant_initializer(0.1))
        fc1=tf.nn.relu(tf.matmul(reshaped,fc1_weights)+fc1_biases)
        if train:
            fc1=tf.nn.dropout(fc1,keep_prob)  #use the keep_prob placeholder fed at run time
    
    with tf.variable_scope('layer8-fc2'):
        fc2_weights=tf.get_variable('weight',[500,500],initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc2_weights))
        fc2_biases=tf.get_variable('bias',[500],initializer=tf.truncated_normal_initializer(stddev=0.1))
        fc2=tf.nn.relu(tf.matmul(fc1,fc2_weights)+fc2_biases)
        if train:
            fc2=tf.nn.dropout(fc2,keep_prob)  #use the keep_prob placeholder fed at run time
   

    with tf.variable_scope('layer9-fc3'):
        fc3_weights=tf.get_variable('weight',[500,30],initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc3_weights))
        fc3_biases=tf.get_variable('bias',[30],initializer=tf.truncated_normal_initializer(stddev=0.1))
        logit=tf.matmul(fc2,fc3_weights)+fc3_biases
        rmse=tf.sqrt(tf.reduce_mean(tf.square(y_-logit)))
        #track the RMSE loss as a scalar curve in TensorBoard
        tf.summary.scalar('rmse',rmse)

    return logit,rmse


regularizer=tf.contrib.layers.l2_regularizer(0.001)
#train=False disables the dropout branches inside inference; pass True to enable them
#note: the L2 terms collected in 'losses' are never added to the minimized loss below
y,loss=inference(x,False,regularizer)
train_op=tf.train.AdamOptimizer(0.001).minimize(loss)

sess=tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

#TensorBoard: merge all summaries and create the log writer
merged=tf.summary.merge_all()
writer=tf.summary.FileWriter('/tmp/tensorlogs/test',sess.graph)
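
Tip: give each run its own subdirectory (e.g. /tmp/tensorlogs/test1, /tmp/tensorlogs/test2) and point --logdir at the parent directory; TensorBoard then overlays the runs so they can be compared.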

   
train_num=1000
batch_size=64
validation_size=100
EARLY_STOP_PATIENCE=20
best_validation_loss=1000000.0
current_epoch=0
#train_data/train_label are assumed to be already-loaded numpy arrays;
#hold out the first validation_size samples for early stopping
valid_data,valid_label=train_data[:validation_size],train_label[:validation_size]
_train_data,_train_label=train_data[validation_size:],train_label[validation_size:]
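
The loop below uses a get_train_batch helper; a minimal sketch of one, assuming the data are numpy arrays:

import numpy as np

def get_train_batch(data, labels, batch_size):
    # shuffle once per epoch, then yield successive mini-batches
    idx = np.arange(len(data))
    np.random.shuffle(idx)
    for start in range(0, len(data), batch_size):
        batch = idx[start:start + batch_size]
        yield data[batch], labels[batch]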

count=0
for i in range(train_num):
    #train for one epoch on the portion held out from validation
    for train_data_batch,train_label_batch in get_train_batch(_train_data,_train_label,batch_size):
        result,_=sess.run([merged,train_op],feed_dict={x:train_data_batch,y_:train_label_batch,keep_prob:0.5})
        writer.add_summary(result,count)  #count is the global step shown on the x-axis
        count=count+1

    #early stopping: evaluate on the validation split once per epoch
    validation_loss=loss.eval(feed_dict={x:valid_data,y_:valid_label,keep_prob:1.0})
    if validation_loss<best_validation_loss:
        best_validation_loss=validation_loss
        current_epoch=i
    elif (i-current_epoch)>=EARLY_STOP_PATIENCE:
        print('early stopping')
        break
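
After training, close the writer so any buffered summaries are flushed to disk, then launch TensorBoard as in step 5):

writer.close()

#in a terminal: tensorboard --logdir=/tmp/tensorlogs/test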



                