Blob进阶6 (显示代码行号)

先讲讲怎么看代码。

Linux 系统下用文本编辑器（如 gedit）打开代码文件时通常不显示行号，怎么让它显示呢？

1. 打开代码文本

2. 点击最上角的 Edit

3. 点击 Preferences

4. 勾选 Display line numbers

5. 勾选最后一行（高亮匹配括号），可以和 VS 一样匹配括号，很方便



进入正题。Caffe 的数据读取层（DataLayer）是 Layer 的派生类：除了从 LMDB、LEVELDB 读取数据之外，也可以从原始图像直接读取（ImageDataLayer）。

数据读取层的实现位于 src/caffe/layers/base_data_layer.cpp 中，现在把代码贴出来。

    #include <boost/thread.hpp>  
    #include <vector>  
      
    #include "caffe/blob.hpp"  
    #include "caffe/data_transformer.hpp"  
    #include "caffe/internal_thread.hpp"  
    #include "caffe/layer.hpp"  
    #include "caffe/layers/base_data_layer.hpp"  
    #include "caffe/proto/caffe.pb.h"  
    #include "caffe/util/blocking_queue.hpp"  
      
    namespace caffe {  
      
    // Constructor: forwards the layer parameter to the Layer base class and
    // caches the transformation parameters that the DataTransformer will use.
    template <typename Dtype>
    BaseDataLayer<Dtype>::BaseDataLayer(const LayerParameter& param)
        : Layer<Dtype>(param),
          transform_param_(param.transform_param()) {}
      
    // One-time setup shared by all data layers: decide whether labels are
    // produced, build the data transformer, then delegate blob sizing to the
    // subclass via DataLayerSetUp().
    template <typename Dtype>
    void BaseDataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top) {
      // A single top blob means data only; a second top blob carries labels.
      output_labels_ = (top.size() != 1);
      // Build a transformer for the current phase and seed its RNG.
      data_transformer_.reset(
          new DataTransformer<Dtype>(transform_param_, this->phase_));
      data_transformer_->InitRand();
      // The subclasses should setup the size of bottom and top.
      DataLayerSetUp(bottom, top);
    }
      
    // Constructor: every prefetch batch starts out empty, so hand all of the
    // slots to the "free" queue; the prefetch thread pops from there, fills a
    // batch, and pushes it onto the "full" queue.
    template <typename Dtype>
    BasePrefetchingDataLayer<Dtype>::BasePrefetchingDataLayer(
        const LayerParameter& param)
        : BaseDataLayer<Dtype>(param),
          prefetch_free_(), prefetch_full_() {
      for (int slot = 0; slot < PREFETCH_COUNT; ++slot) {
        prefetch_free_.push(&prefetch_[slot]);
      }
    }
      
    template <typename Dtype>
    void BasePrefetchingDataLayer<Dtype>::LayerSetUp(
        const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
      BaseDataLayer<Dtype>::LayerSetUp(bottom, top);
      // Touch the CPU (and, in GPU mode, GPU) buffers of every prefetch batch
      // on the main thread before the worker starts.  This forces allocations
      // to happen here, so the prefetch thread never triggers a cudaMalloc
      // concurrently with the main thread -- simultaneous allocation has been
      // observed to fail on some GPUs.
      for (int b = 0; b < PREFETCH_COUNT; ++b) {
        prefetch_[b].data_.mutable_cpu_data();
        if (this->output_labels_) {
          prefetch_[b].label_.mutable_cpu_data();
        }
      }
    #ifndef CPU_ONLY
      if (Caffe::mode() == Caffe::GPU) {
        for (int b = 0; b < PREFETCH_COUNT; ++b) {
          prefetch_[b].data_.mutable_gpu_data();
          if (this->output_labels_) {
            prefetch_[b].label_.mutable_gpu_data();
          }
        }
      }
    #endif
      DLOG(INFO) << "Initializing prefetch";
      // Reseed the transformer RNG, then launch the background prefetch thread.
      this->data_transformer_->InitRand();
      StartInternalThread();
      DLOG(INFO) << "Prefetch initialized.";
    }
      
    // Body of the background prefetch thread: a producer loop that repeatedly
    // pops an empty batch from the free queue, fills it via load_batch(), and
    // pushes it onto the full queue for Forward_cpu/Forward_gpu to consume.
    // Runs until must_stop() becomes true or the thread is interrupted.
    template <typename Dtype>
    void BasePrefetchingDataLayer<Dtype>::InternalThreadEntry() {  
    #ifndef CPU_ONLY  
      // In GPU mode, create a dedicated non-blocking stream so the async
      // host-to-device push below does not serialize with the default stream.
      cudaStream_t stream;  
      if (Caffe::mode() == Caffe::GPU) {  
        CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));  
      }  
    #endif  
      
      try {  
        while (!must_stop()) {  
          // Blocks until a free batch slot is available.
          Batch<Dtype>* batch = prefetch_free_.pop();  
          load_batch(batch);  
    #ifndef CPU_ONLY  
          if (Caffe::mode() == Caffe::GPU) {  
            // Start the device copy on our stream, then wait for it to finish
            // so the batch is fully resident on the GPU before being published.
            batch->data_.data().get()->async_gpu_push(stream);  
            CUDA_CHECK(cudaStreamSynchronize(stream));  
          }  
    #endif  
          prefetch_full_.push(batch);  
        }  
      } catch (boost::thread_interrupted&) {  
        // Interrupted exception is expected on shutdown.
      }  
    #ifndef CPU_ONLY  
      // Release the CUDA stream created above before the thread exits.
      if (Caffe::mode() == Caffe::GPU) {  
        CUDA_CHECK(cudaStreamDestroy(stream));  
      }  
    #endif  
    }  
      
    // CPU forward pass: consume one prefetched batch, copy it into the top
    // blobs, and return the batch to the free queue for reuse.
    template <typename Dtype>
    void BasePrefetchingDataLayer<Dtype>::Forward_cpu(
        const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
      // Block until the prefetch thread has produced a filled batch.
      Batch<Dtype>* filled =
          prefetch_full_.pop("Data layer prefetch queue empty");
      // Match the top blob's shape to the loaded data, then copy it across.
      top[0]->ReshapeLike(filled->data_);
      caffe_copy(filled->data_.count(), filled->data_.cpu_data(),
                 top[0]->mutable_cpu_data());
      DLOG(INFO) << "Prefetch copied";
      if (this->output_labels_) {
        // Same treatment for the labels when a second top blob is present.
        top[1]->ReshapeLike(filled->label_);
        caffe_copy(filled->label_.count(), filled->label_.cpu_data(),
            top[1]->mutable_cpu_data());
      }
      // Recycle the batch so the prefetch thread can refill it.
      prefetch_free_.push(filled);
    }
      
    #ifdef CPU_ONLY  
    // In CPU-only builds, stub the GPU forward path so it falls back to CPU.
    STUB_GPU_FORWARD(BasePrefetchingDataLayer, Forward);  
    #endif  
      
    // Explicitly instantiate the class templates for float and double.
    INSTANTIATE_CLASS(BaseDataLayer);  
    INSTANTIATE_CLASS(BasePrefetchingDataLayer);  
      
    }  // namespace caffe  


  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值