The meaning of the class members of each Caffe layer

Convolution source code

1 blob

protected:
 shared_ptr<SyncedMemory> data_;
 shared_ptr<SyncedMemory> diff_;
 shared_ptr<SyncedMemory> shape_data_;
 vector<int> shape_;
 int count_;
 int capacity_;

data_ : stores the data used in the forward pass
diff_ : stores the gradients used in the backward pass
shape_data_ : a SyncedMemory smart pointer holding the Blob's shape data
shape_ : stores the Blob's shape
count_ : the number of elements in the Blob, equal to num×channels×height×width for a 4-D blob
capacity_ : the capacity of the Blob, i.e. the number of elements the currently allocated memory can hold; memory is reallocated only when count_ exceeds it
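The relation between shape_, count_ and capacity_ can be shown with a minimal sketch. This is only an illustration, not Caffe's actual Blob::Reshape; the member names simply mirror the listing above:

#include <functional>
#include <numeric>
#include <vector>

struct MiniBlob {
  std::vector<int> shape_;   // e.g. {num, channels, height, width}
  int count_ = 0;            // total number of elements, the product of shape_
  int capacity_ = 0;         // number of elements the current allocation can hold

  void Reshape(const std::vector<int>& shape) {
    shape_ = shape;
    count_ = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
    if (count_ > capacity_) {
      capacity_ = count_;    // only here would data_ / diff_ need to be reallocated
    }
  }
};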

2 net

/// @brief The network name
string name_;  // the name of the network
/// @brief The phase: TRAIN or TEST
Phase phase_;  // the current phase, TRAIN or TEST
/// @brief Individual layers in the net
vector<shared_ptr<Layer<Dtype> > > layers_; // the individual layers
vector<string> layer_names_; // the name of each layer
map<string, int> layer_names_index_; // maps each layer name to its index
vector<bool> layer_need_backward_; // whether each layer needs backpropagation
/// @brief the blobs storing intermediate results between the layer.
vector<shared_ptr<Blob<Dtype> > > blobs_; // the blobs storing intermediate results between layers
vector<string> blob_names_; // the name of each blob
map<string, int> blob_names_index_; // maps each blob name to its index
vector<bool> blob_need_backward_; // whether each blob needs backpropagation
/// bottom_vecs stores the vectors containing the input for each layer.
/// They don't actually host the blobs (blobs_ does), so we simply store
/// pointers.
vector<vector<Blob<Dtype>*> > bottom_vecs_; // the inputs of each layer; one vector<Blob<Dtype>*> per layer
vector<vector<int> > bottom_id_vecs_; // the ids of the input blobs of each layer
vector<vector<bool> > bottom_need_backward_; // whether each input blob of each layer needs backpropagation
/// top_vecs stores the vectors containing the output for each layer
vector<vector<Blob<Dtype>*> > top_vecs_; // the outputs of each layer; one vector<Blob<Dtype>*> per layer
vector<vector<int> > top_id_vecs_; // the ids of the output blobs of each layer
/// Vector of weight in the loss (or objective) function of each net blob,
/// indexed by blob_id.
vector<Dtype> blob_loss_weights_;  // the loss weight of each blob
vector<vector<int> > param_id_vecs_; // the ids of the parameter blobs of each layer
vector<int> param_owners_; // the owner of each parameter (-1 if the parameter is its own owner)
vector<string> param_display_names_; // the display name of each parameter
vector<pair<int, int> > param_layer_indices_; // (layer index, param index within that layer) for each parameter
map<string, int> param_names_index_; // maps each named parameter to its index
/// blob indices for the input and the output of the net
vector<int> net_input_blob_indices_;   // indices of the net's input blobs
vector<int> net_output_blob_indices_;  // indices of the net's output blobs
vector<Blob<Dtype>*> net_input_blobs_; // the net's input blobs (raw pointers)
vector<Blob<Dtype>*> net_output_blobs_; // the net's output blobs (raw pointers)
/// The parameters in the network.
vector<shared_ptr<Blob<Dtype> > > params_; // the parameters of the network
vector<Blob<Dtype>*> learnable_params_; // the learnable parameters of the network
/**
 * The mapping from params_ -> learnable_params_: we have
 * learnable_param_ids_.size() == params_.size(),
 * and learnable_params_[learnable_param_ids_[i]] == params_[i].get()
 * if and only if params_[i] is an "owner"; otherwise, params_[i] is a sharer
 * and learnable_params_[learnable_param_ids_[i]] gives its owner.
 */
vector<int> learnable_param_ids_;
/// the learning rate multipliers for learnable_params_
vector<float> params_lr_; // the learning rate multiplier of each parameter
vector<bool> has_params_lr_; // whether a learning rate multiplier is specified for each parameter
/// the weight decay multipliers for learnable_params_
vector<float> params_weight_decay_; // the weight decay multiplier of each parameter
vector<bool> has_params_decay_;     // whether a weight decay multiplier is specified for each parameter
/// The bytes of memory used by this net
size_t memory_used_;    // the memory used by this net, in bytes
/// Whether to compute and display debug info for the net.
bool debug_info_;
/// The root net that actually holds the shared layers in data parallelism
const Net* const root_net_;
DISABLE_COPY_AND_ASSIGN(Net);
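How these containers work together is easiest to see in the forward pass: layer i reads its inputs from bottom_vecs_[i] and writes its outputs to top_vecs_[i], and all of those pointers refer to blobs owned by blobs_. Below is a simplified sketch of that loop; it assumes the Caffe headers are available and is an illustration, not the literal Net<Dtype>::Forward implementation:

#include <vector>
#include <boost/shared_ptr.hpp>
#include "caffe/blob.hpp"
#include "caffe/layer.hpp"

// Run every layer in order and accumulate the loss returned by loss layers.
template <typename Dtype>
Dtype ForwardAllLayers(
    const std::vector<boost::shared_ptr<caffe::Layer<Dtype> > >& layers_,
    const std::vector<std::vector<caffe::Blob<Dtype>*> >& bottom_vecs_,
    const std::vector<std::vector<caffe::Blob<Dtype>*> >& top_vecs_) {
  Dtype loss = 0;
  for (size_t i = 0; i < layers_.size(); ++i) {
    // each layer only sees raw Blob pointers; the net owns the memory
    loss += layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]);
  }
  return loss;
}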

3 layer

Caffe source code

base convolution (BaseConvolutionLayer)

protected:
 /// @brief The spatial dimensions of a filter kernel.
Blob<int> kernel_shape_;  // shape of the convolution kernel
/// @brief The spatial dimensions of the stride.
Blob<int> stride_;        // stride
/// @brief The spatial dimensions of the padding.
Blob<int> pad_;           // padding
/// @brief The spatial dimensions of the dilation.
Blob<int> dilation_;      // dilation factor
/// @brief The spatial dimensions of the convolution input.
Blob<int> conv_input_shape_; // shape of the convolution input
/// @brief The spatial dimensions of the col_buffer.
vector<int> col_buffer_shape_; // shape of the im2col buffer
/// @brief The spatial dimensions of the output.
vector<int> output_shape_;     // shape of the output
const vector<int>* bottom_shape_; // shape of the bottom (input) blob
int num_spatial_axes_;  // number of spatial axes
int bottom_dim_;        // input dimension (elements per image in the bottom blob)
int top_dim_;           // output dimension (elements per image in the top blob)
int channel_axis_;      // index of the channel axis
int num_;               // number of images in a batch
int channels_;          // number of input channels
int group_;             // number of convolution groups (the group parameter)
int out_spatial_dim_;   // output spatial dimension (output H*W)
int weight_offset_;     // per-group weight offset, used with convolution groups
int num_output_;        // number of output channels of the convolution
bool bias_term_;        // whether a bias term is used
bool is_1x1_;           // whether this is a 1x1 convolution
bool force_nd_im2col_;  // force the general n-D im2col path

private:

int num_kernels_im2col_; // used by the GPU im2col path
int num_kernels_col2im_; // used by the GPU col2im path
int conv_out_channels_;    // number of convolution output channels
int conv_in_channels_;     // number of convolution input channels
int conv_out_spatial_dim_; // output spatial dimension = output H*W after convolution
int kernel_dim_;           // size of one filter: input channels per group * kernel H * kernel W
int col_offset_;           // per-group offset into the col buffer
int output_offset_;        // per-group offset into the output
Blob<Dtype> col_buffer_;   // buffer used by im2col
Blob<Dtype> bias_multiplier_; // a blob of ones used to add the bias via gemm
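The spatial members above determine output_shape_ through the standard convolution output-size formula, out = (in + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1, applied per spatial axis. A minimal sketch of that computation follows; it uses plain std::vector instead of the Blob<int> members of the actual class and is only an illustration:

#include <vector>

std::vector<int> ComputeOutputShape(const std::vector<int>& input_shape,   // spatial dims of the input
                                    const std::vector<int>& kernel_shape,
                                    const std::vector<int>& stride,
                                    const std::vector<int>& pad,
                                    const std::vector<int>& dilation) {
  std::vector<int> output_shape(input_shape.size());
  for (size_t i = 0; i < input_shape.size(); ++i) {
    const int kernel_extent = dilation[i] * (kernel_shape[i] - 1) + 1;  // effective kernel size
    output_shape[i] = (input_shape[i] + 2 * pad[i] - kernel_extent) / stride[i] + 1;
  }
  return output_shape;
}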

Important parameters of the Layer class

LayerParameter layer_param_;              // the layer's parameters as stored in the protobuf (prototxt) definition
vector<shared_ptr<Blob<Dtype> > > blobs_; // the learnable parameters of the layer, used at run time
vector<bool> param_propagate_down_;       // whether to compute the diff of each parameter blob, i.e. whether to propagate the error to it
vector<Dtype> loss_;                      // the loss weight associated with each top blob
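As a rough illustration of how blobs_ and param_propagate_down_ interact (a simplified sketch with hypothetical names, not actual Caffe code): blobs_ typically holds one entry per learnable parameter, for example weights in blobs_[0] and bias in blobs_[1], and param_propagate_down_ carries one flag per entry telling the backward pass whether to compute that parameter's diff:

#include <memory>
#include <vector>

struct MiniLayer {
  // one entry per learnable parameter, e.g. {weights, bias}
  std::vector<std::shared_ptr<std::vector<float> > > blobs_;
  // one flag per entry of blobs_: compute its gradient (diff) or not
  std::vector<bool> param_propagate_down_;

  void Backward() {
    for (size_t i = 0; i < blobs_.size(); ++i) {
      if (param_propagate_down_[i]) {
        // compute and accumulate the gradient for parameter blob i here
      }
    }
  }
};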